#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#include "ReadParallel.hpp"
#include "moab/FileOptions.hpp"
#include "MBTagConventions.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "TestUtil.hpp"
#include <iostream>
#include <algorithm>
#include <sstream>
#include <cassert>
#if !defined( _MSC_VER ) && !defined( __MINGW32__ )
#include <unistd.h>
#endif
using namespace moab;
// CHKERR( a ): evaluate the MOAB call 'a'; if it did not return MB_SUCCESS,
// print the numeric error code with file/line to stderr and return the code
// from the enclosing function (which must itself return ErrorCode).
#define CHKERR( a ) \
do \
{ \
ErrorCode val = ( a ); \
if( MB_SUCCESS != val ) \
{ \
std::cerr << "Error code " << val << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
return val; \
} \
} while( false )
// PCHECK( A ): collective assertion — if condition 'A' is false on ANY rank,
// every rank returns MB_FAILURE via report_error() (all ranks must call this).
#define PCHECK( A ) \
if( is_any_proc_error( !( A ) ) ) return report_error( __FILE__, __LINE__ )
/// Print a failure location to stderr and return MB_FAILURE, so the call
/// can be used directly in a return statement (see the PCHECK macro).
ErrorCode report_error( const char* source_file, int source_line )
{
    std::cerr << "Failure at " << source_file << ':' << source_line << std::endl;
    return MB_FAILURE;
}
// Forward declaration: load 'filename' in parallel using the given file
// option string and verify shared-entity consistency (defined below).
ErrorCode test_read( const char* filename, const char* option );
// Run test function A through run_test(), passing its stringified name (#A)
// so the pass/fail report can identify the test.
#define RUN_TEST_ARG3( A, B, C ) run_test( &( A ), #A, B, C )
/// Collective error check: max-reduce the per-rank error flag over
/// MPI_COMM_WORLD so every rank agrees on the outcome.
/// Returns nonzero if any rank reported an error (or if the reduction
/// itself failed); zero only when all ranks succeeded.
int is_any_proc_error( int is_my_error )
{
    int global_flag     = 0;
    const int mpi_stat  = MPI_Allreduce( &is_my_error, &global_flag, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return mpi_stat || global_flag;
}
/// Execute one test function on every rank, reduce the result collectively,
/// and have rank 0 print a one-line pass/fail report.
///
/// @param func      test to run; receives (file name, option string)
/// @param func_name human-readable test name for the report
/// @param file_name mesh file to read
/// @param option    parallel read option string
/// @return nonzero if the test failed on any rank, zero otherwise
int run_test( ErrorCode ( *func )( const char*, const char* ),
              const char* func_name,
              const std::string& file_name,
              const char* option )
{
    const ErrorCode rval = func( file_name.c_str(), option );
    const int failed     = is_any_proc_error( MB_SUCCESS != rval );

    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( 0 == rank )
        std::cout << func_name << ( failed ? " : FAILED!!" : " : success" ) << std::endl;

    return failed;
}
int main( int argc, char* argv[] )
{
int rank, size;
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
MPI_Comm_size( MPI_COMM_WORLD, &size );
int num_errors = 0;
const char* option;<--- The scope of the variable 'option' can be reduced. [+]The scope of the variable 'option' can be reduced. Warning: Be careful when fixing this message, especially when there are inner loops. Here is an example where cppcheck will write that the scope for 'i' can be reduced:
void f(int x)
{
int i = 0;
if (x) {
// it's safe to move 'int i = 0;' here
for (int n = 0; n < 10; ++n) {
// it is possible but not safe to move 'int i = 0;' here
do_something(&i);
}
}
}
When you see this message it is always safe to reduce the variable scope 1 level.
std::string vtk_test_filename = TestDir + "unittest/hex_2048.vtk";
#ifdef MOAB_HAVE_HDF5
std::string filename;
if( 1 < argc )
filename = std::string( argv[1] );
else
filename = TestDir + "unittest/64bricks_512hex.h5m";
//=========== read_delete, geom_dimension, resolve_shared
option = "PARALLEL=READ_DELETE;PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;PARTITION_DISTRIBUTE;"
"PARALLEL_RESOLVE_SHARED_ENTS;";
num_errors += RUN_TEST_ARG3( test_read, filename, option );
//=========== read_delete, material_set, resolve_shared
option = "PARALLEL=READ_DELETE;PARTITION=MATERIAL_SET;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_"
"SHARED_ENTS;";
num_errors += RUN_TEST_ARG3( test_read, filename, option );
//=========== bcast_delete, geom_dimension, resolve_shared
option = "PARALLEL=BCAST_DELETE;PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;PARTITION_DISTRIBUTE;"
"PARALLEL_RESOLVE_SHARED_ENTS;";
num_errors += RUN_TEST_ARG3( test_read, filename, option );
//=========== bcast_delete, material_set, resolve_shared
option = "PARALLEL=BCAST_DELETE;PARTITION=MATERIAL_SET;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_"
"SHARED_ENTS;";
num_errors += RUN_TEST_ARG3( test_read, filename, option );
//=========== read_delete, geom_dimension, resolve_shared, exch ghost
option = "PARALLEL=READ_DELETE;PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;PARTITION_DISTRIBUTE;"
"PARALLEL_RESOLVE_SHARED_ENTS;PARALLEL_GHOSTS=3.0.1;";
num_errors += RUN_TEST_ARG3( test_read, filename, option );
//=========== read_delete, material_set, resolve_shared, exch ghost
option = "PARALLEL=READ_DELETE;PARTITION=MATERIAL_SET;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_"
"SHARED_ENTS;PARALLEL_GHOSTS=3.0.1;";
num_errors += RUN_TEST_ARG3( test_read, filename, option );
//=========== bcast_delete, geom_dimension, resolve_shared, exch ghost
option = "PARALLEL=BCAST_DELETE;PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;PARTITION_DISTRIBUTE;"
"PARALLEL_RESOLVE_SHARED_ENTS;PARALLEL_GHOSTS=3.0.1;";
num_errors += RUN_TEST_ARG3( test_read, filename, option );
//=========== bcast_delete, material_set, resolve_shared, exch ghost
option = "PARALLEL=BCAST_DELETE;PARTITION=MATERIAL_SET;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_"
"SHARED_ENTS;PARALLEL_GHOSTS=3.0.1;";
num_errors += RUN_TEST_ARG3( test_read, filename, option );
#endif
if( vtk_test_filename.size() )
{
//=========== bcast_delete, trivial, resolve_shared
option = "PARALLEL=BCAST_DELETE;PARTITION=TRIVIAL;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_"
"SHARED_ENTS;";
num_errors += RUN_TEST_ARG3( test_read, vtk_test_filename, option );
//=========== bcast_delete, trivial, resolve_shared + ghosting
option = "PARALLEL=BCAST_DELETE;PARTITION=TRIVIAL;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_"
"SHARED_ENTS;PARALLEL_GHOSTS=3.0.1;";
num_errors += RUN_TEST_ARG3( test_read, vtk_test_filename, option );
}
MPI_Finalize();
return num_errors;
}
/// Load @p filename in parallel using the given file @p option string, then
/// verify that shared-entity handles agree across all ranks.
/// Returns MB_SUCCESS on success; otherwise the failing call's error code
/// (via CHKERR, which also logs file/line to stderr).
ErrorCode test_read( const char* filename, const char* option )
{
    Core core;
    Interface& mb = core;

    ErrorCode result = mb.load_file( filename, 0, option );
    CHKERR( result );

    ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
    result = pcomm->check_all_shared_handles();
    CHKERR( result );

    return MB_SUCCESS;
}