dune-alugrid 2.6-git
structuredgridfactory.hh
#ifndef DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH
#define DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH

#include <algorithm>
#include <array>
#include <bitset>
#include <cassert>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <vector>

#include <dune/common/version.hh>

#include <dune/common/classname.hh>
#include <dune/common/exceptions.hh>
#include <dune/common/fvector.hh>
#include <dune/common/shared_ptr.hh>

#include <dune/grid/common/gridfactory.hh>
#include <dune/grid/common/exceptions.hh>

// include DGF parser implementation for YaspGrid
#include <dune/grid/io/file/dgfparser/dgfyasp.hh>

namespace Dune
{

  // External Forward Declarations
  // -----------------------------

  template< class Grid >
  class StructuredGridFactory;


  // StructuredGridFactory for ALUGrid
  // ---------------------------------

  template< int dim, int dimworld, ALUGridElementType eltype, ALUGridRefinementType refineType, class Comm >
  class StructuredGridFactory< ALUGrid< dim, dimworld, eltype, refineType, Comm > >
  {
  public:
    typedef ALUGrid< dim, dimworld, eltype, refineType, Comm > Grid;

#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 6)
    typedef std::unique_ptr< Grid > SharedPtrType;
#else // #if DUNE_VERSION_NEWER(DUNE_GRID, 2, 6)
    // mygrid_ptr in GridPtr is derived from std::shared_ptr and implements a release method
    typedef typename Dune::GridPtr< Grid > :: mygrid_ptr SharedPtrType;
#endif // #else // #if DUNE_VERSION_NEWER(DUNE_GRID, 2, 6)

  protected:
    typedef StructuredGridFactory< Grid > This;

  private:
    // SimplePartitioner
    // -----------------
    template< class GV, PartitionIteratorType pitype, class IS = typename GV::IndexSet >
    class SimplePartitioner
    {
      typedef SimplePartitioner< GV, pitype, IS > This;

    public:
      typedef GV GridView;
      typedef typename GridView::Grid Grid;

      typedef IS IndexSet;

    protected:
      typedef typename IndexSet::IndexType IndexType;

      static const int dimension = Grid::dimension;

      typedef typename Grid::template Codim< 0 >::Entity Element;

      typedef typename Element::Geometry::GlobalCoordinate VertexType;

      // type of communicator
      typedef Dune :: CollectiveCommunication< typename MPIHelper :: MPICommunicator >
        CollectiveCommunication;

#ifdef USE_ALUGRID_SFC_ORDERING
      typedef SpaceFillingCurveOrdering< VertexType > SpaceFillingCurveOrderingType;
#endif

    public:
      SimplePartitioner ( const GridView &gridView, const CollectiveCommunication& comm,
                          const VertexType& lowerLeft, const VertexType& upperRight )
        : comm_( comm ),
          gridView_( gridView ),
          indexSet_( gridView_.indexSet() ),
          pSize_( comm_.size() ),
          elementCuts_( pSize_, -1 ),
#ifdef USE_ALUGRID_SFC_ORDERING
          sfc_( SpaceFillingCurveOrderingType::ZCurve, lowerLeft, upperRight, comm_ ),
#endif
          maxIndex_( -1.0 )
      {
#ifdef USE_ALUGRID_SFC_ORDERING
        const auto end = gridView_.template end< 0 > ();
        for( auto it = gridView_.template begin< 0 > (); it != end; ++it )
        {
          VertexType center = (*it).geometry().center();
          // get space filling curve index in [0,1]
          const double hidx = sfc_.index( center );
          maxIndex_ = std::max( maxIndex_, hidx );
        }

        // scale by the number of elements, so that dividing a curve index
        // by maxIndex_ yields an element number in [0, nElements)
        maxIndex_ /= indexSet_.size( 0 );
#endif

        // compute the decomposition of the space filling curve
        calculateElementCuts();
      }

    public:
      template< class Entity >
      double index( const Entity &entity ) const
      {
        alugrid_assert ( Entity::codimension == 0 );
#ifdef USE_ALUGRID_SFC_ORDERING
        // get center of entity's geometry
        VertexType center = entity.geometry().center();
        // get space filling curve index in [0,1]
        return sfc_.index( center );
#else
        return double(indexSet_.index( entity ));
#endif
      }

      template< class Entity >
      int rank( const Entity &entity ) const
      {
        alugrid_assert ( Entity::codimension == 0 );
#ifdef USE_ALUGRID_SFC_ORDERING
        // get center of entity's geometry
        VertexType center = entity.geometry().center();
        // get space filling curve index in [0,1]
        const double hidx = sfc_.index( center );
        // rescale with maxIndex_ to obtain an element number
        const long int index = (hidx / maxIndex_);
        //std::cout << "sfc index = " << hidx << " " << index << std::endl;
#else
        const long int index = indexSet_.index( entity );
#endif
        return rank( index );
      }

    protected:
      int rank( long int index ) const
      {
        if( index < elementCuts_[ 0 ] ) return 0;
        for( int p=1; p<pSize_; ++p )
        {
          if( index >= elementCuts_[ p-1 ] && index < elementCuts_[ p ] )
            return p;
        }
        return pSize_-1;
      }
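
      // Illustrative lookup (hypothetical cuts): with elementCuts_ = { 4, 7, 10 }
      // index 2 maps to rank 0, index 5 to rank 1 and index 8 to rank 2.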

      void calculateElementCuts()
      {
        const size_t nElements = indexSet_.size( 0 );

        // get number of MPI processes
        const int nRanks = pSize_;

        // get minimal number of elements per process
        const size_t minPerProc = (double(nElements) / double( nRanks ));
        size_t maxPerProc = minPerProc ;
        if( nElements % nRanks != 0 )
          ++ maxPerProc ;

        // compute the number of ranks that receive the larger
        // element count maxPerProc
        double percentage = (double(nElements) / double( nRanks ));
        percentage -= minPerProc ;
        percentage *= nRanks ;

        int rank = 0;
        size_t elementCount = maxPerProc ;
        size_t elementNumber = 0;
        size_t localElementNumber = 0;
        const int lastRank = nRanks - 1;

        const size_t size = indexSet_.size( 0 );
        for( size_t i=0; i<size; ++i )
        {
          if( localElementNumber >= elementCount )
          {
            elementCuts_[ rank ] = i ;

            // increase rank
            if( rank < lastRank ) ++ rank;

            // reset local number
            localElementNumber = 0;

            // switch to the smaller count once enough ranks have
            // received the larger count maxPerProc
            if( elementCount == maxPerProc && rank >= percentage )
              elementCount = minPerProc ;
          }

          // increase counters
          ++elementNumber;
          ++localElementNumber;
        }

        // set cut for last process
        elementCuts_[ lastRank ] = size ;

        //for( int p=0; p<pSize_; ++p )
        //  std::cout << "P[ " << p << " ] = " << elementCuts_[ p ] << std::endl;
      }
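
      // Illustrative run (hypothetical sizes): nElements = 10 and nRanks = 3
      // give minPerProc = 3, maxPerProc = 4 and percentage = 1, so exactly one
      // rank receives 4 elements and elementCuts_ becomes { 4, 7, 10 }.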

      const CollectiveCommunication& comm_;

      const GridView& gridView_;
      const IndexSet &indexSet_;

      const int pSize_;
      std::vector< long int > elementCuts_ ;

#ifdef USE_ALUGRID_SFC_ORDERING
      // element to space filling curve (Hilbert or Z) index mapping
      SpaceFillingCurveOrderingType sfc_;
#endif
      double maxIndex_ ;
    };

  public:
    typedef typename Grid::ctype ctype;
    typedef typename MPIHelper :: MPICommunicator MPICommunicatorType ;

    // type of communicator
    typedef Dune :: CollectiveCommunication< MPICommunicatorType >
      CollectiveCommunication;

    static SharedPtrType
    createCubeGrid( const std::string& filename,
                    MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      std::ifstream file( filename.c_str() );
      if( ! file )
      {
        DUNE_THROW( InvalidStateException, "file not found " << filename );
      }
      return createCubeGrid( file, filename, mpiComm );
    }

    static SharedPtrType
    createCubeGrid( std::istream& input,
                    const std::string& name,
                    MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      CollectiveCommunication comm( MPIHelper :: getCommunicator() );
      static_assert( dim == dimworld, "YaspGrid is used for creation of the structured grid which only supports dim == dimworld" );

      Dune::dgf::IntervalBlock intervalBlock( input );
      if( !intervalBlock.isactive() )
      {
        std::cerr << "No interval block found, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid >( input, mpiComm ).release() );
      }

      if( intervalBlock.numIntervals() != 1 )
      {
        std::cerr << "ALUGrid creation from YaspGrid can only handle one interval block, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid >( input, mpiComm ).release() );
      }

      if( intervalBlock.dimw() != dim )
      {
        std::cerr << "ALUGrid creation from YaspGrid only works for dim == dimworld, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid >( input, mpiComm ).release() );
      }

      const dgf::IntervalBlock::Interval &interval = intervalBlock.get( 0 );

      // without a proper IntervalBlock the grid cannot be created in
      // parallel, in which case the standard DGF fallbacks above are used
      std::array<int, dim> dims;
      FieldVector<ctype, dimworld> lowerLeft;
      FieldVector<ctype, dimworld> upperRight;
      for( int i=0; i<dim; ++i )
      {
        dims[ i ] = interval.n[ i ] ;
        lowerLeft [ i ] = interval.p[ 0 ][ i ];
        upperRight[ i ] = interval.p[ 1 ][ i ];
      }

      // broadcast array values
      comm.broadcast( &dims[ 0 ], dim, 0 );
      comm.broadcast( &lowerLeft [ 0 ], dim, 0 );
      comm.broadcast( &upperRight[ 0 ], dim, 0 );

      std::string nameYasp( name );
      nameYasp += " via YaspGrid";
      typedef StructuredGridFactory< Grid > SGF;
      return SGF :: createCubeGridImpl( lowerLeft, upperRight, dims, comm, nameYasp );
    }
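
    // A DGF input handled by the method above contains a single interval
    // block, e.g. (illustrative values):
    //
    //   DGF
    //   Interval
    //   0 0 0   % lower left corner
    //   1 1 1   % upper right corner
    //   8 8 8   % number of elements per direction
    //   #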

    template < class int_t >
    static SharedPtrType
    createSimplexGrid ( const FieldVector<ctype,dimworld>& lowerLeft,
                        const FieldVector<ctype,dimworld>& upperRight,
                        const std::array< int_t, dim>& elements,
                        MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      // create DGF interval block and use DGF parser to create simplex grid
      std::stringstream dgfstream;
      dgfstream << "DGF" << std::endl;
      dgfstream << "Interval" << std::endl;
      dgfstream << lowerLeft << std::endl;
      dgfstream << upperRight << std::endl;
      for( int i=0; i<dim; ++i )
        dgfstream << elements[ i ] << " ";
      dgfstream << std::endl;
      dgfstream << "#" << std::endl;
      dgfstream << "Cube" << std::endl;
      dgfstream << "#" << std::endl;
      dgfstream << "Simplex" << std::endl;
      dgfstream << "#" << std::endl;

      std::cout << dgfstream.str() << std::endl;

      Dune::GridPtr< Grid > grid( dgfstream, mpiComm );
      return SharedPtrType( grid.release() );
    }

    template < class int_t >
    static SharedPtrType
    createCubeGrid ( const FieldVector<ctype,dimworld>& lowerLeft,
                     const FieldVector<ctype,dimworld>& upperRight,
                     const std::array< int_t, dim>& elements,
                     MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      CollectiveCommunication comm( mpiComm );
      std::string name( "Cartesian ALUGrid via YaspGrid" );
      return createCubeGridImpl( lowerLeft, upperRight, elements, comm, name );
    }

  protected:
    template <int codim, class Entity>
    int subEntities ( const Entity& entity ) const
    {
      return entity.subEntities( codim );
    }

    template < class int_t >
    static SharedPtrType
    createCubeGridImpl ( const FieldVector<ctype,dimworld>& lowerLeft,
                         const FieldVector<ctype,dimworld>& upperRight,
                         const std::array< int_t, dim>& elements,
                         const CollectiveCommunication& comm,
                         const std::string& name )
    {
      const int myrank = comm.rank();

      typedef YaspGrid< dimworld, EquidistantOffsetCoordinates<double,dimworld> > CartesianGridType ;
      std::array< int, dim > dims;
      for( int i=0; i<dim; ++i ) dims[ i ] = elements[ i ];

      CollectiveCommunication commSelf( MPIHelper :: getLocalCommunicator() );
      // create a local YaspGrid to partition, and insert only the elements
      // that belong to this process
      CartesianGridType sgrid( lowerLeft, upperRight, dims, std::bitset<dim>(0ULL), 1, commSelf );

      typedef typename CartesianGridType :: LeafGridView GridView ;
      typedef typename GridView :: IndexSet IndexSet ;
      typedef typename IndexSet :: IndexType IndexType ;
      typedef typename GridView :: template Codim< 0 > :: Iterator ElementIterator ;
      typedef typename ElementIterator::Entity Entity ;
      typedef typename GridView :: IntersectionIterator IntersectionIterator ;
      typedef typename IntersectionIterator :: Intersection Intersection ;

      GridView gridView = sgrid.leafGridView();
      const IndexSet &indexSet = gridView.indexSet();

      // get the decomposition of the macro grid
      SimplePartitioner< GridView, InteriorBorder_Partition > partitioner( gridView, comm, lowerLeft, upperRight );

      // create ALUGrid GridFactory
      GridFactory< Grid > factory;

      // map global vertex ids to local ones
      std::map< IndexType, unsigned int > vtxMap;
      // elements of this partition, sorted by space filling curve index
      std::map< double, const Entity > sortedElementList;

      const int numVertices = (1 << dim);
      std::vector< unsigned int > vertices( numVertices );

      const ElementIterator end = gridView.template end< 0 >();
      for( ElementIterator it = gridView.template begin< 0 >(); it != end; ++it )
      {
        const Entity &entity = *it;

        // if the element does not belong to our partition, continue
        if( partitioner.rank( entity ) != myrank )
          continue;

        const double elIndex = partitioner.index( entity );
        assert( sortedElementList.find( elIndex ) == sortedElementList.end() );
        sortedElementList.insert( std::make_pair( elIndex, entity ) );
      }

      int nextElementIndex = 0;
      const auto endSorted = sortedElementList.end();
      for( auto it = sortedElementList.begin(); it != endSorted; ++it )
      {
        const Entity &entity = (*it).second;

        // insert vertices and element
        const typename Entity::Geometry geo = entity.geometry();
        alugrid_assert( numVertices == geo.corners() );
        for( int i = 0; i < numVertices; ++i )
        {
          const IndexType vtxId = indexSet.subIndex( entity, i, dim );
          //auto result = vtxMap.insert( std::make_pair( vtxId, vtxMap.size() ) );
          std::pair< typename std::map< IndexType, unsigned int >::iterator, bool > result
            = vtxMap.insert( std::make_pair( vtxId, vtxMap.size() ) );
          if( result.second )
            factory.insertVertex( geo.corner( i ), vtxId );
          vertices[ i ] = result.first->second;
        }

        factory.insertElement( entity.type(), vertices );
        const int elementIndex = nextElementIndex++;

        //const auto iend = gridView.iend( entity );
        //for( auto iit = gridView.ibegin( entity ); iit != iend; ++iit )
        const IntersectionIterator iend = gridView.iend( entity );
        for( IntersectionIterator iit = gridView.ibegin( entity ); iit != iend; ++iit )
        {
          const Intersection &isec = *iit;
          const int faceNumber = isec.indexInInside();
          // insert boundary face in case of domain boundary
          if( isec.boundary() )
            factory.insertBoundary( elementIndex, faceNumber );
          // insert process border if the neighboring element has a different rank
          if( isec.neighbor() && (partitioner.rank( isec.outside() ) != myrank) )
            factory.insertProcessBorder( elementIndex, faceNumber );
        }
      }

      // create shared grid pointer
      return SharedPtrType( factory.createGrid( true, true, name ) );
    }
  };

} // namespace Dune

#endif // #ifndef DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH
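
A minimal usage sketch of the factory above (not part of the header). The
grid type, the include paths for dune-alugrid, and the resolution are
illustrative assumptions:

#include <array>
#include <iostream>

#include <dune/common/fvector.hh>
#include <dune/common/parallel/mpihelper.hh>

#include <dune/alugrid/grid.hh>      // assumed main dune-alugrid header
#include "structuredgridfactory.hh"  // the header listed above

int main( int argc, char** argv )
{
  // initialize MPI; the factory partitions the grid across all ranks
  Dune::MPIHelper::instance( argc, argv );

  // nonconforming 3d cube ALUGrid (illustrative choice)
  typedef Dune::ALUGrid< 3, 3, Dune::cube, Dune::nonconforming > Grid;
  typedef Dune::StructuredGridFactory< Grid > SGF;

  // unit cube with 8x8x8 elements (illustrative values)
  Dune::FieldVector< Grid::ctype, 3 > lowerLeft( 0.0 );
  Dune::FieldVector< Grid::ctype, 3 > upperRight( 1.0 );
  std::array< int, 3 > elements = {{ 8, 8, 8 }};

  // each rank inserts only the elements of its own partition
  auto gridPtr = SGF::createCubeGrid( lowerLeft, upperRight, elements );

  std::cout << "rank " << gridPtr->comm().rank() << " holds "
            << gridPtr->size( 0 ) << " elements" << std::endl;
  return 0;
}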