AMDiS 0.1
The Adaptive Multi-Dimensional Simulation Toolbox
DOFMapping.inc.hpp
#pragma once

#include <cassert>
#include <iostream>
#include <numeric>
#include <utility>
#include <vector>

#include <dune/common/timer.hh>

#if HAVE_MPI
#include <amdis/common/parallel/Collective.hpp>
#include <amdis/common/parallel/RequestOperations.hpp>
#endif

namespace AMDiS {

#if HAVE_MPI

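/// Update the local-to-global DOF mapping:
///  1. number all owner DOFs consecutively on each rank,
///  2. make these numbers globally unique by shifting each rank's block by the
///     exclusive prefix sum of all local sizes,
///  3. exchange the global indices of owner DOFs with the neighboring ranks, so
///     that every ghost (non-owner) DOF receives the index assigned by its owner.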
template <class PIS, class GI>
  template <class Communication>
void ParallelDofMapping<PIS,GI>::
update(Communication& c)
{
  Dune::Timer t;
  Mpi::Communicator world(Environment::comm());

  // clear all vectors and reset sizes
  reset();

  // 1. insert and number owner DOFs
  globalIndices_.resize(c.indexSet().size(), 0);
  owner_.resize(c.indexSet().size(), false);
  localSize_ = 0;
  ghostSize_ = 0;
  for (auto const& ip : c.indexSet()) {
    if (ip.local().attribute() == Attribute::owner) {
      size_type idx = ip.local();
      globalIndices_[idx] = localSize_++;
      owner_[idx] = true;
    } else {
      ghostSize_++;
    }
  }
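  // now every owner DOF carries a rank-local index in [0, localSize_); the
  // entries of ghost DOFs are still 0 and are filled in step 3 below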

  // 2. communicate the local sizes from all processors
  Mpi::all_gather(world, localSize_, sizes_);

  // at which global index do the local partitions start
  starts_.resize(world.size() + 1);
  starts_[0] = 0;
  std::partial_sum(sizes_.begin(), sizes_.end(), starts_.begin()+1);
  globalSize_ = starts_.back();
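  // example: with 3 ranks and local sizes {4,3,5} this gives
  // starts_ = {0,4,7,12} and globalSize_ = 12, i.e. rank 1 owns
  // the global indices 4..6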

  // update the global index for all local indices by shifting by the global start position
  for (auto& i : globalIndices_)
    i += starts_[world.rank()];
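  // note that this also shifts the (still meaningless) entries of ghost DOFs;
  // they are overwritten with the owner's value in step 3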

  // 3. build up the communication of overlap DOFs that do not yet have a global
  // index assigned. Therefore, send the global indices of all already numbered
  // owner DOFs to the neighboring remote processors, and receive from those the
  // global indices of their owner DOFs.
  using GlobalAssoc = std::pair<typename ParallelIndexSet::GlobalIndex, size_type>; // {globalId, globalIndex}
  std::vector<std::vector<GlobalAssoc>> sendList(world.size());
  std::vector<std::size_t> receiveList(world.size(), 0);
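  // sendList[p] collects the pairs destined for neighbor rank p; receiveList[p]
  // counts the number of pairs expected from rank p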

  // collect the send data and receive counts for the DOFs at the interface
  for (auto const& rim : c.remoteIndices()) {
    int p = rim.first;

    auto* sourceRemoteIndexList = rim.second.first;
    auto* targetRemoteIndexList = rim.second.second;

    // send to overlap
    for (auto const& ri : *sourceRemoteIndexList) {
      auto const& lip = ri.localIndexPair();
      Attribute remoteAttr = ri.attribute();
      Attribute myAttr = lip.local().attribute();
      if (myAttr == Attribute::owner && remoteAttr != Attribute::owner) {
        size_type globalIndex = globalIndices_[size_type(lip.local())];
        sendList[p].push_back({lip.global(), globalIndex});
      }
    }

    // receive from owner
    for (auto const& ri : *targetRemoteIndexList) {
      auto const& lip = ri.localIndexPair();
      Attribute remoteAttr = ri.attribute();
      Attribute myAttr = lip.local().attribute();
      if (myAttr != Attribute::owner && remoteAttr == Attribute::owner) {
        receiveList[p]++;
      }
    }
  }
  // all ghost DOFs must be communicated!
  assert(ghostSize_ == std::accumulate(receiveList.begin(), receiveList.end(), std::size_t(0)));
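  // this holds because every ghost DOF has exactly one owner on some remote
  // rank, so the expected receives add up to the local ghost count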

  // send {globalId, globalIndex} to remote processors
  std::vector<Mpi::Request> sendRequests;
  for (int p = 0; p < world.size(); ++p) {
    if (!sendList[p].empty()) {
      sendRequests.emplace_back( world.isend(sendList[p], p, tag_) );
    }
  }

  // receive {globalId, globalIndex} from remote processors
  std::vector<Mpi::Request> recvRequests;
  std::vector<std::vector<GlobalAssoc>> recvData(world.size());
  for (int p = 0; p < world.size(); ++p) {
    if (receiveList[p] > 0)
      recvRequests.emplace_back( world.irecv(recvData[p], p, tag_) );
  }

  Mpi::wait_all(recvRequests.begin(), recvRequests.end());
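  // only the receives are awaited here; the matching sends are awaited at the
  // end of update(), before the send buffers go out of scope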

  ghostIndices_.reserve(ghostSize_);
  ghostLocalIndices_.resize(c.indexSet().size(), LocalIndex(-1));

  // insert all remote global indices into the map
  std::size_t counter = 0;
  for (int p = 0; p < world.size(); ++p) {
    auto const& data = recvData[p];
    assert(data.size() == receiveList[p]);
    for (auto const& d : data) {
      typename PIS::IndexPair const& l = c.indexSet().at(d.first);
      assert(!owner_[size_type(l.local())]);

      globalIndices_[size_type(l.local())] = d.second;
      ghostIndices_.push_back(d.second);
      ghostLocalIndices_[size_type(l.local())] = counter++;
    }
  }
  assert(counter == ghostSize_);
  assert(ghostSize_ + localSize_ == c.indexSet().size());
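  // each ghost DOF now has a consecutive ghost number: ghostLocalIndices_ maps
  // a local index to its position in ghostIndices_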

  Mpi::wait_all(sendRequests.begin(), sendRequests.end());
  msg("update DofMapping needed {} sec", t.elapsed());
}

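/// Print the sizes of this rank's internal index containers (for debugging).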
template <class PIS, class GI>
void ParallelDofMapping<PIS,GI>::
debug() const
{
  int p = Environment::mpiRank();
  std::cout << "[" << p << "] sizes_.size()=" << sizes_.size() << ", my_size=" << sizes_[p] << std::endl;
  std::cout << "[" << p << "] starts_.size()=" << starts_.size() << ", my_start=" << starts_[p] << std::endl;
  std::cout << "[" << p << "] localSize_=" << localSize_ << ", globalSize_=" << globalSize_ << ", ghostSize_=" << ghostSize_ << std::endl;
  std::cout << "[" << p << "] globalIndices_.size()=" << globalIndices_.size() << std::endl;
  std::cout << "[" << p << "] ghostIndices_.size()=" << ghostIndices_.size() << std::endl;
  std::cout << "[" << p << "] ghostLocalIndices_.size()=" << ghostLocalIndices_.size() << std::endl;
  std::cout << "[" << p << "] owner_.size()=" << owner_.size() << std::endl;
}
#endif

} // end namespace AMDiS
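
The block-numbering scheme of step 2 can be illustrated in isolation. The following is a minimal, self-contained sketch against the raw MPI C API (not part of AMDiS; the dummy localSize stands in for the number of owner DOFs of a rank): an allgather plus a prefix sum yields the start offsets, and each rank then owns the contiguous global index block [starts[rank], starts[rank+1]).

#include <mpi.h>
#include <cstdio>
#include <numeric>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank = 0, size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // stand-in for the number of owner DOFs of this rank
  unsigned long localSize = 4 + rank;

  // gather all local sizes, analogous to Mpi::all_gather in update()
  std::vector<unsigned long> sizes(size);
  MPI_Allgather(&localSize, 1, MPI_UNSIGNED_LONG,
                sizes.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);

  // exclusive prefix sum: starts[p] is the first global index owned by rank p
  std::vector<unsigned long> starts(size + 1, 0);
  std::partial_sum(sizes.begin(), sizes.end(), starts.begin() + 1);

  std::printf("rank %d owns global indices [%lu, %lu), globalSize = %lu\n",
              rank, starts[rank], starts[rank + 1], starts.back());

  MPI_Finalize();
  return 0;
}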