refineable_mesh.template.cc
1 // LIC// ====================================================================
2 // LIC// This file forms part of oomph-lib, the object-oriented,
3 // LIC// multi-physics finite-element library, available
4 // LIC// at http://www.oomph-lib.org.
5 // LIC//
6 // LIC// Copyright (C) 2006-2023 Matthias Heil and Andrew Hazel
7 // LIC//
8 // LIC// This library is free software; you can redistribute it and/or
9 // LIC// modify it under the terms of the GNU Lesser General Public
10 // LIC// License as published by the Free Software Foundation; either
11 // LIC// version 2.1 of the License, or (at your option) any later version.
12 // LIC//
13 // LIC// This library is distributed in the hope that it will be useful,
14 // LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // LIC// Lesser General Public License for more details.
17 // LIC//
18 // LIC// You should have received a copy of the GNU Lesser General Public
19 // LIC// License along with this library; if not, write to the Free Software
20 // LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 // LIC// 02110-1301 USA.
22 // LIC//
23 // LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk.
24 // LIC//
25 // LIC//====================================================================
26 // Templated refineable mesh functions
27 
28 // Include guards to prevent multiple inclusion of the header
29 #ifndef OOMPH_REFINEABLE_MESH_TEMPLATE_CC
30 #define OOMPH_REFINEABLE_MESH_TEMPLATE_CC
31 
32 // Config header generated by autoconfig
33 #ifdef HAVE_CONFIG_H
34 #include <oomph-lib-config.h>
35 #endif
36 
37 // oomph-lib headers
38 #include "refineable_mesh.h"
39 #include "missing_masters.h"
41 
42 namespace oomph
43 {
44 #ifdef OOMPH_HAS_MPI
45 
46  //========================================================================
47  /// Additional actions required to synchronise halo nodes where master
48  /// nodes could not be found during synchronise_hanging_nodes().
49  /// Overloaded from Mesh class to take care of master nodes on
50  /// the outer edge of the halo layer which do not exist on that
51  /// processor. This fixes problems with the synchronisation of
52  /// hanging nodes for elements with non-uniformly spaced nodes.
53  //========================================================================
54  template<class ELEMENT>
55  void TreeBasedRefineableMesh<ELEMENT>::additional_synchronise_hanging_nodes(
56  const unsigned& ncont_interpolated_values)
57  {
58  // Check if additional synchronisation of hanging nodes is disabled
59  if (is_additional_synchronisation_of_hanging_nodes_disabled() == true)
60  {
61  return;
62  }
63 
64  // This provides all the node-adding helper functions required to
65  // reconstruct the missing halo master nodes on this processor
66  using namespace Missing_masters_functions;
67 
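 // What follows happens in three stages: (i) exchange the hanging status
 // of all halo/haloed nodes between pairs of processors; (ii) flag haloed
 // nodes whose halo counterparts disagree and reconstruct their missing
 // master nodes in the external halo/haloed storage; (iii) add the newly
 // created external halo/haloed nodes to the shared node lookup scheme.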
68 
69  double t_start = 0.0;
70  double t_end = 0.0;
71  if (Global_timings::Doc_comprehensive_timings)
72  {
73  t_start = TimingHelpers::timer();
74  }
75 
76  // Store number of processors and current process
77  MPI_Status status;
78  int n_proc = Comm_pt->nproc();
79  int my_rank = Comm_pt->my_rank();
80 
81 
82 #ifdef PARANOID
83  // Paranoid check to make sure nothing else is using the
84  // external storage. This will need to be changed at some
85  // point if we are to use non-uniformly spaced nodes in
86  // multi-domain problems.
87  bool err = false;
88  // Print out external storage
89  for (int d = 0; d < n_proc; d++)
90  {
91  if (d != my_rank)
92  {
93  // Check to see if external storage is being used by anybody else
94  if (nexternal_haloed_node(d) != 0)
95  {
96  err = true;
97  oomph_info << "Processor " << my_rank
98  << "'s external haloed nodes with processor " << d
99  << " are:" << std::endl;
100  for (unsigned i = 0; i < nexternal_haloed_node(d); i++)
101  {
102  oomph_info << "external_haloed_node_pt(" << d << "," << i
103  << ") = " << external_haloed_node_pt(d, i) << std::endl;
104  oomph_info << "x = ( " << external_haloed_node_pt(d, i)->x(0)
105  << " , " << external_haloed_node_pt(d, i)->x(1) << " )"
106  << std::endl;
107  }
108  }
109  }
110  }
111  for (int d = 0; d < n_proc; d++)
112  {
113  if (d != my_rank)
114  {
115  // Check to see if external storage is being used by anybody else
116  if (nexternal_halo_node(d) != 0)
117  {
118  err = true;
119  oomph_info << "Processor " << my_rank
120  << "'s external halo nodes with processor " << d
121  << " are:" << std::endl;
122  for (unsigned i = 0; i < nexternal_halo_node(d); i++)
123  {
124  oomph_info << "external_halo_node_pt(" << d << "," << i
125  << ") = " << external_halo_node_pt(d, i) << std::endl;
126  oomph_info << "x = ( " << external_halo_node_pt(d, i)->x(0) << " , "
127  << external_halo_node_pt(d, i)->x(1) << " )"
128  << std::endl;
129  }
130  }
131  }
132  }
133  if (err)
134  {
135  std::ostringstream err_stream;
136  err_stream << "There are already some nodes in the external storage"
137  << std::endl
138  << "for this mesh. This bit assumes that nothing else"
139  << std::endl
140  << "uses this storage (for now).";
141  throw OomphLibError(
142  err_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
143  }
144 #endif
145 
146 
147  // Compare the halo and haloed nodes for discrepancies in hanging status
148 
149  // Storage for the hanging status of halo/haloed nodes on elements
150  Vector<Vector<int>> haloed_hanging(n_proc);
151  Vector<Vector<int>> halo_hanging(n_proc);
152 
153  // Storage for the haloed nodes with discrepancies in their hanging status
154  // with each processor
155  Vector<std::map<Node*, unsigned>> haloed_hanging_node_with_discrepancy_pt(
156  n_proc);
157 
157 
158  if (Global_timings::Doc_comprehensive_timings)
159  {
160  t_start = TimingHelpers::timer();
161  }
162 
163  // Store number of continuously interpolated values as int
164  int ncont_inter_values = ncont_interpolated_values;
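 // Note: in the loops below icont==-1 refers to the geometric hanging
 // status, while icont=0,...,ncont_inter_values-1 refer to the hanging
 // status of the individual continuously interpolated values.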
165 
166  // Loop over processes: Each processor checks that its haloed nodes
167  // with proc d have consistent hanging status with their halo counterparts.
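 // Communication pattern: in "round" d of the loop below, processor d
 // sends the hanging status of its halo nodes to every other processor,
 // while the remaining processors receive that information and compare it
 // against their corresponding haloed nodes.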
168  for (int d = 0; d < n_proc; d++)
169  {
170  // No halo with self: Setup hang info for my haloed nodes with proc d
171  // then get ready to receive halo info from processor d.
172  if (d != my_rank)
173  {
174  // Loop over haloed nodes
175  unsigned nh = nhaloed_node(d);
176  for (unsigned j = 0; j < nh; j++)
177  {
178  // Get node
179  Node* nod_pt = haloed_node_pt(d, j);
180 
181  // Loop over the hanging status for each interpolated variable
182  // (and the geometry)
183  for (int icont = -1; icont < ncont_inter_values; icont++)
184  {
185  // Store the hanging status of this haloed node
186  if (nod_pt->is_hanging(icont))
187  {
188  unsigned n_master = nod_pt->hanging_pt(icont)->nmaster();
189  haloed_hanging[d].push_back(n_master);
190  }
191  else
192  {
193  haloed_hanging[d].push_back(0);
194  }
195  }
196  }
197 
198  // Receive the hanging status information from the corresponding process
199  unsigned count_haloed = haloed_hanging[d].size();
200 
201 #ifdef PARANOID
202  // Check that number of halo and haloed data match
203  unsigned tmp = 0;
204  MPI_Recv(&tmp, 1, MPI_UNSIGNED, d, 0, Comm_pt->mpi_comm(), &status);
205  if (tmp != count_haloed)
206  {
207  std::ostringstream error_stream;
208  error_stream << "Number of halo data, " << tmp
209  << ", does not match number of haloed data, "
210  << count_haloed << std::endl;
211  throw OomphLibError(error_stream.str(),
212  OOMPH_CURRENT_FUNCTION,
213  OOMPH_EXCEPTION_LOCATION);
214  }
215 #endif
216 
217  // Get the data (if any)
218  if (count_haloed != 0)
219  {
220  halo_hanging[d].resize(count_haloed);
221  MPI_Recv(&halo_hanging[d][0],
222  count_haloed,
223  MPI_INT,
224  d,
225  0,
226  Comm_pt->mpi_comm(),
227  &status);
228  }
229  }
230  else // d==my_rank, i.e. current process: Send halo hanging status
231  // to process dd where it's received (see above) and compared
232  // against the hanging status of the haloed nodes
233  {
234  for (int dd = 0; dd < n_proc; dd++)
235  {
236  // No halo with yourself
237  if (dd != d)
238  {
239  // Storage for halo hanging status and counter
240  Vector<int> local_halo_hanging;
241 
242  // Loop over halo nodes
243  unsigned nh = nhalo_node(dd);
244  for (unsigned j = 0; j < nh; j++)
245  {
246  // Get node
247  Node* nod_pt = halo_node_pt(dd, j);
248 
249  // Loop over the hanging status for each interpolated variable
250  // (and the geometry)
251  for (int icont = -1; icont < ncont_inter_values; icont++)
252  {
253  // Store hanging status of halo node
254  if (nod_pt->is_hanging(icont))
255  {
256  unsigned n_master = nod_pt->hanging_pt(icont)->nmaster();
257  local_halo_hanging.push_back(n_master);
258  }
259  else
260  {
261  local_halo_hanging.push_back(0);
262  }
263  }
264  }
265 
266 
267  // Send the information to the relevant process
268  unsigned count_halo = local_halo_hanging.size();
269 
270 #ifdef PARANOID
271  // Check that number of halo and haloed data match
272  MPI_Send(&count_halo, 1, MPI_UNSIGNED, dd, 0, Comm_pt->mpi_comm());
273 #endif
274 
275  // Send data (if any)
276  if (count_halo != 0)
277  {
278  MPI_Send(&local_halo_hanging[0],
279  count_halo,
280  MPI_INT,
281  dd,
282  0,
283  Comm_pt->mpi_comm());
284  }
285  }
286  }
287  }
288  }
289 
290  if (Global_timings::Doc_comprehensive_timings)
291  {
292  t_end = TimingHelpers::timer();
293  oomph_info << "Time for first all-to-all in "
294  "additional_synchronise_hanging_nodes(): "
295  << t_end - t_start << std::endl;
296  t_start = TimingHelpers::timer();
297  }
298 
299 
300  // Now compare equivalent halo and haloed vectors to find discrepancies.
301  // It is possible that a master node may not be on either process involved
302  // in the halo-haloed scheme; to work round this, we use the shared_node
303  // storage scheme, which stores all nodes that are on each pair of
304  // processors in the same order on each of the two processors
305 
306 
307  // Loop over domains: Each processor checks consistency of hang status
308  // of its haloed nodes with proc d against the halo counterpart. Haloed
309  // wins if there are any discrepancies.
310  for (int d = 0; d < n_proc; d++)
311  {
312  // No halo with yourself
313  if (d != my_rank)
314  {
315  // Counter for traversing haloed data
316  unsigned count = 0;
317 
318  // Loop over haloed nodes
319  unsigned nh = nhaloed_node(d);
320  for (unsigned j = 0; j < nh; j++)
321  {
322  // Get node
323  Node* nod_pt = haloed_node_pt(d, j);
324 
325  // Loop over the hanging status for each interpolated variable
326  // (and the geometry)
327  for (int icont = -1; icont < ncont_inter_values; icont++)
328  {
329  // Compare hanging status of halo/haloed counterpart structure
330 
331  // Haloed node is hanging and its halo counterpart has a different
332  // number of master nodes (which includes none, in which case the
333  // halo counterpart isn't hanging)
334  if ((haloed_hanging[d][count] > 0) &&
335  (haloed_hanging[d][count] != halo_hanging[d][count]))
336  {
337  // Store this node so it can be synchronised later
338  haloed_hanging_node_with_discrepancy_pt[d].insert(
339  std::pair<Node*, unsigned>(nod_pt, d));
340  }
341  // Increment counter for number of haloed data
342  count++;
343  } // end of loop over icont
344  } // end of loop over haloed nodes
345  }
346  } // end loop over all processors
347 
348 
349  // Populate external halo(ed) node storage with master nodes of halo(ed)
350  // nodes
351 
352  // Loop over domains: For each haloed node with a discrepancy, the
353  // haloed side packages up its master nodes and sends them to proc d,
354  // where halo copies of any missing masters are constructed.
355  for (int d = 0; d < n_proc; d++)
356  {
357  // No halo with yourself
358  if (d != my_rank)
359  {
360  // Now add haloed master nodes to external storage
361  //===============================================
362 
363  // Storage for data to be sent
364  Vector<unsigned> send_unsigneds(0);
365  Vector<double> send_doubles(0);
366 
367  // Count number of haloed nonmaster nodes for halo process
368  unsigned nhaloed_nonmaster_nodes_processed = 0;
369  Vector<unsigned> haloed_nonmaster_node_index(0);
370 
371  // Loop over hanging halo nodes with discrepancies
372  std::map<Node*, unsigned>::iterator j;
373  for (j = haloed_hanging_node_with_discrepancy_pt[d].begin();
374  j != haloed_hanging_node_with_discrepancy_pt[d].end();
375  j++)
376  {
377  Node* nod_pt = (*j).first;
378  // Find index of this haloed node in the halo storage of processor d
379  //(But find in shared node storage in case it is actually haloed on
380  // another processor which we don't know about)
381  std::vector<Node*>::iterator it = std::find(
382  Shared_node_pt[d].begin(), Shared_node_pt[d].end(), nod_pt);
383  if (it != Shared_node_pt[d].end())
384  {
385  // Tell other processor to create this node
386  // send_unsigneds.push_back(1);
387  nhaloed_nonmaster_nodes_processed++;
388 
389  // Tell the other processor where to find this node in its halo node
390  // storage
391  unsigned index = it - Shared_node_pt[d].begin();
392  haloed_nonmaster_node_index.push_back(index);
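 // (This index is meaningful on processor d too, because shared nodes
 // are stored in the same order on both processors.)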
393 
394  // Tell this processor that this node is really a haloed node
395  // This also packages up the data which needs to be sent to the
396  // processor on which the halo equivalent node lives
397  recursively_add_masters_of_external_haloed_node(d,
398  nod_pt,
399  this,
400  ncont_inter_values,
401  send_unsigneds,
402  send_doubles);
403  }
404  else
405  {
406  throw OomphLibError("Haloed node not found in haloed node storage",
407  OOMPH_CURRENT_FUNCTION,
408  OOMPH_EXCEPTION_LOCATION);
409  }
410  }
411 
412  // How much data needs to be sent?
413  unsigned send_unsigneds_count = send_unsigneds.size();
414  unsigned send_doubles_count = send_doubles.size();
415 
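 // The exchange below uses MPI tags 0-5: 0/1 for the number of unsigned/
 // double values, 2 for the number of nodes processed, 3 for their
 // indices in the shared node storage, and 4/5 for the flat-packed
 // unsigned and double data itself.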
416  // Send amount of data
417  MPI_Send(
418  &send_unsigneds_count, 1, MPI_UNSIGNED, d, 0, Comm_pt->mpi_comm());
419  MPI_Send(
420  &send_doubles_count, 1, MPI_UNSIGNED, d, 1, Comm_pt->mpi_comm());
421 
422  // Send to halo process the number of haloed nodes we processed
423  MPI_Send(&nhaloed_nonmaster_nodes_processed,
424  1,
425  MPI_UNSIGNED,
426  d,
427  2,
428  Comm_pt->mpi_comm());
429  if (nhaloed_nonmaster_nodes_processed > 0)
430  {
431  MPI_Send(&haloed_nonmaster_node_index[0],
432  nhaloed_nonmaster_nodes_processed,
433  MPI_UNSIGNED,
434  d,
435  3,
436  Comm_pt->mpi_comm());
437  }
438 
439  // Send data about external halo nodes
440  if (send_unsigneds_count > 0)
441  {
442  // Only send if there is anything to send
443  MPI_Send(&send_unsigneds[0],
444  send_unsigneds_count,
445  MPI_UNSIGNED,
446  d,
447  4,
448  Comm_pt->mpi_comm());
449  }
450  if (send_doubles_count > 0)
451  {
452  // Only send if there is anything to send
453  MPI_Send(&send_doubles[0],
454  send_doubles_count,
455  MPI_DOUBLE,
456  d,
457  5,
458  Comm_pt->mpi_comm());
459  }
460  }
461  else // (d==my_rank), current process
462  {
463  // Now construct and add halo versions of master nodes to external
464  // storage
465  //=======================================================================
466 
467  // Loop over processors to get data
468  for (int dd = 0; dd < n_proc; dd++)
469  {
470  // Don't talk to yourself
471  if (dd != d)
472  {
473  // How much data to be received
474  unsigned nrecv_unsigneds = 0;
475  unsigned nrecv_doubles = 0;
476  MPI_Recv(&nrecv_unsigneds,
477  1,
478  MPI_UNSIGNED,
479  dd,
480  0,
481  Comm_pt->mpi_comm(),
482  &status);
483  MPI_Recv(&nrecv_doubles,
484  1,
485  MPI_UNSIGNED,
486  dd,
487  1,
488  Comm_pt->mpi_comm(),
489  &status);
490 
491  // Get from haloed process the number of halo nodes we need to
492  // process
493  unsigned nhalo_nonmaster_nodes_to_process = 0;
494  MPI_Recv(&nhalo_nonmaster_nodes_to_process,
495  1,
496  MPI_UNSIGNED,
497  dd,
498  2,
499  Comm_pt->mpi_comm(),
500  &status);
501  Vector<unsigned> halo_nonmaster_node_index(
502  nhalo_nonmaster_nodes_to_process);
503  if (nhalo_nonmaster_nodes_to_process != 0)
504  {
505  MPI_Recv(&halo_nonmaster_node_index[0],
506  nhalo_nonmaster_nodes_to_process,
507  MPI_UNSIGNED,
508  dd,
509  3,
510  Comm_pt->mpi_comm(),
511  &status);
512  }
513 
514  // Storage for data to be received
515  Vector<unsigned> recv_unsigneds(nrecv_unsigneds);
516  Vector<double> recv_doubles(nrecv_doubles);
517 
518  // Receive data about external haloed equivalent nodes
519  if (nrecv_unsigneds > 0)
520  {
521  // Only receive if there is anything to receive
522  MPI_Recv(&recv_unsigneds[0],
523  nrecv_unsigneds,
524  MPI_UNSIGNED,
525  dd,
526  4,
527  Comm_pt->mpi_comm(),
528  &status);
529  }
530  if (nrecv_doubles > 0)
531  {
532  // Only receive if there is anything to receive
533  MPI_Recv(&recv_doubles[0],
534  nrecv_doubles,
535  MPI_DOUBLE,
536  dd,
537  5,
538  Comm_pt->mpi_comm(),
539  &status);
540  }
541 
542  // Counters for traversing the flat-packed data
543  unsigned recv_unsigneds_count = 0;
544  unsigned recv_doubles_count = 0;
545 
546  // Loop over halo nodes with discrepancies in their hanging status
547  for (unsigned j = 0; j < nhalo_nonmaster_nodes_to_process; j++)
548  {
549  // Get pointer to halo nonmaster node which needs processing
550  //(But given index is its index in the shared storage)
551  Node* nod_pt = shared_node_pt(dd, halo_nonmaster_node_index[j]);
552 
553 #ifdef PARANOID
554  // Check if we have a MacroElementNodeUpdateNode
555  if (dynamic_cast<MacroElementNodeUpdateNode*>(nod_pt))
556  {
557  // BENFLAG: The construction of missing master nodes for
558  // MacroElementNodeUpdateNodes does not work as
559  // expected. They require MacroElementNodeUpdateElements
560  // to be created for the missing halo nodes which will
561  // be added. It behaves as expected until duplicate
562  // nodes are pruned at the problem level.
563  std::ostringstream err_stream;
564  err_stream
565  << "This currently doesn't work for" << std::endl
566  << "MacroElementNodeUpdateNodes because these require"
567  << std::endl
568  << "MacroElementNodeUpdateElements to be created for"
569  << std::endl
570  << "the missing halo nodes which will be added" << std::endl;
571  throw OomphLibError(err_stream.str(),
572  OOMPH_CURRENT_FUNCTION,
573  OOMPH_EXCEPTION_LOCATION);
574  // OomphLibWarning(err_stream.str(),
575  // OOMPH_CURRENT_FUNCTION,
576  // OOMPH_EXCEPTION_LOCATION);
577  }
578 #endif
579 
580  // Construct copy of node and add to external halo node storage.
581  unsigned loc_p = (unsigned)dd;
582  unsigned node_index;
583  recursively_add_masters_of_external_halo_node_to_storage<ELEMENT>(
584  nod_pt,
585  this,
586  loc_p,
587  node_index,
588  ncont_inter_values,
589  recv_unsigneds_count,
590  recv_unsigneds,
591  recv_doubles_count,
592  recv_doubles);
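 // The call above constructs halo versions of this node's missing master
 // nodes in the external halo storage, advancing the two counters through
 // the flat-packed data as it unpacks them.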
593  }
594 
595  } // end of dd!=d
596  } // end of second loop over all processors
597  }
598  } // end loop over all processors
599 
600 
601  if (Global_timings::Doc_comprehensive_timings)
602  {
603  t_end = TimingHelpers::timer();
604  oomph_info << "Time for second all-to-all in "
605  "additional_synchronise_hanging_nodes(): "
606  << t_end - t_start << std::endl;
607  t_start = TimingHelpers::timer();
608  }
609 
610  // Populate external halo(ed) node storage with master nodes of halo(ed)
611  // nodes [end]
612 
613  // Count how many external halo/haloed nodes are added
614  unsigned external_halo_count = 0;
615  unsigned external_haloed_count = 0;
616 
617  // Flag to test whether we attempt to add any duplicate haloed nodes to the
618  // shared storage -- if this is the case then we have duplicate halo nodes
619  // on another processor but with different pointers and the shared scheme
620  // will not be set up correctly
621  bool duplicate_haloed_node_exists = false;
622 
623  // Loop over all the processors and add the shared nodes
624  for (int d = 0; d < n_proc; d++)
625  {
626  // map of bools for whether the (external) node has been shared,
627  // initialised to 0 (false) for each domain d
628  std::map<Node*, bool> node_shared;
629 
630  // For all domains lower than the current domain: Do halos first
631  // then haloed, to ensure correct order in lookup scheme from
632  // the other side
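 // (Both processors must traverse each pairing in the same order so that
 // entry i in the shared node lookup scheme refers to the same node on
 // either side.)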
633  if (d < my_rank)
634  {
635  // Do external halo nodes
636  unsigned nexternal_halo_nod = nexternal_halo_node(d);
637  for (unsigned j = 0; j < nexternal_halo_nod; j++)
638  {
639  Node* nod_pt = external_halo_node_pt(d, j);
640 
641  // Add it as a shared node from current domain
642  if (!node_shared[nod_pt])
643  {
644  this->add_shared_node_pt(d, nod_pt);
645  node_shared[nod_pt] = true;
646  external_halo_count++;
647  }
648 
649  } // end loop over nodes
650 
651  // Do external haloed nodes
652  unsigned nexternal_haloed_nod = nexternal_haloed_node(d);
653  for (unsigned j = 0; j < nexternal_haloed_nod; j++)
654  {
655  Node* nod_pt = external_haloed_node_pt(d, j);
656 
657  // Add it as a shared node from current domain
658  if (!node_shared[nod_pt])
659  {
660  this->add_shared_node_pt(d, nod_pt);
661  node_shared[nod_pt] = true;
662  external_haloed_count++;
663  }
664  else
665  {
666  duplicate_haloed_node_exists = true;
667  }
668 
669  } // end loop over nodes
670  }
671 
672  // If the domain is bigger than the current rank: Do haloed first
673  // then halo, to ensure correct order in lookup scheme from
674  // the other side
675  if (d > my_rank)
676  {
677  // Do external haloed nodes
678  unsigned nexternal_haloed_nod = nexternal_haloed_node(d);
679  for (unsigned j = 0; j < nexternal_haloed_nod; j++)
680  {
681  Node* nod_pt = external_haloed_node_pt(d, j);
682 
683  // Add it as a shared node from current domain
684  if (!node_shared[nod_pt])
685  {
686  this->add_shared_node_pt(d, nod_pt);
687  node_shared[nod_pt] = true;
688  external_haloed_count++;
689  }
690  else
691  {
692  duplicate_haloed_node_exists = true;
693  }
694 
695  } // end loop over nodes
696 
697  // Do external halo nodes
698  unsigned nexternal_halo_nod = nexternal_halo_node(d);
699  for (unsigned j = 0; j < nexternal_halo_nod; j++)
700  {
701  Node* nod_pt = external_halo_node_pt(d, j);
702 
703  // Add it as a shared node from current domain
704  if (!node_shared[nod_pt])
705  {
706  this->add_shared_node_pt(d, nod_pt);
707  node_shared[nod_pt] = true;
708  external_halo_count++;
709  }
710 
711  } // end loop over nodes
712 
713  } // end if (d ...)
714 
715  } // end loop over processes
716 
717 
718  // Say how many external halo/haloed nodes were added
719  oomph_info << "INFO: " << external_halo_count << " external halo nodes and"
720  << std::endl;
721  oomph_info << "INFO: " << external_haloed_count
722  << " external haloed nodes were added to the shared node scheme"
723  << std::endl;
724 
725  // If we added duplicate haloed nodes, throw an error
726  if (duplicate_haloed_node_exists)
727  {
728  // This problem should now be avoided because we are using existing
729  // communication methods to locate nodes in this case. The error used
730  // to arise as follows:
731  // Let my_rank==A. If this has happened then it means that
732  // duplicate haloed nodes exist on another processor (B). This
733  // problem arises if a master of a haloed node with a discrepancy
734  // is haloed with a different processor (C). A copy is constructed
735  // in the external halo storage on processor (B) because that node
736  // is not found in the (internal) haloed storage on (A) with (B)
737  // but that node already exists on processor (B) in the (internal)
738  // halo storage with processor (C). Thus two copies of this master
739  // node now exist on processor (B).
740 
741  std::ostringstream err_stream;
742  err_stream << "Duplicate halo nodes exist on another processor!"
743  << std::endl
744  << "(See source code for more detailed explanation)"
745  << std::endl;
746 
747  throw OomphLibError(
748  err_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
749  }
750 
751 
752  if (Global_timings::Doc_comprehensive_timings)
753  {
754  t_end = TimingHelpers::timer();
755  oomph_info << "Time for identification of shared nodes in "
756  "additional_synchronise_hanging_nodes(): "
757  << t_end - t_start << std::endl;
758  }
759  }
760 
761 #endif
762 
763 } // namespace oomph
764 
765 #endif