mesh.cc
1 // LIC// ====================================================================
2 // LIC// This file forms part of oomph-lib, the object-oriented,
3 // LIC// multi-physics finite-element library, available
4 // LIC// at http://www.oomph-lib.org.
5 // LIC//
6 // LIC// Copyright (C) 2006-2023 Matthias Heil and Andrew Hazel
7 // LIC//
8 // LIC// This library is free software; you can redistribute it and/or
9 // LIC// modify it under the terms of the GNU Lesser General Public
10 // LIC// License as published by the Free Software Foundation; either
11 // LIC// version 2.1 of the License, or (at your option) any later version.
12 // LIC//
13 // LIC// This library is distributed in the hope that it will be useful,
14 // LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // LIC// Lesser General Public License for more details.
17 // LIC//
18 // LIC// You should have received a copy of the GNU Lesser General Public
19 // LIC// License along with this library; if not, write to the Free Software
20 // LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 // LIC// 02110-1301 USA.
22 // LIC//
23 // LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk.
24 // LIC//
25 // LIC//====================================================================
26 // Non-inline member functions for general mesh classes
27 
28 #ifdef OOMPH_HAS_MPI
29 #include "mpi.h"
30 #endif
31 
32 #include <algorithm>
33 #include <limits.h>
34 #include <typeinfo>
35 
36 
37 // oomph-lib headers
38 #include "oomph_utilities.h"
39 #include "mesh.h"
40 #include "problem.h"
41 #include "elastic_problems.h"
42 #include "refineable_mesh.h"
43 #include "triangle_mesh.h"
44 #include "shape.h"
45 
46 namespace oomph
47 {
48  //======================================================
49  /// The Steady Timestepper
50  //======================================================
51  Steady<0> Mesh::Default_TimeStepper;
52 
53 
54  //=======================================================================
55  /// Static boolean flag to control warning about mesh level timesteppers
56  //=======================================================================
57  bool Mesh::Suppress_warning_about_empty_mesh_level_time_stepper_function =
58  false;
59 
60  //=======================================================================
61  /// Merge meshes.
62  /// Note: This simply merges the meshes' elements and nodes (ignoring
63  /// duplicates; no boundary information etc. is created).
64  //=======================================================================
65  void Mesh::merge_meshes(const Vector<Mesh*>& sub_mesh_pt)
66  {
67  // No boundary lookup scheme is set up for the combined mesh
68  Lookup_for_elements_next_boundary_is_setup = false;
69 
70  // Number of submeshes
71  unsigned nsub_mesh = sub_mesh_pt.size();
72 
73  // Initialise element, node and boundary counters for global mesh
74  unsigned long n_element = 0;
75  unsigned long n_node = 0;
76  unsigned n_bound = 0;
77 
78  // Loop over submeshes and get total number of elements, nodes and
79  // boundaries
80  for (unsigned imesh = 0; imesh < nsub_mesh; imesh++)
81  {
82  n_element += sub_mesh_pt[imesh]->nelement();
83  n_node += sub_mesh_pt[imesh]->nnode();
84  n_bound += sub_mesh_pt[imesh]->nboundary();
85  }
86 
87  // Reserve storage for element and node pointers
88  Element_pt.clear();
89  Element_pt.reserve(n_element);
90  Node_pt.clear();
91  Node_pt.reserve(n_node);
92 
93  // Resize vector of vectors of nodes
94  Boundary_node_pt.clear();
95  Boundary_node_pt.resize(n_bound);
96 
97  // Sets of pointers to elements and nodes (to exclude duplicates -- they
98  // shouldn't occur anyway but if they do, they must only be added
99  // once in the global mesh to avoid trouble in the timestepping)
100  std::set<GeneralisedElement*> element_set_pt;
101  std::set<Node*> node_set_pt;
102 
103  // Counter for total number of boundaries in all the submeshes
104  unsigned ibound_global = 0;
105  // Loop over the number of submeshes
106  for (unsigned imesh = 0; imesh < nsub_mesh; imesh++)
107  {
108  // Loop over the elements of the submesh and add to vector
109  // duplicates are ignored
110  unsigned nel_before = 0;
111  unsigned long n_element = sub_mesh_pt[imesh]->nelement();
112  for (unsigned long e = 0; e < n_element; e++)
113  {
114  GeneralisedElement* el_pt = sub_mesh_pt[imesh]->element_pt(e);
115  element_set_pt.insert(el_pt);
116  // Was it a duplicate?
117  unsigned nel_now = element_set_pt.size();
118  if (nel_now == nel_before)
119  {
120  std::ostringstream warning_stream;
121  warning_stream << "WARNING: " << std::endl
122  << "Element " << e << " in submesh " << imesh
123  << " is a duplicate \n and was ignored when assembling"
124  << " combined mesh." << std::endl;
125  OomphLibWarning(warning_stream.str(),
126  "Mesh::Mesh(const Vector<Mesh*>&)",
127  OOMPH_EXCEPTION_LOCATION);
128  }
129  else
130  {
131  Element_pt.push_back(el_pt);
132  }
133  nel_before = nel_now;
134  }
135 
136  // Loop over the nodes of the submesh and add to vector
137  // duplicates are ignored
138  unsigned nnod_before = 0;
139  unsigned long n_node = sub_mesh_pt[imesh]->nnode();
140  for (unsigned long n = 0; n < n_node; n++)
141  {
142  Node* nod_pt = sub_mesh_pt[imesh]->node_pt(n);
143  node_set_pt.insert(nod_pt);
144  // Was it a duplicate?
145  unsigned nnod_now = node_set_pt.size();
146  if (nnod_now == nnod_before)
147  {
148  std::ostringstream warning_stream;
149  warning_stream
150  << "WARNING: " << std::endl
151  << "Node " << n << " in submesh " << imesh
152  << " is a duplicate \n and was ignored when assembling "
153  << "combined mesh." << std::endl;
154  OomphLibWarning(warning_stream.str(),
155  "Mesh::Mesh(const Vector<Mesh*>&)",
156  OOMPH_EXCEPTION_LOCATION);
157  }
158  else
159  {
160  Node_pt.push_back(nod_pt);
161  }
162  nnod_before = nnod_now;
163  }
164 
165  // Loop over the boundaries of the submesh
166  unsigned n_bound = sub_mesh_pt[imesh]->nboundary();
167  for (unsigned ibound = 0; ibound < n_bound; ibound++)
168  {
169  // Loop over the number of nodes on the boundary and add to the
170  // global vector
171  unsigned long n_bound_node = sub_mesh_pt[imesh]->nboundary_node(ibound);
172  for (unsigned long n = 0; n < n_bound_node; n++)
173  {
174  Boundary_node_pt[ibound_global].push_back(
175  sub_mesh_pt[imesh]->boundary_node_pt(ibound, n));
176  }
177  // Increase the number of the global boundary counter
178  ibound_global++;
179  }
180  } // End of loop over submeshes
181  }
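  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): combine two
  // existing submeshes into a single Mesh via merge_meshes(). The names
  // "fluid_mesh_pt" and "solid_mesh_pt" are placeholders for meshes built
  // elsewhere; no boundary lookup schemes are created for the combined mesh.
  //
  //   Vector<Mesh*> sub_mesh_pt;
  //   sub_mesh_pt.push_back(fluid_mesh_pt);
  //   sub_mesh_pt.push_back(solid_mesh_pt);
  //   Mesh* combined_mesh_pt = new Mesh;
  //   combined_mesh_pt->merge_meshes(sub_mesh_pt);
  // -------------------------------------------------------------------------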
182 
183 
184  //========================================================
185  /// Remove the information about nodes stored on the
186  /// b-th boundary of the mesh
187  //========================================================
188  void Mesh::remove_boundary_nodes(const unsigned& b)
189  {
190  // Loop over all the nodes on the boundary and call
191  // their remove_from_boundary function
192  unsigned n_boundary_node = Boundary_node_pt[b].size();
193  for (unsigned n = 0; n < n_boundary_node; n++)
194  {
195  Boundary_node_pt[b][n]->remove_from_boundary(b);
196  }
197  // Clear the storage
198  Boundary_node_pt[b].clear();
199  }
200 
201  //=================================================================
202  /// Remove all information about mesh boundaries
203  //================================================================
204  void Mesh::remove_boundary_nodes()
205  {
206  // Loop over each boundary call remove_boundary_nodes
207  unsigned n_bound = Boundary_node_pt.size();
208  for (unsigned b = 0; b < n_bound; b++)
209  {
210  remove_boundary_nodes(b);
211  }
212  // Clear the storage
213  Boundary_node_pt.clear();
214  }
215 
216  //============================================================
217  /// Remove the node node_pt from the b-th boundary of the mesh
218  /// This function also removes the information from the Node
219  /// itself
220  //===========================================================
221  void Mesh::remove_boundary_node(const unsigned& b, Node* const& node_pt)
222  {
223  // Find the location of the node in the boundary
224  Vector<Node*>::iterator it = std::find(
225  Boundary_node_pt[b].begin(), Boundary_node_pt[b].end(), node_pt);
226  // If the node is on this boundary
227  if (it != Boundary_node_pt[b].end())
228  {
229  // Remove the node from the mesh's list of boundary nodes
230  Boundary_node_pt[b].erase(it);
231  // Now remove the node's boundary information
232  node_pt->remove_from_boundary(b);
233  }
234  // If not do nothing
235  }
236 
237 
238  //========================================================
239  /// Add the node node_pt to the b-th boundary of the mesh
240  /// This function also sets the boundary information in the
241  /// Node itself
242  //=========================================================
243  void Mesh::add_boundary_node(const unsigned& b, Node* const& node_pt)
244  {
245  // Tell the node that it's on boundary b.
246  // At this point, if the node is not a BoundaryNode, the function
247  // should throw an exception.
248  node_pt->add_to_boundary(b);
249 
250  // Get the size of the Boundary_node_pt vector
251  unsigned nbound_node = Boundary_node_pt[b].size();
252  bool node_already_on_this_boundary = false;
253  // Loop over the vector
254  for (unsigned n = 0; n < nbound_node; n++)
255  {
256  // is the current node here already?
257  if (node_pt == Boundary_node_pt[b][n])
258  {
259  node_already_on_this_boundary = true;
260  }
261  }
262 
263  // Add the base node pointer to the vector if it's not there already
264  if (!node_already_on_this_boundary)
265  {
266  Boundary_node_pt[b].push_back(node_pt);
267  }
268  }
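  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): move a node from
  // boundary 0 to boundary 1 of a mesh using the two functions above.
  // "mesh_pt" and "nod_pt" are placeholders for objects built elsewhere; the
  // node must have been constructed as a BoundaryNode, otherwise
  // add_boundary_node() will throw.
  //
  //   mesh_pt->remove_boundary_node(0, nod_pt);
  //   mesh_pt->add_boundary_node(1, nod_pt);
  // -------------------------------------------------------------------------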
269 
270 
271  //=======================================================
272  /// Update nodal positions in response to changes in the domain shape.
273  /// Uses the FiniteElement::get_x(...) function for FiniteElements
274  /// and doesn't do anything for other element types.
275  /// If a MacroElement pointer has been set for a FiniteElement,
276  /// the MacroElement representation is used to update the
277  /// nodal positions; if not get_x(...) uses the FE interpolation
278  /// and thus leaves the nodal positions unchanged.
279  /// Virtual, so it can be overloaded by specific meshes,
280  /// such as AlgebraicMeshes or SpineMeshes.
281  /// Generally, this function updates the position of all nodes
282  /// in response to changes in the boundary position. For
283  /// SolidNodes it only applies the update to those SolidNodes
284  /// whose position is determined by the boundary position, unless
285  /// the bool flag is set to true.
286  //========================================================
287  void Mesh::node_update(const bool& update_all_solid_nodes)
288  {
289  // Get the current time
290  double t_start = TimingHelpers::timer();
291 
292 #ifdef PARANOID
293 #ifdef OOMPH_HAS_MPI
294  // Paranoid check to throw an error if node update is called for meshes
295  // containing hanging nodes some of whose master nodes are 'external'
296  for (unsigned long n = 0; n < nnode(); n++)
297  {
298  Node* nod_pt = Node_pt[n];
299  if (nod_pt->is_hanging())
300  {
301  // Loop over master nodes
302  unsigned nmaster = nod_pt->hanging_pt()->nmaster();
303  for (unsigned imaster = 0; imaster < nmaster; imaster++)
304  {
305  // Get pointer to master node
306  Node* master_nod_pt = nod_pt->hanging_pt()->master_node_pt(imaster);
307 
308  // Get vector of all external halo nodes
309  Vector<Node*> external_halo_node_pt;
310  get_external_halo_node_pt(external_halo_node_pt);
311 
312  // Search the external halo storage for this node
313  Vector<Node*>::iterator it = std::find(external_halo_node_pt.begin(),
314  external_halo_node_pt.end(),
315  master_nod_pt);
316 
317  // Check if the node was found
318  if (it != external_halo_node_pt.end())
319  {
320  // Throw an error because node update won't work
321  // It's ok to throw an error here because this function is
322  // overloaded for Algebraic and MacroElementNodeUpdate
323  // Meshes. This is only a problem for meshes of ordinary
324  // nodes.
325  std::ostringstream err_stream;
326 
327  err_stream << "Calling node_update() for a mesh which contains"
328  << std::endl
329  << "master nodes which live in the external storage."
330  << std::endl
331  << "These nodes do not belong to elements on this"
332  << std::endl
333  << "processor and therefore cannot be updated locally."
334  << std::endl;
335 
336  throw OomphLibError(err_stream.str(),
337  OOMPH_CURRENT_FUNCTION,
338  OOMPH_EXCEPTION_LOCATION);
339  }
340  }
341  }
342  }
343  // If we get to here then none of the masters of any of the nodes in the
344  // mesh live in the external storage, so we'll be fine if we carry on.
345 #endif
346 #endif
347 
348  /// Local and global (Eulerian) coordinate
349  Vector<double> s;
350  Vector<double> r;
351 
352  // NB: This repeats nodes a lot - surely it would be
353  // quicker to modify it so that it only does each node once,
354  // particularly in the update_all_solid_nodes=true case?
355  // Create a map to indicate whether or not we've updated a node already
356  std::map<Node*, bool> has_node_been_updated;
357 
358  // How many nodes are there?
359  unsigned n_node = nnode();
360 
361  // Loop over all Nodes
362  for (unsigned n = 0; n < n_node; n++)
363  {
364  // Get pointer to node
365  Node* nod_pt = node_pt(n);
366 
367  // Initialise the boolean value associated with this node
368  has_node_been_updated[nod_pt] = false;
369  }
370 
371  // Loop over all elements
372  unsigned nel = nelement();
373  for (unsigned e = 0; e < nel; e++)
374  {
375  // Try to cast to FiniteElement
376  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(element_pt(e));
377 
378  // If it's a finite element we can proceed: FiniteElements have
379  // nodes and a get_x() function
380  if (el_pt != 0)
381  {
382  // Find out dimension of element = number of local coordinates
383  unsigned ndim_el = el_pt->dim();
384  s.resize(ndim_el);
385 
386  // Loop over nodal points
387  unsigned n_node = el_pt->nnode();
388  for (unsigned j = 0; j < n_node; j++)
389  {
390  // Get pointer to node
391  Node* nod_pt = el_pt->node_pt(j);
392 
393  // Get spatial dimension of node
394  unsigned ndim_node = nod_pt->ndim();
395  r.resize(ndim_node);
396 
397  // For non-hanging nodes
398  if (!(nod_pt->is_hanging()))
399  {
400  // If we've not dealt with this Node yet
401  if (!has_node_been_updated[nod_pt])
402  {
403  // Get the position of the node
404  el_pt->local_coordinate_of_node(j, s);
405 
406  // Get new position
407  el_pt->get_x(s, r);
408 
409  // Try to cast to SolidNode
410  SolidNode* solid_node_pt = dynamic_cast<SolidNode*>(nod_pt);
411 
412  // Loop over coordinate directions
413  for (unsigned i = 0; i < ndim_node; i++)
414  {
415  // It's a SolidNode:
416  if (solid_node_pt != 0)
417  {
418  // only do it if explicitly requested!
419  if (update_all_solid_nodes)
420  {
421  solid_node_pt->x(i) = r[i];
422  }
423  }
424  // Not a SolidNode: Definitely update
425  else
426  {
427  nod_pt->x(i) = r[i];
428  }
429  }
430 
431  // Indicate that we're done with this node, regardless of whether
432  // or not it even needed updating
433  has_node_been_updated[nod_pt] = true;
434  } // if (!has_node_been_updated[nod_pt])
435  } // if (!(nod_pt->is_hanging()))
436  } // for (unsigned j=0;j<n_node;j++)
437  } // if (el_pt!=0)
438  } // for (unsigned e=0;e<nel;e++)
439 
440  // Now update the external halo nodes before we adjust the positions of the
441  // hanging nodes in case any are masters of local nodes
442 #ifdef OOMPH_HAS_MPI
443  // Loop over all external halo nodes with other processors
444  // and update them
445  for (std::map<unsigned, Vector<Node*>>::iterator it =
446  External_halo_node_pt.begin();
447  it != External_halo_node_pt.end();
448  it++)
449  {
450  // Get vector of external halo nodes
451  Vector<Node*> ext_halo_node_pt = (*it).second;
452  unsigned nnod = ext_halo_node_pt.size();
453  for (unsigned j = 0; j < nnod; j++)
454  {
455  ext_halo_node_pt[j]->node_update();
456  }
457  }
458 #endif
459 
460  // Now loop over hanging nodes and adjust their position
461  // in line with their hanging node constraints
462  for (unsigned long n = 0; n < n_node; n++)
463  {
464  Node* nod_pt = Node_pt[n];
465  if (nod_pt->is_hanging())
466  {
467  // Get spatial dimension of node
468  unsigned ndim_node = nod_pt->ndim();
469 
470  // Initialise
471  for (unsigned i = 0; i < ndim_node; i++)
472  {
473  nod_pt->x(i) = 0.0;
474  }
475 
476  // Loop over master nodes
477  unsigned nmaster = nod_pt->hanging_pt()->nmaster();
478  for (unsigned imaster = 0; imaster < nmaster; imaster++)
479  {
480  // Loop over directions
481  for (unsigned i = 0; i < ndim_node; i++)
482  {
483  nod_pt->x(i) +=
484  nod_pt->hanging_pt()->master_node_pt(imaster)->x(i) *
485  nod_pt->hanging_pt()->master_weight(imaster);
486  }
487  }
488  }
489  }
490 
491  // Loop over all nodes again and execute auxiliary node update
492  // function
493  for (unsigned long n = 0; n < n_node; n++)
494  {
495  Node_pt[n]->perform_auxiliary_node_update_fct();
496  }
497 
498  // Tell the user how long it's taken
499  oomph_info << "Time taken to update nodal positions [sec]: "
500  << TimingHelpers::timer() - t_start << std::endl;
501  }
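  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): after the domain
  // shape has changed (e.g. a MacroElement-based boundary has moved), update
  // the nodal positions. Passing "true" also resets SolidNodes whose
  // positions are normally determined by the solid mechanics. "mesh_pt" is a
  // placeholder.
  //
  //   mesh_pt->node_update(false);   // update "ordinary" nodes only
  //   mesh_pt->node_update(true);    // ...also reset all SolidNodes
  // -------------------------------------------------------------------------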
502 
503 
504  //=======================================================
505  /// Reorder nodes in the order in which they are
506  /// encountered when stepping through the elements
507  //========================================================
508  void Mesh::reorder_nodes(const bool& use_old_ordering)
509  {
510  // Create storage for the reordered nodes
511  Vector<Node*> reordering;
512 
513  // Get the reordered nodes (without altering the mesh's node vector)
514  get_node_reordering(reordering, use_old_ordering);
515 
516  // Get the number of nodes in the mesh
517  unsigned n_node = nnode();
518 
519  // Loop over all of the nodes
520  for (unsigned i = 0; i < n_node; i++)
521  {
522  // Replace the Mesh's i-th node pointer with the reordered node pointer
523  node_pt(i) = reordering[i];
524  }
525  } // End of reorder_nodes
526 
527  //=======================================================
528  /// Get a vector of the nodes in the order in which they are encountered
529  /// when stepping through the elements (similar to reorder_nodes() but
530  /// without changing the mesh's node vector).
531  //========================================================
532  void Mesh::get_node_reordering(Vector<Node*>& reordering,
533  const bool& use_old_ordering) const
534  {
535  // If the user wants to use the original order
536  if (use_old_ordering)
537  {
538  // Setup map to check if nodes have been done yet
539  std::map<Node*, bool> done;
540 
541  // Loop over all nodes
542  unsigned nnod = nnode();
543 
544  // Initialise the vector
545  reordering.assign(nnod, 0);
546 
547  // Return immediately if there are no nodes: Note assumption:
548  // Either all the elements' nodes stored here or none. If only a subset
549  // is stored in the Node_pt vector we'll get a range checking error below
550  // (only if run with range checking, of course).
551  if (nnod == 0)
552  {
553  // Return immediately
554  return;
555  }
556 
557  // Loop over the nodes in the mesh
558  for (unsigned j = 0; j < nnod; j++)
559  {
560  // Indicate whether or not the node has been swapped
561  done[node_pt(j)] = false;
562  }
563 
564  // Initialise counter for number of nodes
565  unsigned long count = 0;
566 
567  // Get the number of elements in the mesh
568  unsigned nel = nelement();
569 
570  // Loop over all elements
571  for (unsigned e = 0; e < nel; e++)
572  {
573  // Make sure the e-th element is a FiniteElement (or derived) class
574  // object
575  FiniteElement* el_pt =
576  checked_dynamic_cast<FiniteElement*>(element_pt(e));
577 
578  // Get the number of nodes in this element
579  unsigned nnod = el_pt->nnode();
580 
581  // Loop over nodes in element
582  for (unsigned j = 0; j < nnod; j++)
583  {
584  // Get a pointer to the j-th node in the element
585  Node* nod_pt = el_pt->node_pt(j);
586 
587  // Has node been done yet?
588  if (!done[nod_pt])
589  {
590  // Insert into node vector. NOTE: If you get a seg fault/range
591  // checking error here then you probably haven't added all the
592  // elements' nodes to the Node_pt vector -- this is most likely to
593  // arise in the case of meshes of face elements (though they usually
594  // don't store the nodes at all so if you have any problems here
595  // there's something unusual/not quite right in any case... For this
596  // reason we don't range check here by default (not even under
597  // paranoia) but force you to turn on proper (costly) range checking to
598  // track this down...
599  reordering[count] = nod_pt;
600 
601  // Indicate that the node has been done
602  done[nod_pt] = true;
603 
604  // Increase counter
605  count++;
606  }
607  } // for (unsigned j=0;j<nnod;j++)
608  } // for (unsigned e=0;e<nel;e++)
609 
610  // Sanity check
611  if (count != nnod)
612  {
613  // Create an error message
614  std::string error_message = "Trouble: Number of nodes hasn't stayed ";
615 
616  // Finish off the message
617  error_message += "constant during reordering!\n";
618 
619  // Throw an error
620  throw OomphLibError(
621  error_message, OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
622  }
623  }
624  else
625  {
626  // Copy node vector out
627  unsigned n_node = nnode();
628 
629  // Resize the node ordering vector
630  reordering.resize(n_node);
631 
632  // Loop over the nodes
633  for (unsigned i = 0; i < n_node; i++)
634  {
635  // Assign the i-th node pointer entry
636  reordering[i] = node_pt(i);
637  }
638 
639  // Now sort the nodes lexicographically
640  std::sort(reordering.begin(),
641  reordering.end(),
642  [](Node* a_pt, Node* b_pt) {
       // Order nodes by their spatial position, coordinate by coordinate
       unsigned dim = std::min(a_pt->ndim(), b_pt->ndim());
       for (unsigned i = 0; i < dim; i++)
       {
         if (a_pt->x(i) < b_pt->x(i)) return true;
         if (a_pt->x(i) > b_pt->x(i)) return false;
       }
       return false;
     });
643  } // if (use_old_ordering)
644  } // End of get_node_reordering
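  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): re-order the
  // mesh's nodes, either in the order in which they are encountered when
  // looping over the elements (use_old_ordering = true) or sorted by their
  // spatial position (use_old_ordering = false). "mesh_pt" is a placeholder.
  //
  //   mesh_pt->reorder_nodes(true);                   // element-based order
  //
  //   Vector<Node*> ordering;
  //   mesh_pt->get_node_reordering(ordering, false);  // inspect the ordering
  //                                                   // without modifying
  //                                                   // the mesh
  // -------------------------------------------------------------------------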
645 
646 
647  //========================================================
648  /// Virtual Destructor to clean up all memory
649  //========================================================
650  Mesh::~Mesh()
651  {
652  // Free the nodes first
653  // Loop over the nodes in reverse order
654  unsigned long Node_pt_range = Node_pt.size();
655  for (unsigned long i = Node_pt_range; i > 0; i--)
656  {
657  delete Node_pt[i - 1];
658  Node_pt[i - 1] = 0;
659  }
660 
661  // Free the elements
662  // Loop over the elements in reverse order
663  unsigned long Element_pt_range = Element_pt.size();
664  for (unsigned long i = Element_pt_range; i > 0; i--)
665  {
666  delete Element_pt[i - 1];
667  Element_pt[i - 1] = 0;
668  }
669 
670  // Wipe the storage for all externally-based elements and delete halos
671  delete_all_external_storage();
672  }
673 
674  //========================================================
675  /// Assign (global) equation numbers to the nodes
676  //========================================================
677  unsigned long Mesh::assign_global_eqn_numbers(Vector<double*>& Dof_pt)
678  {
679  // Find out the current number of equations
680  unsigned long equation_number = Dof_pt.size();
681 
682  // Loop over the nodes and call their assignment functions
683  unsigned long nnod = Node_pt.size();
684 
685  for (unsigned long i = 0; i < nnod; i++)
686  {
687  Node_pt[i]->assign_eqn_numbers(equation_number, Dof_pt);
688  }
689 
690  // Loop over the elements and number their internals
691  unsigned long nel = Element_pt.size();
692  for (unsigned long i = 0; i < nel; i++)
693  {
694  Element_pt[i]->assign_internal_eqn_numbers(equation_number, Dof_pt);
695  }
696 
697  // Return the total number of equations
698  return (equation_number);
699  }
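  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): number the mesh's
  // unknowns. This is normally driven by Problem::assign_eqn_numbers(); when
  // called directly, the vector of pointers to the dofs is extended in place
  // and the total number of equations is returned. "mesh_pt" is a
  // placeholder.
  //
  //   Vector<double*> dof_pt;
  //   unsigned long n_eqn = mesh_pt->assign_global_eqn_numbers(dof_pt);
  //   oomph_info << "Mesh contributes " << n_eqn << " equations" << std::endl;
  // -------------------------------------------------------------------------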
700 
701  //========================================================
702  /// Function to describe the dofs of the Mesh. The ostream
703  /// specifies the output stream to which the description
704  /// is written; the string stores the currently
705  /// assembled output that is ultimately written to the
706  /// output stream by Data::describe_dofs(...); it is typically
707  /// built up incrementally as we descend through the
708  /// call hierarchy of this function when called from
709  /// Problem::describe_dofs(...)
710  //========================================================
711  void Mesh::describe_dofs(std::ostream& out,
712  const std::string& current_string) const
713  {
714  // Loop over the nodes and call their classification functions
715  unsigned long nnod = Node_pt.size();
716  for (unsigned long i = 0; i < nnod; i++)
717  {
718  std::stringstream conversion;
719  conversion << " of Node " << i << current_string;
720  std::string in(conversion.str());
721  Node_pt[i]->describe_dofs(out, in);
722  }
723 
724  // Loop over the elements and classify.
725  unsigned long nel = Element_pt.size();
726  for (unsigned long i = 0; i < nel; i++)
727  {
728  std::stringstream conversion;
729  conversion << " in Element " << i << " [" << typeid(*Element_pt[i]).name()
730  << "] " << current_string;
731  std::string in(conversion.str());
732  Element_pt[i]->describe_dofs(out, in);
733  }
734  }
735 
736  //========================================================
737  /// Function to describe the local dofs of the elements. The ostream
738  /// specifies the output stream to which the description
739  /// is written; the string stores the currently
740  /// assembled output that is ultimately written to the
741  /// output stream by Data::describe_dofs(...); it is typically
742  /// built up incrementally as we descend through the
743  /// call hierarchy of this function when called from
744  /// Problem::describe_dofs(...)
745  //========================================================
746  void Mesh::describe_local_dofs(std::ostream& out,
747  const std::string& current_string) const
748  {
749  // Now loop over the elements and describe local dofs
750  unsigned long nel = Element_pt.size();
751  for (unsigned long i = 0; i < nel; i++)
752  {
753  std::stringstream conversion;
754  conversion << " in Element" << i << " [" << typeid(*Element_pt[i]).name()
755  << "] " << current_string;
756  std::string in(conversion.str());
757  Element_pt[i]->describe_local_dofs(out, in);
758  }
759  }
760 
761 
762  //========================================================
763  /// Assign local equation numbers in all elements
764  //========================================================
765  void Mesh::assign_local_eqn_numbers(const bool& store_local_dof_pt)
766  {
767  // Now loop over the elements and assign local equation numbers
768  unsigned long Element_pt_range = Element_pt.size();
769  for (unsigned long i = 0; i < Element_pt_range; i++)
770  {
771  Element_pt[i]->assign_local_eqn_numbers(store_local_dof_pt);
772  }
773  }
774 
775  //========================================================
776  /// Self-test: Check elements and nodes. Return 0 for OK
777  //========================================================
778  unsigned Mesh::self_test()
779  {
780  // Initialise
781  bool passed = true;
782 
783  // Check the mesh for repeated nodes (issues its own error message)
784  if (0 != check_for_repeated_nodes()) passed = false;
785 
786  // hierher -- re-enable once problem with Hermite elements has been
787  // resolved.
788  // // Check if there are any inverted elements
789  // bool mesh_has_inverted_elements=false;
790  // check_inverted_elements(mesh_has_inverted_elements);
791  // if (mesh_has_inverted_elements)
792  // {
793  // passed=false;
794  // oomph_info << "\n ERROR: Mesh has inverted elements\n"
795  // << " Run Mesh::check_inverted_elements(...) with"
796  // << " with output stream to find out which elements are"
797  // << " inverted.\n";
798  // }
799 
800  // Loop over the elements, check for duplicates and do self test
801  std::set<GeneralisedElement*> element_set_pt;
802  unsigned long Element_pt_range = Element_pt.size();
803  for (unsigned long i = 0; i < Element_pt_range; i++)
804  {
805  if (Element_pt[i]->self_test() != 0)
806  {
807  passed = false;
808  oomph_info << "\n ERROR: Failed Element::self_test() for element i="
809  << i << std::endl;
810  }
811  // Add to set (which ignores duplicates):
812  element_set_pt.insert(Element_pt[i]);
813  }
814 
815  // Check for duplicates:
816  if (element_set_pt.size() != Element_pt_range)
817  {
818  oomph_info << "ERROR: " << Element_pt_range - element_set_pt.size()
819  << " duplicate elements were encountered in mesh!"
820  << std::endl;
821  passed = false;
822  }
823 
824 
825  // Loop over the nodes, check for duplicates and do self test
826  std::set<Node*> node_set_pt;
827  unsigned long Node_pt_range = Node_pt.size();
828  for (unsigned long i = 0; i < Node_pt_range; i++)
829  {
830  if (Node_pt[i]->self_test() != 0)
831  {
832  passed = false;
833  oomph_info << "\n ERROR: Failed Node::self_test() for node i=" << i
834  << std::endl;
835  }
836  // Add to set (which ignores duplicates):
837  node_set_pt.insert(Node_pt[i]);
838  }
839 
840  // Check for duplicates:
841  if (node_set_pt.size() != Node_pt_range)
842  {
843  oomph_info << "ERROR: " << Node_pt_range - node_set_pt.size()
844  << " duplicate nodes were encountered in mesh!" << std::endl;
845  passed = false;
846  }
847 
848  // Return verdict
849  if (passed)
850  {
851  return 0;
852  }
853  else
854  {
855  return 1;
856  }
857  }
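  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): run the mesh's
  // self-test (which returns 0 if all checks pass) and abort otherwise.
  // "mesh_pt" is a placeholder.
  //
  //   if (mesh_pt->self_test() != 0)
  //   {
  //     throw OomphLibError("Mesh failed self_test()",
  //                         OOMPH_CURRENT_FUNCTION,
  //                         OOMPH_EXCEPTION_LOCATION);
  //   }
  // -------------------------------------------------------------------------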
858 
859 
860  //========================================================
861  /// Check for inverted elements and report outcome
862  /// in boolean variable. This visits all elements at their
863  /// integration points and checks if the Jacobian of the
864  /// mapping between local and global coordinates is positive --
865  /// using the same test that would be carried out (but only in PARANOID
866  /// mode) during the assembly of the elements' Jacobian matrices.
867  /// Inverted elements are output in inverted_element_file (if the
868  /// stream is open).
869  //========================================================
870  void Mesh::check_inverted_elements(bool& mesh_has_inverted_elements,
871  std::ofstream& inverted_element_file)
872  {
873  // Initialise flag
874  mesh_has_inverted_elements = false;
875 
876  // Suppress output while checking for inverted elements
877  bool backup =
878  FiniteElement::Suppress_output_while_checking_for_inversion;
879  FiniteElement::Suppress_output_while_checking_for_inversion = true;
880 
881  // Loop over all elements
882  unsigned nelem = nelement();
883  for (unsigned e = 0; e < nelem; e++)
884  {
885  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(element_pt(e));
886 
887  // Only check for finite elements
888  if (el_pt != 0)
889  {
890  // Find out number of nodes and local coordinates in the element
891  unsigned n_node = el_pt->nnode();
892  unsigned n_dim = el_pt->dim();
893  unsigned ndim_node = el_pt->nodal_dimension();
894 
895  // Can't check Jacobian for elements in which nodal and elemental
896  // dimensions don't match
897  if (n_dim == ndim_node)
898  {
899  // Set up memory for the shape and test function and local coord
900  Shape psi(n_node);
901  DShape dpsidx(n_node, n_dim);
902  Vector<double> s(n_dim);
903 
904  // Initialise element-level test
905  bool is_inverted = false;
906 
907  unsigned n_intpt = el_pt->integral_pt()->nweight();
908  for (unsigned ipt = 0; ipt < n_intpt; ipt++)
909  {
910  // Local coordinates
911  for (unsigned i = 0; i < n_dim; i++)
912  {
913  s[i] = el_pt->integral_pt()->knot(ipt, i);
914  }
915 
916  double J = 0;
917  // Dummy assignment to keep gcc from complaining about
918  // "set but unused".
919  J += 0.0;
920  try
921  {
922  // Call the derivatives of the shape functions and Jacobian
923  J = el_pt->dshape_eulerian(s, psi, dpsidx);
924 
925  // If code is compiled without PARANOID setting, the
926  // above call will simply return the negative Jacobian
927  // without failing, so we need to check manually
928 #ifndef PARANOID
929  try
930  {
931  el_pt->check_jacobian(J);
932  }
933  catch (OomphLibQuietException& error)
934  {
935  is_inverted = true;
936  }
937 #endif
938  }
939  catch (OomphLibQuietException& error)
940  {
941  is_inverted = true;
942  }
943  }
944  if (is_inverted)
945  {
946  mesh_has_inverted_elements = true;
947  if (inverted_element_file.is_open())
948  {
949  el_pt->output(inverted_element_file);
950  }
951  }
952  }
953  }
954  }
955  // Reset
956  FiniteElement::Suppress_output_while_checking_for_inversion =
957  backup;
958  }
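  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): check the mesh for
  // inverted elements and write any offenders to a file for inspection.
  // "mesh_pt" is a placeholder.
  //
  //   std::ofstream inverted_element_file("inverted_elements.dat");
  //   bool mesh_has_inverted_elements = false;
  //   mesh_pt->check_inverted_elements(mesh_has_inverted_elements,
  //                                    inverted_element_file);
  //   inverted_element_file.close();
  //   if (mesh_has_inverted_elements)
  //   {
  //     oomph_info << "Mesh contains inverted elements!" << std::endl;
  //   }
  // -------------------------------------------------------------------------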
959 
960 
961  //========================================================
962  /// Nodes that have been marked as obsolete are removed
963  /// from the mesh and its boundaries. Returns a vector
964  /// of pointers to deleted nodes.
965  //========================================================
966  Vector<Node*> Mesh::prune_dead_nodes()
967  {
968  // Only copy the 'live' nodes across to new mesh
969  //----------------------------------------------
970 
971  // New Vector of pointers to nodes
972  Vector<Node*> new_node_pt;
973  Vector<Node*> deleted_node_pt;
974 
975  // Loop over all nodes in mesh
976  unsigned long n_node = nnode();
977  for (unsigned long n = 0; n < n_node; n++)
978  {
979  // If the node still exists: Copy across
980  if (!(Node_pt[n]->is_obsolete()))
981  {
982  new_node_pt.push_back(Node_pt[n]);
983  }
984  // Otherwise the Node is gone:
985  // Delete it for good if it does not lie on a boundary
986  // (if it lives on a boundary we have to remove it from
987  // the boundary lookup schemes below)
988  else
989  {
990  if (!(Node_pt[n]->is_on_boundary()))
991  {
992  deleted_node_pt.push_back(Node_pt[n]);
993  delete Node_pt[n];
994  Node_pt[n] = 0;
995  }
996  }
997  }
998 
999  // Now update old vector by setting it equal to the new vector
1000  Node_pt = new_node_pt;
1001 
1002 
1003  // Boundaries
1004  //-----------
1005 
1006  // Only copy the 'live' nodes into new boundary node arrays
1007  //---------------------------------------------------------
1008  // Loop over the boundaries
1009  unsigned num_bound = nboundary();
1010  for (unsigned ibound = 0; ibound < num_bound; ibound++)
1011  {
1012  // New Vector of pointers to existent boundary nodes
1013  Vector<Node*> new_boundary_node_pt;
1014 
1015  // Loop over the boundary nodes
1016  unsigned long Nboundary_node = Boundary_node_pt[ibound].size();
1017 
1018  // Reserve contiguous memory for new vector of pointers
1019  // Must be equal in size to the number of nodes or less
1020  new_boundary_node_pt.reserve(Nboundary_node);
1021 
1022  for (unsigned long n = 0; n < Nboundary_node; n++)
1023  {
1024  // If node still exists: Copy across
1025  if (!(Boundary_node_pt[ibound][n]->is_obsolete()))
1026  {
1027  new_boundary_node_pt.push_back(Boundary_node_pt[ibound][n]);
1028  }
1029  // Otherwise Node is gone: Delete it for good
1030  else
1031  {
1032  // The node may lie on multiple boundaries, so remove the node
1033  // from the current boundary
1034  Boundary_node_pt[ibound][n]->remove_from_boundary(ibound);
1035 
1036  // Now if the node is no longer on any boundaries, delete it
1037  if (!Boundary_node_pt[ibound][n]->is_on_boundary())
1038  {
1039  deleted_node_pt.push_back(
1040  dynamic_cast<Node*>(Boundary_node_pt[ibound][n]));
1041 
1042  delete Boundary_node_pt[ibound][n];
1043  }
1044  }
1045  }
1046 
1047  // Update the old vector by setting it equal to the new vector
1048  Boundary_node_pt[ibound] = new_boundary_node_pt;
1049 
1050  } // End of loop over boundaries
1051 
1052  // Tell us who you deleted
1053  return deleted_node_pt;
1054  }
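  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): after unwanted
  // nodes have been marked as obsolete (e.g. during unrefinement), remove
  // them from the mesh and its boundary lookup schemes. "mesh_pt" is a
  // placeholder.
  //
  //   Vector<Node*> deleted_node_pt = mesh_pt->prune_dead_nodes();
  //   oomph_info << "Deleted " << deleted_node_pt.size()
  //              << " obsolete nodes" << std::endl;
  // -------------------------------------------------------------------------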
1055 
1056 
1057  //========================================================
1058  /// Output function for the mesh boundaries
1059  ///
1060  /// Loop over all boundaries and dump out the coordinates
1061  /// of the points on the boundary (in individual tecplot
1062  /// zones)
1063  //========================================================
1064  void Mesh::output_boundaries(std::ostream& outfile)
1065  {
1066  // Loop over the boundaries
1067  unsigned num_bound = nboundary();
1068  for (unsigned long ibound = 0; ibound < num_bound; ibound++)
1069  {
1070  unsigned nnod = Boundary_node_pt[ibound].size();
1071  if (nnod > 0)
1072  {
1073  outfile << "ZONE T=\"boundary" << ibound << "\"\n";
1074 
1075  for (unsigned inod = 0; inod < nnod; inod++)
1076  {
1077  Boundary_node_pt[ibound][inod]->output(outfile);
1078  }
1079  }
1080  }
1081  }
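  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): write the nodes on
  // all mesh boundaries to a file, one tecplot zone per boundary. "mesh_pt"
  // is a placeholder.
  //
  //   std::ofstream boundary_file("boundaries.dat");
  //   mesh_pt->output_boundaries(boundary_file);
  //   boundary_file.close();
  // -------------------------------------------------------------------------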
1082 
1083 
1084  //===================================================================
1085  /// Dump function for the mesh class.
1086  /// Loop over all nodes and elements and dump them
1087  //===================================================================
1088  void Mesh::dump(std::ofstream& dump_file, const bool& use_old_ordering) const
1089  {
1090  // Get a reordering of the nodes so that the dump file is in a standard
1091  // ordering regardless of the sequence of mesh refinements etc.
1092  Vector<Node*> reordering;
1093  this->get_node_reordering(reordering, use_old_ordering);
1094 
1095  // Find number of nodes
1096  unsigned long Node_pt_range = this->nnode();
1097 
1098  // Doc # of nodes
1099  dump_file << Node_pt_range << " # number of nodes " << std::endl;
1100 
1101  // Loop over all the nodes and dump their data
1102  for (unsigned nd = 0; nd < Node_pt_range; nd++)
1103  {
1104  reordering[nd]->dump(dump_file);
1105  }
1106 
1107  // Loop over elements and deal with internal data
1108  unsigned n_element = this->nelement();
1109  for (unsigned e = 0; e < n_element; e++)
1110  {
1111  GeneralisedElement* el_pt = this->element_pt(e);
1112  unsigned n_internal = el_pt->ninternal_data();
1113  if (n_internal > 0)
1114  {
1115  dump_file << n_internal
1116  << " # number of internal Data items in element " << e
1117  << std::endl;
1118  for (unsigned i = 0; i < n_internal; i++)
1119  {
1120  el_pt->internal_data_pt(i)->dump(dump_file);
1121  }
1122  }
1123  }
1124  }
1125 
1126 
1127  //=======================================================
1128  /// Read solution from restart file
1129  //=======================================================
1130  void Mesh::read(std::ifstream& restart_file)
1131  {
1132  std::string input_string;
1133 
1134  // Reorder the nodes within the mesh's node vector
1135  // to establish a standard ordering regardless of the sequence
1136  // of mesh refinements etc
1137  this->reorder_nodes();
1138 
1139  // Read nodes
1140 
1141  // Find number of nodes
1142  unsigned long n_node = this->nnode();
1143 
1144  // Read line up to termination sign
1145  getline(restart_file, input_string, '#');
1146 
1147  // Ignore rest of line
1148  restart_file.ignore(80, '\n');
1149 
1150  // Check # of nodes:
1151  unsigned long check_n_node = atoi(input_string.c_str());
1152  if (check_n_node != n_node)
1153  {
1154  std::ostringstream error_stream;
1155  error_stream << "The number of nodes allocated " << n_node
1156  << " is not the same as specified in the restart file "
1157  << check_n_node << std::endl;
1158 
1159  throw OomphLibError(
1160  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1161  }
1162 
1163  // Loop over the nodes
1164  for (unsigned long n = 0; n < n_node; n++)
1165  {
1166  /// Try to cast to elastic node
1167  SolidNode* el_node_pt = dynamic_cast<SolidNode*>(this->node_pt(n));
1168  if (el_node_pt != 0)
1169  {
1170  el_node_pt->read(restart_file);
1171  }
1172  else
1173  {
1174  this->node_pt(n)->read(restart_file);
1175  }
1176  }
1177 
1178  // Read internal data of elements:
1179  //--------------------------------
1180  // Loop over elements and deal with internal data
1181  unsigned n_element = this->nelement();
1182  for (unsigned e = 0; e < n_element; e++)
1183  {
1184  GeneralisedElement* el_pt = this->element_pt(e);
1185  unsigned n_internal = el_pt->ninternal_data();
1186  if (n_internal > 0)
1187  {
1188  // Read line up to termination sign
1189  getline(restart_file, input_string, '#');
1190 
1191  // Ignore rest of line
1192  restart_file.ignore(80, '\n');
1193 
1194  // Check # of internals :
1195  unsigned long check_n_internal = atoi(input_string.c_str());
1196  if (check_n_internal != n_internal)
1197  {
1198  std::ostringstream error_stream;
1199  error_stream << "The number of internal data " << n_internal
1200  << " is not the same as specified in the restart file "
1201  << check_n_internal << std::endl;
1202 
1203  throw OomphLibError(error_stream.str(),
1204  OOMPH_CURRENT_FUNCTION,
1205  OOMPH_EXCEPTION_LOCATION);
1206  }
1207 
1208  for (unsigned i = 0; i < n_internal; i++)
1209  {
1210  el_pt->internal_data_pt(i)->read(restart_file);
1211  }
1212  }
1213  }
1214  }
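  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): dump the mesh's
  // nodal and internal Data to a restart file and read it back in a later
  // run. The mesh must have been built with the same topology in both runs;
  // "mesh_pt" is a placeholder.
  //
  //   std::ofstream dump_file("restart.dat");
  //   mesh_pt->dump(dump_file, true);
  //   dump_file.close();
  //
  //   std::ifstream restart_file("restart.dat");
  //   mesh_pt->read(restart_file);
  //   restart_file.close();
  // -------------------------------------------------------------------------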
1215 
1216 
1217  //========================================================
1218  /// Output in paraview format into specified file.
1219  ///
1220  /// Breaks up each element into sub-elements for plotting
1221  /// purposes. We assume that all elements are of the same
1222  /// type (fct will break (in paranoid mode) if paraview
1223  /// output fcts of the elements are inconsistent).
1224  //========================================================
1225  void Mesh::output_paraview(std::ofstream& file_out,
1226  const unsigned& nplot) const
1227  {
1228  // Change the scientific format so that E is used rather than e
1229  file_out.setf(std::ios_base::uppercase);
1230 
1231  // Decide how many elements there are to be plotted
1232  unsigned long number_of_elements = this->Element_pt.size();
1233 
1234  // Cast to finite element and return if cast fails.
1235  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(0));
1236 
1237 #ifdef PARANOID
1238  if (fe_pt == 0)
1239  {
1240  throw OomphLibError("Recast for FiniteElement failed for element 0!\n",
1241  OOMPH_CURRENT_FUNCTION,
1242  OOMPH_EXCEPTION_LOCATION);
1243  }
1244 #endif
1245 
1246 
1247 #ifdef PARANOID
1248  // Check if all elements have the same number of degrees of freedom,
1249  // if they don't, paraview will break
1250  unsigned el_zero_ndof = fe_pt->nscalar_paraview();
1251  for (unsigned i = 1; i < number_of_elements; i++)
1252  {
1253  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1254  unsigned el_i_ndof = fe_pt->nscalar_paraview();
1255  if (el_zero_ndof != el_i_ndof)
1256  {
1257  std::stringstream error_stream;
1258  error_stream
1259  << "Element " << i << " has a different number of degrees of freedom\n"
1260  << "than previous elements; Paraview cannot handle this.\n"
1261  << "We suggest that the problem is broken up into submeshes instead."
1262  << std::endl;
1263  throw OomphLibError(
1264  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1265  }
1266  }
1267 #endif
1268 
1269  // Make variables to hold the number of nodes and elements
1270  unsigned long number_of_nodes = 0;
1271  unsigned long total_number_of_elements = 0;
1272 
1273  // Loop over all the elements to find total number of plot points
1274  for (unsigned i = 0; i < number_of_elements; i++)
1275  {
1276  // Cast to FiniteElement and (in paranoid mode) check
1277  // if cast has failed.
1278  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1279 
1280 #ifdef PARANOID
1281  if (fe_pt == 0)
1282  {
1283  std::stringstream error_stream;
1284  error_stream << "Recast for element " << i << " failed" << std::endl;
1285  throw OomphLibError(
1286  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1287  }
1288 #endif
1289 
1290  number_of_nodes += fe_pt->nplot_points_paraview(nplot);
1291  total_number_of_elements += fe_pt->nsub_elements_paraview(nplot);
1292  }
1293 
1294 
1295  // File Declaration
1296  //------------------
1297 
1298  // Insert the necessary lines plus header of file, and
1299  // number of nodes and elements
1300  file_out << "<?xml version=\"1.0\"?>\n"
1301  << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\" "
1302  << "byte_order=\"LittleEndian\">\n"
1303  << "<UnstructuredGrid>\n"
1304  << "<Piece NumberOfPoints=\"" << number_of_nodes
1305  << "\" NumberOfCells=\"" << total_number_of_elements << "\">\n";
1306 
1307 
1308  // Point Data
1309  //-----------
1310 
1311  // Check the number of degrees of freedom
1312  unsigned ndof = fe_pt->nscalar_paraview();
1313 
1314  // Point data is going in here
1315  file_out << "<PointData ";
1316 
1317  // Insert just the first scalar name, since paraview reads everything
1318  // else after that as being of the same type. Get information from
1319  // first element.
1320  file_out << "Scalars=\"" << fe_pt->scalar_name_paraview(0) << "\">\n";
1321 
1322  // Loop over i scalar fields and j number of elements
1323  for (unsigned i = 0; i < ndof; i++)
1324  {
1325  file_out << "<DataArray type=\"Float32\" "
1326  << "Name=\"" << fe_pt->scalar_name_paraview(i) << "\" "
1327  << "format=\"ascii\""
1328  << ">\n";
1329 
1330  for (unsigned j = 0; j < number_of_elements; j++)
1331  {
1332  // Cast to FiniteElement and (in paranoid mode) check
1333  // if cast has failed.
1334  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(j));
1335 
1336 #ifdef PARANOID
1337  if (fe_pt == 0)
1338  {
1339  std::stringstream error_stream;
1340  error_stream << "Recast for element " << j << " failed" << std::endl;
1341  throw OomphLibError(error_stream.str(),
1342  OOMPH_CURRENT_FUNCTION,
1343  OOMPH_EXCEPTION_LOCATION);
1344  }
1345 #endif
1346 
1347  fe_pt->scalar_value_paraview(file_out, i, nplot);
1348  }
1349 
1350  // Close of the DataArray
1351  file_out << "</DataArray>\n";
1352  }
1353 
1354  // Close off the PointData set
1355  file_out << "</PointData>\n";
1356 
1357 
1358  // Geometric Points
1359  //------------------
1360 
1361  file_out << "<Points>\n"
1362  << "<DataArray type=\"Float32\""
1363  << " NumberOfComponents=\""
1364  // This always has to be 3 for an unstructured grid
1365  << 3 << "\" "
1366  << "format=\"ascii\">\n";
1367 
1368  // Loop over all the elements to print their plot points
1369  for (unsigned i = 0; i < number_of_elements; i++)
1370  {
1371  // Cast to FiniteElement and (in paranoid mode) check
1372  // if cast has failed.
1373  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1374 
1375 #ifdef PARANOID
1376  if (fe_pt == 0)
1377  {
1378  std::stringstream error_stream;
1379  error_stream << "Recast for element " << i << " failed" << std::endl;
1380  throw OomphLibError(
1381  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1382  }
1383 #endif
1384 
1385  fe_pt->output_paraview(file_out, nplot);
1386  }
1387 
1388  file_out << "</DataArray>\n"
1389  << "</Points>\n";
1390 
1391 
1392  // Cells
1393  //-------
1394 
1395  file_out
1396  << "<Cells>\n"
1397  << "<DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">\n";
1398 
1399  // Make counter for keeping track of all the local elements,
1400  // because Paraview requires global coordinates
1401  unsigned counter = 0;
1402 
1403  // Write connectivity with the local elements
1404  for (unsigned i = 0; i < number_of_elements; i++)
1405  {
1406  // Cast to FiniteElement and (in paranoid mode) check
1407  // if cast has failed.
1408  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1409 
1410 #ifdef PARANOID
1411  if (fe_pt == 0)
1412  {
1413  std::stringstream error_stream;
1414  error_stream << "Recast for element " << i << " failed" << std::endl;
1415  throw OomphLibError(
1416  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1417  }
1418 #endif
1419  fe_pt->write_paraview_output_offset_information(file_out, nplot, counter);
1420  }
1421 
1422  file_out << "</DataArray>\n"
1423  << "<DataArray type=\"Int32\" "
1424  << "Name=\"offsets\" format=\"ascii\">\n";
1425 
1426  // Make variable that holds the current offset number
1427  unsigned offset_sum = 0;
1428 
1429  // Write the offset for the specific elements
1430  for (unsigned i = 0; i < number_of_elements; i++)
1431  {
1432  // Cast to FiniteElement and (in paranoid mode) check
1433  // if cast has failed.
1434  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1435 
1436 #ifdef PARANOID
1437  if (fe_pt == 0)
1438  {
1439  std::stringstream error_stream;
1440  error_stream << "Recast for element " << i << " failed" << std::endl;
1441  throw OomphLibError(
1442  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1443  }
1444 #endif
1445  fe_pt->write_paraview_offsets(file_out, nplot, offset_sum);
1446  }
1447 
1448  file_out << "</DataArray>\n"
1449  << "<DataArray type=\"UInt8\" Name=\"types\">\n";
1450 
1451  // Loop over all elements to get the type that they have
1452  for (unsigned i = 0; i < number_of_elements; i++)
1453  {
1454  // Cast to FiniteElement and (in paranoid mode) check
1455  // if cast has failed.
1456  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1457 
1458 #ifdef PARANOID
1459  if (fe_pt == 0)
1460  {
1461  std::stringstream error_stream;
1462  error_stream << "Recast for element " << i << " failed" << std::endl;
1463  throw OomphLibError(
1464  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1465  }
1466 #endif
1467 
1468  fe_pt->write_paraview_type(file_out, nplot);
1469  }
1470 
1471  file_out << "</DataArray>\n"
1472  << "</Cells>\n";
1473 
1474 
1475  // File Closure
1476  //-------------
1477  file_out << "</Piece>\n"
1478  << "</UnstructuredGrid>\n"
1479  << "</VTKFile>";
1480  }
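  // -------------------------------------------------------------------------
  // Usage sketch (illustrative, not part of the library): write the mesh's
  // solution to a .vtu file that can be opened directly in Paraview, using
  // (say) three plot points per coordinate direction. "mesh_pt" is a
  // placeholder.
  //
  //   std::ofstream vtu_file("solution.vtu");
  //   unsigned nplot = 3;
  //   mesh_pt->output_paraview(vtu_file, nplot);
  //   vtu_file.close();
  // -------------------------------------------------------------------------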
1481 
1482 
1483  //========================================================
1484  /// Output exact solution in paraview format into specified file.
1485  ///
1486  /// Breaks up each element into sub-elements for plotting
1487  /// purposes. We assume that all elements are of the same
1488  /// type (fct will break (in paranoid mode) if paraview
1489  /// output fcts of the elements are inconsistent).
1490  //========================================================
1491  void Mesh::output_fct_paraview(
1492  std::ofstream& file_out,
1493  const unsigned& nplot,
1494  FiniteElement::SteadyExactSolutionFctPt exact_soln_pt) const
1495  {
1496  // Change the scientific format so that E is used rather than e
1497  file_out.setf(std::ios_base::uppercase);
1498 
1499  // Decide how many elements there are to be plotted
1500  unsigned long number_of_elements = this->Element_pt.size();
1501 
1502  // Cast to finite element and return if cast fails.
1503  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(0));
1504 
1505 #ifdef PARANOID
1506  if (fe_pt == 0)
1507  {
1508  throw OomphLibError("Recast for FiniteElement failed for element 0!\n",
1509  OOMPH_CURRENT_FUNCTION,
1510  OOMPH_EXCEPTION_LOCATION);
1511  }
1512 #endif
1513 
1514 
1515 #ifdef PARANOID
1516  // Check if all elements have the same number of degrees of freedom,
1517  // if they don't, paraview will break
1518  unsigned el_zero_ndof = fe_pt->nscalar_paraview();
1519  for (unsigned i = 1; i < number_of_elements; i++)
1520  {
1521  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1522  unsigned el_i_ndof = fe_pt->nscalar_paraview();
1523  if (el_zero_ndof != el_i_ndof)
1524  {
1525  std::stringstream error_stream;
1526  error_stream
1527  << "Element " << i << " has a different number of degrees of freedom\n"
1528  << "than previous elements; Paraview cannot handle this.\n"
1529  << "We suggest that the problem is broken up into submeshes instead."
1530  << std::endl;
1531  throw OomphLibError(
1532  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1533  }
1534  }
1535 #endif
1536 
1537  // Make variables to hold the number of nodes and elements
1538  unsigned long number_of_nodes = 0;
1539  unsigned long total_number_of_elements = 0;
1540 
1541  // Loop over all the elements to find total number of plot points
1542  for (unsigned i = 0; i < number_of_elements; i++)
1543  {
1544  // Cast to FiniteElement and (in paranoid mode) check
1545  // if cast has failed.
1546  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1547 
1548 #ifdef PARANOID
1549  if (fe_pt == 0)
1550  {
1551  std::stringstream error_stream;
1552  error_stream << "Recast for element " << i << " failed" << std::endl;
1553  throw OomphLibError(
1554  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1555  }
1556 #endif
1557 
1558  number_of_nodes += fe_pt->nplot_points_paraview(nplot);
1559  total_number_of_elements += fe_pt->nsub_elements_paraview(nplot);
1560  }
1561 
1562 
1563  // File Declaration
1564  //------------------
1565 
1566  // Insert the necessary lines plus header of file, and
1567  // number of nodes and elements
1568  file_out << "<?xml version=\"1.0\"?>\n"
1569  << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\" "
1570  << "byte_order=\"LittleEndian\">\n"
1571  << "<UnstructuredGrid>\n"
1572  << "<Piece NumberOfPoints=\"" << number_of_nodes
1573  << "\" NumberOfCells=\"" << total_number_of_elements << "\">\n";
1574 
1575 
1576  // Point Data
1577  //-----------
1578 
1579  // Check the number of degrees of freedom
1580  unsigned ndof = fe_pt->nscalar_paraview();
1581 
1582  // Point data is going in here
1583  file_out << "<PointData ";
1584 
1585  // Insert just the first scalar name, since paraview reads everything
1586  // else after that as being of the same type. Get information from
1587  // first element.
1588  file_out << "Scalars=\"" << fe_pt->scalar_name_paraview(0) << "\">\n";
1589 
1590  // Loop over i scalar fields and j number of elements
1591  for (unsigned i = 0; i < ndof; i++)
1592  {
1593  file_out << "<DataArray type=\"Float32\" "
1594  << "Name=\"" << fe_pt->scalar_name_paraview(i) << "\" "
1595  << "format=\"ascii\""
1596  << ">\n";
1597 
1598  for (unsigned j = 0; j < number_of_elements; j++)
1599  {
1600  // Cast to FiniteElement and (in paranoid mode) check
1601  // if cast has failed.
1602  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(j));
1603 
1604 #ifdef PARANOID
1605  if (fe_pt == 0)
1606  {
1607  std::stringstream error_stream;
1608  error_stream << "Recast for element " << j << " failed" << std::endl;
1609  throw OomphLibError(error_stream.str(),
1610  OOMPH_CURRENT_FUNCTION,
1611  OOMPH_EXCEPTION_LOCATION);
1612  }
1613 #endif
1614 
1615  fe_pt->scalar_value_fct_paraview(file_out, i, nplot, exact_soln_pt);
1616  }
1617 
1618  // Close of the DataArray
1619  file_out << "</DataArray>\n";
1620  }
1621 
1622  // Close off the PointData set
1623  file_out << "</PointData>\n";
1624 
1625 
1626  // Geometric Points
1627  //------------------
1628 
1629  file_out << "<Points>\n"
1630  << "<DataArray type=\"Float32\""
1631  << " NumberOfComponents=\""
1632  // This always has to be 3 for an unstructured grid
1633  << 3 << "\" "
1634  << "format=\"ascii\">\n";
1635 
1636  // Loop over all the elements to print their plot points
1637  for (unsigned i = 0; i < number_of_elements; i++)
1638  {
1639  // Cast to FiniteElement and (in paranoid mode) check
1640  // if cast has failed.
1641  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1642 
1643 #ifdef PARANOID
1644  if (fe_pt == 0)
1645  {
1646  std::stringstream error_stream;
1647  error_stream << "Recast for element " << i << " failed" << std::endl;
1648  throw OomphLibError(
1649  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1650  }
1651 #endif
1652 
1653  fe_pt->output_paraview(file_out, nplot);
1654  }
1655 
1656  file_out << "</DataArray>\n"
1657  << "</Points>\n";
1658 
1659 
1660  // Cells
1661  //-------
1662 
1663  file_out
1664  << "<Cells>\n"
1665  << "<DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">\n";
1666 
1667  // Make counter for keeping track of all the local elements,
1668  // because Paraview requires global coordinates
1669  unsigned counter = 0;
1670 
1671  // Write connectivity with the local elements
1672  for (unsigned i = 0; i < number_of_elements; i++)
1673  {
1674  // Cast to FiniteElement and (in paranoid mode) check
1675  // if cast has failed.
1676  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1677 
1678 #ifdef PARANOID
1679  if (fe_pt == 0)
1680  {
1681  std::stringstream error_stream;
1682  error_stream << "Recast for element " << i << " failed" << std::endl;
1683  throw OomphLibError(
1684  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1685  }
1686 #endif
1687  fe_pt->write_paraview_output_offset_information(file_out, nplot, counter);
1688  }
1689 
1690  file_out << "</DataArray>\n"
1691  << "<DataArray type=\"Int32\" "
1692  << "Name=\"offsets\" format=\"ascii\">\n";
1693 
1694  // Make variable that holds the current offset number
1695  unsigned offset_sum = 0;
1696 
1697  // Write the offset for the specific elements
1698  for (unsigned i = 0; i < number_of_elements; i++)
1699  {
1700  // Cast to FiniteElement and (in paranoid mode) check
1701  // if cast has failed.
1702  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1703 
1704 #ifdef PARANOID
1705  if (fe_pt == 0)
1706  {
1707  std::stringstream error_stream;
1708  error_stream << "Recast for element " << i << " failed" << std::endl;
1709  throw OomphLibError(
1710  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1711  }
1712 #endif
1713  fe_pt->write_paraview_offsets(file_out, nplot, offset_sum);
1714  }
1715 
1716  file_out << "</DataArray>\n"
1717  << "<DataArray type=\"UInt8\" Name=\"types\">\n";
1718 
1719  // Loop over all elements to get the type that they have
1720  for (unsigned i = 0; i < number_of_elements; i++)
1721  {
1722  // Cast to FiniteElement and (in paranoid mode) check
1723  // if cast has failed.
1724  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1725 
1726 #ifdef PARANOID
1727  if (fe_pt == 0)
1728  {
1729  std::stringstream error_stream;
1730  error_stream << "Recast for element " << i << " failed" << std::endl;
1731  throw OomphLibError(
1732  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1733  }
1734 #endif
1735 
1736  fe_pt->write_paraview_type(file_out, nplot);
1737  }
1738 
1739  file_out << "</DataArray>\n"
1740  << "</Cells>\n";
1741 
1742 
1743  // File Closure
1744  //-------------
1745  file_out << "</Piece>\n"
1746  << "</UnstructuredGrid>\n"
1747  << "</VTKFile>";
1748  }
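A minimal driver-style sketch of how the function above might be called; the mesh pointer, the number of plot points, the file name and the exact-solution function are illustrative assumptions, not part of mesh.cc:

#include <fstream>
#include "mesh.h"

// Hypothetical exact solution with the
// FiniteElement::SteadyExactSolutionFctPt signature (placeholder field)
void my_exact_soln(const oomph::Vector<double>& x, oomph::Vector<double>& soln)
{
  soln[0] = x[0] * x[1];
}

// Write a .vtu file whose scalar fields hold the exact solution,
// evaluated at the plot points of every element in the mesh
void doc_exact_soln_paraview(oomph::Mesh* mesh_pt)
{
  std::ofstream vtu_file("exact_soln.vtu");
  unsigned npts = 5; // plot points per coordinate direction
  mesh_pt->output_fct_paraview(vtu_file, npts, &my_exact_soln);
  vtu_file.close();
}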
1749 
1750 
1751  //========================================================
1752  /// Output in paraview format into specified file.
1753  ///
1754  /// Breaks up each element into sub-elements for plotting
1755  /// purposes. We assume that all elements are of the same
1756  /// type (fct will break (in paranoid mode) if paraview
1757  /// output fcts of the elements are inconsistent).
1758  //========================================================
1759  void Mesh::output_fct_paraview(
1760  std::ofstream& file_out,
1761  const unsigned& nplot,
1762  const double& time,
1763  FiniteElement::UnsteadyExactSolutionFctPt exact_soln_pt) const
1764  {
1765  // Change the scientific format so that E is used rather than e
1766  file_out.setf(std::ios_base::uppercase);
1767 
1768  // Decide how many elements there are to be plotted
1769  unsigned long number_of_elements = this->Element_pt.size();
1770 
1771  // Cast the first element to a finite element and (in paranoid mode) check whether the cast has failed.
1772  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(0));
1773 
1774 #ifdef PARANOID
1775  if (fe_pt == 0)
1776  {
1777  throw OomphLibError("Recast for FiniteElement failed for element 0!\n",
1778  OOMPH_CURRENT_FUNCTION,
1779  OOMPH_EXCEPTION_LOCATION);
1780  }
1781 #endif
1782 
1783 
1784 #ifdef PARANOID
1785  // Check that all elements have the same number of degrees of freedom;
1786  // if they don't, Paraview will break
1787  unsigned el_zero_ndof = fe_pt->nscalar_paraview();
1788  for (unsigned i = 1; i < number_of_elements; i++)
1789  {
1790  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1791  unsigned el_i_ndof = fe_pt->nscalar_paraview();
1792  if (el_zero_ndof != el_i_ndof)
1793  {
1794  std::stringstream error_stream;
1795  error_stream
1796  << "Element " << i << " has different number of degrees of freedom\n"
1797  << "than from previous elements, Paraview cannot handle this.\n"
1798  << "We suggest that the problem is broken up into submeshes instead."
1799  << std::endl;
1800  throw OomphLibError(
1801  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1802  }
1803  }
1804 #endif
1805 
1806  // Make variables to hold the number of nodes and elements
1807  unsigned long number_of_nodes = 0;
1808  unsigned long total_number_of_elements = 0;
1809 
1810  // Loop over all the elements to find total number of plot points
1811  for (unsigned i = 0; i < number_of_elements; i++)
1812  {
1813  // Cast to FiniteElement and (in paranoid mode) check
1814  // if cast has failed.
1815  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1816 
1817 #ifdef PARANOID
1818  if (fe_pt == 0)
1819  {
1820  std::stringstream error_stream;
1821  error_stream << "Recast for element " << i << " failed" << std::endl;
1822  throw OomphLibError(
1823  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1824  }
1825 #endif
1826 
1827  number_of_nodes += fe_pt->nplot_points_paraview(nplot);
1828  total_number_of_elements += fe_pt->nsub_elements_paraview(nplot);
1829  }
1830 
1831 
1832  // File Declaration
1833  //------------------
1834 
1835  // Insert the necessary lines plus header of file, and
1836  // number of nodes and elements
1837  file_out << "<?xml version=\"1.0\"?>\n"
1838  << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\" "
1839  << "byte_order=\"LittleEndian\">\n"
1840  << "<UnstructuredGrid>\n"
1841  << "<Piece NumberOfPoints=\"" << number_of_nodes
1842  << "\" NumberOfCells=\"" << total_number_of_elements << "\">\n";
1843 
1844 
1845  // Point Data
1846  //-----------
1847 
1848  // Check the number of degrees of freedom
1849  unsigned ndof = fe_pt->nscalar_paraview();
1850 
1851  // Point data is going in here
1852  file_out << "<PointData ";
1853 
1854  // Insert just the first scalar name, since paraview reads everything
1855  // else after that as being of the same type. Get information from
1856  // first element.
1857  file_out << "Scalars=\"" << fe_pt->scalar_name_paraview(0) << "\">\n";
1858 
1859  // Loop over the scalar fields (index i) and the elements (index j)
1860  for (unsigned i = 0; i < ndof; i++)
1861  {
1862  file_out << "<DataArray type=\"Float32\" "
1863  << "Name=\"" << fe_pt->scalar_name_paraview(i) << "\" "
1864  << "format=\"ascii\""
1865  << ">\n";
1866 
1867  for (unsigned j = 0; j < number_of_elements; j++)
1868  {
1869  // Cast to FiniteElement and (in paranoid mode) check
1870  // if cast has failed.
1871  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(j));
1872 
1873 #ifdef PARANOID
1874  if (fe_pt == 0)
1875  {
1876  std::stringstream error_stream;
1877  error_stream << "Recast for element " << j << " failed" << std::endl;
1878  throw OomphLibError(error_stream.str(),
1879  OOMPH_CURRENT_FUNCTION,
1880  OOMPH_EXCEPTION_LOCATION);
1881  }
1882 #endif
1883 
1884  fe_pt->scalar_value_fct_paraview(
1885  file_out, i, nplot, time, exact_soln_pt);
1886  }
1887 
1888  // Close off the DataArray
1889  file_out << "</DataArray>\n";
1890  }
1891 
1892  // Close off the PointData set
1893  file_out << "</PointData>\n";
1894 
1895 
1896  // Geometric Points
1897  //------------------
1898 
1899  file_out << "<Points>\n"
1900  << "<DataArray type=\"Float32\""
1901  << " NumberOfComponents=\""
1902  // This always has to be 3 for an unstructured grid
1903  << 3 << "\" "
1904  << "format=\"ascii\">\n";
1905 
1906  // Loop over all the elements to print their plot points
1907  for (unsigned i = 0; i < number_of_elements; i++)
1908  {
1909  // Cast to FiniteElement and (in paranoid mode) check
1910  // if cast has failed.
1911  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1912 
1913 #ifdef PARANOID
1914  if (fe_pt == 0)
1915  {
1916  std::stringstream error_stream;
1917  error_stream << "Recast for element " << i << " failed" << std::endl;
1918  throw OomphLibError(
1919  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1920  }
1921 #endif
1922 
1923  fe_pt->output_paraview(file_out, nplot);
1924  }
1925 
1926  file_out << "</DataArray>\n"
1927  << "</Points>\n";
1928 
1929 
1930  // Cells
1931  //-------
1932 
1933  file_out
1934  << "<Cells>\n"
1935  << "<DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">\n";
1936 
1937  // Make a counter to keep track of the plot points of all the local
1938  // elements, because Paraview's connectivity list requires a global numbering
1939  unsigned counter = 0;
1940 
1941  // Write connectivity with the local elements
1942  for (unsigned i = 0; i < number_of_elements; i++)
1943  {
1944  // Cast to FiniteElement and (in paranoid mode) check
1945  // if cast has failed.
1946  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1947 
1948 #ifdef PARANOID
1949  if (fe_pt == 0)
1950  {
1951  std::stringstream error_stream;
1952  error_stream << "Recast for element " << i << " failed" << std::endl;
1953  throw OomphLibError(
1954  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1955  }
1956 #endif
1957  fe_pt->write_paraview_output_offset_information(file_out, nplot, counter);
1958  }
1959 
1960  file_out << "</DataArray>\n"
1961  << "<DataArray type=\"Int32\" "
1962  << "Name=\"offsets\" format=\"ascii\">\n";
1963 
1964  // Make variable that holds the current offset number
1965  unsigned offset_sum = 0;
1966 
1967  // Write the offset for the specific elements
1968  for (unsigned i = 0; i < number_of_elements; i++)
1969  {
1970  // Cast to FiniteElement and (in paranoid mode) check
1971  // if cast has failed.
1972  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1973 
1974 #ifdef PARANOID
1975  if (fe_pt == 0)
1976  {
1977  std::stringstream error_stream;
1978  error_stream << "Recast for element " << i << " failed" << std::endl;
1979  throw OomphLibError(
1980  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1981  }
1982 #endif
1983  fe_pt->write_paraview_offsets(file_out, nplot, offset_sum);
1984  }
1985 
1986  file_out << "</DataArray>\n"
1987  << "<DataArray type=\"UInt8\" Name=\"types\">\n";
1988 
1989  // Loop over all elements to get the type that they have
1990  for (unsigned i = 0; i < number_of_elements; i++)
1991  {
1992  // Cast to FiniteElement and (in paranoid mode) check
1993  // if cast has failed.
1994  FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1995 
1996 #ifdef PARANOID
1997  if (fe_pt == 0)
1998  {
1999  std::stringstream error_stream;
2000  error_stream << "Recast for element " << i << " failed" << std::endl;
2001  throw OomphLibError(
2002  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2003  }
2004 #endif
2005 
2006  fe_pt->write_paraview_type(file_out, nplot);
2007  }
2008 
2009  file_out << "</DataArray>\n"
2010  << "</Cells>\n";
2011 
2012 
2013  // File Closure
2014  //-------------
2015  file_out << "</Piece>\n"
2016  << "</UnstructuredGrid>\n"
2017  << "</VTKFile>";
2018  }
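The time-dependent variant is used in the same way; a sketch under the same assumptions, with a placeholder time-dependent solution:

#include <cmath>
#include <fstream>
#include "mesh.h"

// Hypothetical exact solution with the
// FiniteElement::UnsteadyExactSolutionFctPt signature (placeholder field)
void my_unsteady_exact_soln(const double& time,
                            const oomph::Vector<double>& x,
                            oomph::Vector<double>& soln)
{
  soln[0] = x[0] * std::cos(time);
}

void doc_unsteady_exact_soln_paraview(oomph::Mesh* mesh_pt, const double& time)
{
  std::ofstream vtu_file("exact_soln_unsteady.vtu");
  mesh_pt->output_fct_paraview(vtu_file, 5, time, &my_unsteady_exact_soln);
  vtu_file.close();
}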
2019 
2020 
2021  //========================================================
2022  /// Output function for the mesh class
2023  ///
2024  /// Loop over all elements and plot (i.e. execute
2025  /// the element's own output() function)
2026  //========================================================
2027  void Mesh::output(std::ostream& outfile)
2028  {
2029  // Loop over the elements and call their output functions
2030  // Assign Element_pt_range
2031  unsigned long Element_pt_range = Element_pt.size();
2032  for (unsigned long e = 0; e < Element_pt_range; e++)
2033  {
2034  // Try to cast to FiniteElement
2035  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2036  if (el_pt == 0)
2037  {
2038  oomph_info << "Can't execute output(...) for non FiniteElements"
2039  << std::endl;
2040  }
2041  else
2042  {
2043 #ifdef OOMPH_HAS_MPI
2044  if (Output_halo_elements)
2045 #endif
2046  {
2047  el_pt->output(outfile);
2048  }
2049 #ifdef OOMPH_HAS_MPI
2050  else
2051  {
2052  if (!el_pt->is_halo())
2053  {
2054  el_pt->output(outfile);
2055  }
2056  }
2057 #endif
2058  }
2059  }
2060  }
2061 
2062  //========================================================
2063  /// Output function for the mesh class
2064  ///
2065  /// Loop over all elements and plot (i.e. execute
2066  /// the element's own output() function). Use
2067  /// n_plot plot points in each coordinate direction.
2068  //========================================================
2069  void Mesh::output(std::ostream& outfile, const unsigned& n_plot)
2070  {
2071  // Loop over the elements and call their output functions
2072  // Assign Element_pt_range
2073  unsigned long Element_pt_range = Element_pt.size();
2074 
2075  for (unsigned long e = 0; e < Element_pt_range; e++)
2076  {
2077  // Try to cast to FiniteElement
2078  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2079  if (el_pt == 0)
2080  {
2081  oomph_info << "Can't execute output(...) for non FiniteElements"
2082  << std::endl;
2083  }
2084  else
2085  {
2086 #ifdef OOMPH_HAS_MPI
2087  if (Output_halo_elements)
2088 #endif
2089  {
2090  el_pt->output(outfile, n_plot);
2091  }
2092 #ifdef OOMPH_HAS_MPI
2093  else
2094  {
2095  if (!el_pt->is_halo())
2096  {
2097  el_pt->output(outfile, n_plot);
2098  }
2099  }
2100 #endif
2101  }
2102  }
2103  }
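For reference, a sketch of the most common use of the output functions above: dump the whole mesh to a single file after a solve. The file name and number of plot points are arbitrary illustrative choices:

#include <fstream>
#include "mesh.h"

void doc_solution(oomph::Mesh* mesh_pt)
{
  std::ofstream soln_file("soln.dat");
  unsigned npts = 5; // plot points per coordinate direction
  mesh_pt->output(soln_file, npts); // delegates to each element's output()
  soln_file.close();
}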
2104 
2105 
2106  //========================================================
2107  /// Output function for the mesh class
2108  ///
2109  /// Loop over all elements and plot (i.e. execute
2110  /// the element's own output() function)
2111  /// (C style output)
2112  //========================================================
2113  void Mesh::output(FILE* file_pt)
2114  {
2115  // Loop over the elements and call their output functions
2116  // Assign Element_pt_range
2117  unsigned long Element_pt_range = Element_pt.size();
2118  for (unsigned long e = 0; e < Element_pt_range; e++)
2119  {
2120  // Try to cast to FiniteElement
2121  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2122  if (el_pt == 0)
2123  {
2124  oomph_info << "Can't execute output(...) for non FiniteElements"
2125  << std::endl;
2126  }
2127  else
2128  {
2129 #ifdef OOMPH_HAS_MPI
2130  if (Output_halo_elements)
2131 #endif
2132  {
2133  el_pt->output(file_pt);
2134  }
2135 #ifdef OOMPH_HAS_MPI
2136  else
2137  {
2138  if (!el_pt->is_halo())
2139  {
2140  el_pt->output(file_pt);
2141  }
2142  }
2143 #endif
2144  }
2145  }
2146  }
2147 
2148  //========================================================
2149  /// Output function for the mesh class
2150  ///
2151  /// Loop over all elements and plot (i.e. execute
2152  /// the element's own output() function). Use
2153  /// n_plot plot points in each coordinate direction.
2154  /// (C style output)
2155  //========================================================
2156  void Mesh::output(FILE* file_pt, const unsigned& n_plot)
2157  {
2158  // Loop over the elements and call their output functions
2159  // Assign Element_pt_range
2160  unsigned long Element_pt_range = Element_pt.size();
2161  for (unsigned long e = 0; e < Element_pt_range; e++)
2162  {
2163  // Try to cast to FiniteElement
2164  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2165  if (el_pt == 0)
2166  {
2167  oomph_info << "Can't execute output(...) for non FiniteElements"
2168  << std::endl;
2169  }
2170  else
2171  {
2172 #ifdef OOMPH_HAS_MPI
2173  if (Output_halo_elements)
2174 #endif
2175  {
2176  el_pt->output(file_pt, n_plot);
2177  }
2178 #ifdef OOMPH_HAS_MPI
2179  else
2180  {
2181  if (!el_pt->is_halo())
2182  {
2183  el_pt->output(file_pt, n_plot);
2184  }
2185  }
2186 #endif
2187  }
2188  }
2189  }
2190 
2191 
2192  //========================================================
2193  /// Output function for the mesh class
2194  ///
2195  /// Loop over all elements and plot (i.e. execute
2196  /// the element's own output() function). Use
2197  /// n_plot plot points in each coordinate direction.
2198  //========================================================
2199  void Mesh::output_fct(std::ostream& outfile,
2200  const unsigned& n_plot,
2201  FiniteElement::SteadyExactSolutionFctPt exact_soln_pt)
2202  {
2203  // Loop over the elements and call their output functions
2204  // Assign Element_pt_range
2205  unsigned long Element_pt_range = Element_pt.size();
2206  for (unsigned long e = 0; e < Element_pt_range; e++)
2207  {
2208  // Try to cast to FiniteElement
2209  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2210  if (el_pt == 0)
2211  {
2212  oomph_info << "Can't execute output_fct(...) for non FiniteElements"
2213  << std::endl;
2214  }
2215  else
2216  {
2217 #ifdef OOMPH_HAS_MPI
2218  if (Output_halo_elements)
2219 #endif
2220  {
2221  el_pt->output_fct(outfile, n_plot, exact_soln_pt);
2222  }
2223 #ifdef OOMPH_HAS_MPI
2224  else
2225  {
2226  if (!el_pt->is_halo())
2227  {
2228  el_pt->output_fct(outfile, n_plot, exact_soln_pt);
2229  }
2230  }
2231 #endif
2232  }
2233  }
2234  }
2235 
2236  //========================================================
2237  /// Output function for the mesh class
2238  ///
2239  /// Loop over all elements and plot (i.e. execute
2240  /// the element's own output() function) at time t. Use
2241  /// n_plot plot points in each coordinate direction.
2242  //========================================================
2243  void Mesh::output_fct(std::ostream& outfile,
2244  const unsigned& n_plot,
2245  const double& time,
2246  FiniteElement::UnsteadyExactSolutionFctPt exact_soln_pt)
2247  {
2248  // Loop over the elements and call their output functions
2249  // Assign Element_pt_range
2250  unsigned long Element_pt_range = Element_pt.size();
2251  for (unsigned long e = 0; e < Element_pt_range; e++)
2252  {
2253  // Try to cast to FiniteElement
2254  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2255  if (el_pt == 0)
2256  {
2257  oomph_info << "Can't execute output_fct(...) for non FiniteElements"
2258  << std::endl;
2259  }
2260  else
2261  {
2262 #ifdef OOMPH_HAS_MPI
2263  if (Output_halo_elements)
2264 #endif
2265  {
2266  el_pt->output_fct(outfile, n_plot, time, exact_soln_pt);
2267  }
2268 #ifdef OOMPH_HAS_MPI
2269  else
2270  {
2271  if (!el_pt->is_halo())
2272  {
2273  el_pt->output_fct(outfile, n_plot, time, exact_soln_pt);
2274  }
2275  }
2276 #endif
2277  }
2278  }
2279  }
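A sketch of how the exact-solution output above is typically used, e.g. to produce reference data to plot next to the computed solution; the exact solution and the file name are illustrative assumptions:

#include <fstream>
#include "mesh.h"

// Hypothetical exact solution with the
// FiniteElement::SteadyExactSolutionFctPt signature (placeholder field)
void steady_exact_soln(const oomph::Vector<double>& x,
                       oomph::Vector<double>& soln)
{
  soln[0] = x[0] + x[1];
}

void doc_exact_solution(oomph::Mesh* mesh_pt)
{
  std::ofstream exact_file("exact_soln.dat");
  mesh_pt->output_fct(exact_file, 5, &steady_exact_soln);
  exact_file.close();
}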
2280 
2281  //==================================================================
2282  /// Assign the initial values for an impulsive start, which is
 2283  /// achieved by looping over all data in the mesh (internal element
 2284  /// data and data stored at nodes) and calling the
 2285  /// assign_initial_values_impulsive() function of each data's
 2286  /// timestepper
2287  //=================================================================
2288  void Mesh::assign_initial_values_impulsive()
2289  {
2290  // Loop over the elements
2291  unsigned long Nelement = nelement();
2292  for (unsigned long e = 0; e < Nelement; e++)
2293  {
2294  // Find the number of internal dofs
2295  unsigned Ninternal = element_pt(e)->ninternal_data();
2296  // Loop over internal data and assign the initial values
2297  // using the internal data's timestepper
2298  for (unsigned j = 0; j < Ninternal; j++)
2299  {
2300  element_pt(e)
2301  ->internal_data_pt(j)
2302  ->time_stepper_pt()
2303  ->assign_initial_values_impulsive(element_pt(e)->internal_data_pt(j));
2304  }
2305  }
2306 
2307  // Loop over the nodes
2308  unsigned long n_node = nnode();
2309  for (unsigned long n = 0; n < n_node; n++)
2310  {
2311  // Assign initial values using the Node's timestepper
2312  Node_pt[n]->time_stepper_pt()->assign_initial_values_impulsive(
2313  Node_pt[n]);
2314  // Assign initial positions using the Node's timestepper
2315  Node_pt[n]
2316  ->position_time_stepper_pt()
2317  ->assign_initial_positions_impulsive(Node_pt[n]);
2318  }
2319  }
2320 
2321  //===============================================================
2322  /// Shift time-dependent data along for next timestep:
2323  /// Again this is achieved by looping over all data and calling
2324  /// the functions defined in each data object's timestepper.
2325  //==============================================================
2326  void Mesh::shift_time_values()
2327  {
2328  // Loop over the elements which shift their internal data
2329  // via their own timesteppers
2330  const unsigned long Nelement = nelement();
2331  for (unsigned long e = 0; e < Nelement; e++)
2332  {
2333  // Find the number of internal dofs
2334  const unsigned Ninternal = element_pt(e)->ninternal_data();
2335  // Loop over internal dofs and shift the time values
2337  // using the internal data's timestepper
2337  for (unsigned j = 0; j < Ninternal; j++)
2338  {
2339  element_pt(e)
2340  ->internal_data_pt(j)
2341  ->time_stepper_pt()
2342  ->shift_time_values(element_pt(e)->internal_data_pt(j));
2343  }
2344  }
2345 
2346  // Loop over the nodes
2347  const unsigned long n_node = nnode();
2348  for (unsigned long n = 0; n < n_node; n++)
2349  {
2350  // Shift the Data associated with the nodes with the Node's own
2351  // timestepper
2352  Node_pt[n]->time_stepper_pt()->shift_time_values(Node_pt[n]);
2353  // Push history of nodal positions back
2354  Node_pt[n]->position_time_stepper_pt()->shift_time_positions(Node_pt[n]);
2355  }
2356  }
2357 
2358  //=========================================================================
2359  /// Calculate predictions for all Data and positions associated
2360  /// with the mesh. This is usually only used for adaptive time-stepping
2361  /// when the comparison between a predicted value and the actual value
2362  /// is usually used to determine the change in step size. Again the
2363  /// loop is over all data in the mesh and individual timestepper functions
2364  /// for each data value are called.
2365  //=========================================================================
2366  void Mesh::calculate_predicted_values()
2367  {
2368  // Loop over the elements which shift their internal data
2369  // via their own timesteppers
2370  const unsigned long Nelement = nelement();
2371  for (unsigned long e = 0; e < Nelement; e++)
2372  {
2373  // Find the number of internal dofs
2374  const unsigned Ninternal = element_pt(e)->ninternal_data();
2375  // Loop over internal data and calculate the predicted values
2376  // using the internal data's timestepper
2377  for (unsigned j = 0; j < Ninternal; j++)
2378  {
2379  element_pt(e)
2380  ->internal_data_pt(j)
2381  ->time_stepper_pt()
2382  ->calculate_predicted_values(element_pt(e)->internal_data_pt(j));
2383  }
2384  }
2385 
2386  // Loop over the nodes
2387  const unsigned long n_node = nnode();
2388  for (unsigned long n = 0; n < n_node; n++)
2389  {
2390  // Calculate the predicted values at the nodes
2391  Node_pt[n]->time_stepper_pt()->calculate_predicted_values(Node_pt[n]);
2392  // Calculate the predicted positions
2393  Node_pt[n]->position_time_stepper_pt()->calculate_predicted_positions(
2394  Node_pt[n]);
2395  }
2396  }
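The three functions above are the mesh-level building blocks of the time-stepping machinery; in normal use they are driven by the Problem class rather than called directly. A rough sketch of where they would sit in a hand-rolled time loop (the actual solve and the advancing of global time are omitted):

#include "mesh.h"

void naive_time_loop(oomph::Mesh* mesh_pt, const unsigned& nstep)
{
  // Make the history values consistent with an impulsive start
  // from the currently stored values/positions
  mesh_pt->assign_initial_values_impulsive();

  for (unsigned i = 0; i < nstep; i++)
  {
    // Push the current values and nodal positions back in time
    // before computing the solution at the next time level
    mesh_pt->shift_time_values();

    // For adaptive time-stepping: evaluate the predictor so that the
    // temporal error can be estimated once the step has been computed
    mesh_pt->calculate_predicted_values();

    // ... advance time and solve for the unknowns at the new time level ...
  }
}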
2397 
2398  //===============================================================
2399  /// Virtual function that should be overloaded if the mesh
2400  /// has any mesh level storage of the timestepper
2401  //==================================================================
2402  void Mesh::set_mesh_level_time_stepper(TimeStepper* const& time_stepper_pt,
2403  const bool& preserve_existing_data)
2404  {
2405 #ifdef PARANOID
2406  if (!Suppress_warning_about_empty_mesh_level_time_stepper_function)
2407  {
2408  std::ostringstream warning_stream;
2409  warning_stream
2410  << "Empty set_mesh_level_time_stepper() has been called.\n"
2411  << "This function needs to be overloaded to reset any (pointers to) \n"
2412  << "timesteppers for meshes that store timesteppers in locations "
2413  "other\n"
2414  << "than the Nodes or Elements;\n"
2415  << "e.g. SpineMeshes have SpineData with timesteppers,\n"
2416  << "Triangle and TetMeshes store the timestepper for use in "
2417  "adaptivity.\n\n\n";
2418  warning_stream
2419  << "If you are solving a continuation or bifurcation detecion\n"
2420  << "problem and strange things are happening, then check that\n"
2421  << "you don't need to overload this function for your mesh."
2422  << "\n This warning can be suppressed by setting:\n"
2423  << "Mesh::Suppress_warning_about_empty_mesh_level_time_stepper_"
2424  "function=true"
2425  << std::endl;
2426  OomphLibWarning(
2427  warning_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2428  }
2429 #endif
2430  }
2431 
2432  //===============================================================
2433  /// Set the values of auxiliary data used in continuation problems
2434  /// when the data is pinned.
2435  //==================================================================
2436  void Mesh::set_consistent_pinned_values_for_continuation(
2437  ContinuationStorageScheme* const& continuation_storage_pt)
2438  {
2439  // Loop over the nodes
2440  const unsigned long n_node = this->nnode();
2441  for (unsigned long n = 0; n < n_node; n++)
2442  {
2443  continuation_storage_pt->set_consistent_pinned_values(this->Node_pt[n]);
2444  continuation_storage_pt->set_consistent_pinned_positions(
2445  this->Node_pt[n]);
2446  }
2447 
2448  // Loop over the elements
2449  const unsigned long n_element = this->nelement();
2450  for (unsigned long e = 0; e < n_element; e++)
2451  {
2452  // Cache pointer to the element
2453  GeneralisedElement* const elem_pt = this->element_pt(e);
2454  // Find the number of internal dofs
2455  const unsigned n_internal = elem_pt->ninternal_data();
2456 
2457  // Loop over internal dofs and test the data
2458  for (unsigned j = 0; j < n_internal; j++)
2459  {
2460  continuation_storage_pt->set_consistent_pinned_values(
2461  elem_pt->internal_data_pt(j));
2462  }
2463  }
2464  }
2465 
2466 
2467  //===============================================================
2468  /// Return true if the pointer corresponds to any data stored in
2469  /// the mesh and false if not
2470  //==================================================================
2471  bool Mesh::does_pointer_correspond_to_mesh_data(double* const& parameter_pt)
2472  {
2473  // Loop over the nodes
2474  const unsigned long n_node = this->nnode();
2475  for (unsigned long n = 0; n < n_node; n++)
2476  {
2477  // Check the values and positional data associated with each node
2478  if ((this->Node_pt[n]->does_pointer_correspond_to_value(parameter_pt)) ||
2479  (this->Node_pt[n]->does_pointer_correspond_to_position_data(
2480  parameter_pt)))
2481  {
2482  return true;
2483  }
2484  }
2485 
2486  // Loop over the elements
2487  const unsigned long n_element = this->nelement();
2488  for (unsigned long e = 0; e < n_element; e++)
2489  {
2490  // Cache pointer to the element
2491  GeneralisedElement* const elem_pt = this->element_pt(e);
2492 
2493  // Find the number of internal dofs
2494  const unsigned n_internal = elem_pt->ninternal_data();
2495 
2496  // Loop over internal dofs and test the data
2497  for (unsigned j = 0; j < n_internal; j++)
2498  {
2499  if (elem_pt->internal_data_pt(j)->does_pointer_correspond_to_value(
2500  parameter_pt))
2501  {
2502  return true;
2503  }
2504  }
2505  }
2506 
2507  // If we get here we haven't found the data, so return false
2508  return false;
2509  }
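A small sketch of the kind of check the function above enables, e.g. in continuation code that needs to know whether a parameter is an external quantity or one of the mesh's own unknowns; the driver function is hypothetical:

#include <iostream>
#include "mesh.h"
#include "oomph_utilities.h"

void warn_if_parameter_is_mesh_data(oomph::Mesh* mesh_pt, double* parameter_pt)
{
  if (mesh_pt->does_pointer_correspond_to_mesh_data(parameter_pt))
  {
    oomph::oomph_info
      << "The continuation parameter is stored in the mesh itself\n"
      << "(nodal value, nodal position or internal element data),\n"
      << "so it is one of the problem's unknowns rather than an\n"
      << "independent external parameter." << std::endl;
  }
}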
2510 
2511 
2512  //===============================================================
2513  /// Set the time stepper associated with all the nodal data
2514  /// in the problem
2515  //==============================================================
2516  void Mesh::set_nodal_time_stepper(TimeStepper* const& time_stepper_pt,
2517  const bool& preserve_existing_data)
2518  {
2519  // Loop over the nodes
2520  const unsigned long n_node = this->nnode();
2521  for (unsigned long n = 0; n < n_node; n++)
2522  {
2523  // Set the timestepper associated with each node
2524  this->Node_pt[n]->set_time_stepper(time_stepper_pt,
2525  preserve_existing_data);
2526  this->Node_pt[n]->set_position_time_stepper(time_stepper_pt,
2527  preserve_existing_data);
2528  }
2529  }
2530 
2531  //===============================================================
2532  /// Set the time stepper associated with all internal data stored
2533  /// in the elements in the mesh
2534  //===============================================================
2535  void Mesh::set_elemental_internal_time_stepper(
2536  TimeStepper* const& time_stepper_pt, const bool& preserve_existing_data)
2537  {
2538  // Loop over the elements
2539  const unsigned long n_element = this->nelement();
2540  for (unsigned long e = 0; e < n_element; e++)
2541  {
2542  // Find the number of internal dofs
2543  const unsigned n_internal = this->element_pt(e)->ninternal_data();
2544 
2545  // Loop over internal dofs and set the timestepper
2546  for (unsigned j = 0; j < n_internal; j++)
2547  {
2548  this->element_pt(e)->internal_data_pt(j)->set_time_stepper(
2549  time_stepper_pt, preserve_existing_data);
2550  }
2551  }
2552  }
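A sketch of how the two helpers above (together with set_mesh_level_time_stepper()) might be combined when moving a mesh's data onto a different timestepper; it is written as a member of a hypothetical derived mesh so that it compiles regardless of whether the helpers are protected in Mesh:

#include "mesh.h"

class MyMesh : public oomph::Mesh
{
public:
  // Hypothetical helper: hand all nodal and elemental-internal data in this
  // mesh over to a new timestepper, keeping the existing values
  void use_new_time_stepper(oomph::TimeStepper* const& new_time_stepper_pt)
  {
    const bool preserve_existing_data = true;
    set_nodal_time_stepper(new_time_stepper_pt, preserve_existing_data);
    set_elemental_internal_time_stepper(new_time_stepper_pt,
                                        preserve_existing_data);
    // Meshes with additional storage (e.g. SpineMeshes) overload this one
    set_mesh_level_time_stepper(new_time_stepper_pt, preserve_existing_data);
  }
};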
2553 
2554  //========================================================================
2555  /// A function that upgrades an ordinary node to a boundary node.
 2556  /// All pointers to the node from the mesh's elements are found
 2557  /// and replaced by pointers to the new boundary node. If the node
2558  /// is present in the mesh's list of nodes, that pointer is also
2559  /// replaced. Finally, the pointer argument node_pt addresses the new
2560  /// node on return from the function.
2561  /// We shouldn't ever really use this, but it does make life that
2562  /// bit easier for the lazy mesh writer.
2563  //=======================================================================
2564  void Mesh::convert_to_boundary_node(Node*& node_pt)
2565  {
2566  // Cache a list of FiniteElement pointers for use in this function.
2567  Vector<FiniteElement*> fe_pt(nelement(), 0);
2568  for (unsigned e = 0, ne = nelement(); e < ne; e++)
2569  {
2570  // Some elements may not have been built yet; just store a null pointer
2571  // for these cases.
2572  if (Element_pt[e] == 0) fe_pt[e] = 0;
2573  else
2574  fe_pt[e] = finite_element_pt(e);
2575  }
2576 
2577  // Now call the real function
2578  convert_to_boundary_node(node_pt, fe_pt);
2579  }
2580 
2581  // ============================================================
2582  /// As convert_to_boundary_node but with a vector of pre-"dynamic cast"ed
2583  /// pointers passed in. If this function is being called often then
2584  /// creating this vector and passing it in explicitly each time can give a
2585  /// large speed up.
2586  // Note: the real reason that this function is so slow in the first place
2587  // is because it has to loop over all elements. So if you use this function
2588  // O(N) times your boundary node creation complexity is O(N^2).
2589  // ============================================================
2590  void Mesh::convert_to_boundary_node(
2591  Node*& node_pt, const Vector<FiniteElement*>& finite_element_pt)
2592  {
2593  // If the node is already a boundary node, then return straight away,
2594  // we don't need to do anything
2595  if (dynamic_cast<BoundaryNodeBase*>(node_pt) != 0)
2596  {
2597  return;
2598  }
2599 
2600  // Loop over all the elements in the mesh and find all those in which
2601  // the present node is referenced and the corresponding local node number
2602  // in those elements.
2603 
2604  // Storage for elements and local node number
2605  std::list<std::pair<unsigned long, int>>
2606  list_of_elements_and_local_node_numbers;
2607 
2608  // Loop over all elements
2609  unsigned long n_element = this->nelement();
2610  for (unsigned long e = 0; e < n_element; e++)
2611  {
2612  // Buffer the case when we have not yet filled up the element array
2613  // Unfortunately, we should not assume that the array has been filled
2614  // in a linear order, so we can't break out early.
2615  if (Element_pt[e] != 0)
2616  {
2617  // Find the local node number of the passed node
2618  int node_number = finite_element_pt[e]->get_node_number(node_pt);
2619  // If the node is present in the element, add it to our list and
2620  // NULL out the local element entries
2621  if (node_number != -1)
2622  {
2623  list_of_elements_and_local_node_numbers.insert(
2624  list_of_elements_and_local_node_numbers.end(),
2625  std::make_pair(e, node_number));
2626  // Null it out
2627  finite_element_pt[e]->node_pt(node_number) = 0;
2628  }
2629  }
2630  } // End of loop over elements
2631 
2632  // If there are no entries in the list we are in real trouble
2633  if (list_of_elements_and_local_node_numbers.empty())
2634  {
2635  std::ostringstream error_stream;
2636  error_stream << "Node " << node_pt
2637  << " is not contained in any elements in the Mesh."
2638  << std::endl
2639  << "How was it created then?" << std::endl;
2640 
2641  throw OomphLibError(
2642  error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2643  }
2644 
2645 
2646  // Create temporary storage for a pointer to the old node.
2647  // This is required because if we have passed a reference to the
2648  // first element that we find, constructing the new node
2649  // will over-write our pointer and we'll get segmentation faults.
2650  Node* old_node_pt = node_pt;
2651 
2652  // We now create the new node by using the first element in the list
2653  std::list<std::pair<unsigned long, int>>::iterator list_it =
2654  list_of_elements_and_local_node_numbers.begin();
2655 
2656  // Create a new boundary node, using the timestepper from the
2657  // original node
2658  Node* new_node_pt =
2659  finite_element_pt[list_it->first]->construct_boundary_node(
2660  list_it->second, node_pt->time_stepper_pt());
2661 
2662  // Now copy all the information across from the old node
2663 
2664  // Can we cast the node to a solid node
2665  SolidNode* solid_node_pt = dynamic_cast<SolidNode*>(new_node_pt);
2666  // If it's a solid node, use the SolidNode version of the copy
2667  if (solid_node_pt != 0)
2668  {
2669  solid_node_pt->copy(dynamic_cast<SolidNode*>(old_node_pt));
2670  }
2671  else
2672  {
2673  new_node_pt->copy(old_node_pt);
2674  }
2675 
2676  // Loop over all other elements in the list and set their pointers
2677  // to the new node
2678  for (++list_it; // Increment the iterator
2679  list_it != list_of_elements_and_local_node_numbers.end();
2680  ++list_it)
2681  {
2682  finite_element_pt[list_it->first]->node_pt(list_it->second) = new_node_pt;
2683  }
2684 
2685  // Finally, find the position of the node in the global mesh
2686  Vector<Node*>::iterator it =
2687  std::find(Node_pt.begin(), Node_pt.end(), old_node_pt);
2688 
2689  // If it is in the mesh, update the pointer
2690  if (it != Node_pt.end())
2691  {
2692  *it = new_node_pt;
2693  }
2694 
2695  // Can now delete the old node
2696  delete old_node_pt;
2697 
2698  // Replace the passed pointer by a pointer to the new node
2699  // Note that in most cases, this will be wasted work because the node
2700  // pointer will either be the pointer in the mesh's or an element's
2701  // node_pt vector. Still, assignment is quicker than an if to check this.
2702  node_pt = new_node_pt;
2703  }
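A sketch of the intended use in a hand-written mesh constructor: a node that was built as an ordinary node turns out to lie on a boundary, so it is upgraded and then recorded in the boundary lookup scheme. The boundary number and the node are assumed to exist, and the helper is written as a member of a hypothetical mesh class:

#include "mesh.h"

class MyLazyMesh : public oomph::Mesh
{
public:
  // Hypothetical helper used during mesh construction
  void make_boundary_node(oomph::Node*& node_pt, const unsigned& b)
  {
    // Upgrade the node in place; all element pointers to the old node are
    // redirected to the new boundary node and node_pt is updated too
    convert_to_boundary_node(node_pt);

    // Now the upgraded node can be added to the mesh's boundary storage
    add_boundary_node(b, node_pt);
  }
};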
2704 
2705 #ifdef OOMPH_HAS_MPI
2706 
2707  //========================================================================
2708  /// Setup shared node scheme: Shared node lookup scheme contains
2709  /// a unique correspondence between all nodes on the halo/haloed
2710  /// elements between two processors.
2711  //========================================================================
2712  void Mesh::setup_shared_node_scheme()
2713  {
2714  // Determine the shared nodes lookup scheme - all nodes located on the
2715  // halo(ed) elements between two domains. This scheme is necessary in order
2716  // to identify master nodes that may not be present in the halo-haloed
2717  // element lookup scheme between two processors (for example, if the node
2718  // is on an element which is in a lookup scheme between two higher-numbered
2719  // processors)
2720 
2721  double t_start = 0.0;
2722  if (Global_timings::Doc_comprehensive_timings)
2723  {
2724  t_start = TimingHelpers::timer();
2725  }
2726 
2727  // Store number of processors and current process
2728  int n_proc = Comm_pt->nproc();
2729  int my_rank = Comm_pt->my_rank();
2730 
2731  // Need to clear the shared node scheme first
2732  Shared_node_pt.clear();
2733 
2734  for (int d = 0; d < n_proc; d++)
2735  {
2736  // map of bools for whether the node has been shared,
2737  // initialised to 0 (false) for each domain d
2738  std::map<Node*, bool> node_shared;
2739 
2740  // For all domains lower than the current domain: Do halos first
2741  // then haloed, to ensure correct order in lookup scheme from
2742  // the other side
2743  if (d < my_rank)
2744  {
2745  // Get the nodes from the halo elements first
2746  Vector<GeneralisedElement*> halo_elem_pt(this->halo_element_pt(d));
2747  unsigned nhalo_elem = halo_elem_pt.size();
2748 
2749  for (unsigned e = 0; e < nhalo_elem; e++)
2750  {
2751  // Get finite element
2752  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
2753  if (el_pt != 0)
2754  {
2755  // Loop over nodes
2756  unsigned nnod = el_pt->nnode();
2757  for (unsigned j = 0; j < nnod; j++)
2758  {
2759  Node* nod_pt = el_pt->node_pt(j);
2760 
2761  // Add it as a shared node from current domain
2762  if (!node_shared[nod_pt])
2763  {
2764  this->add_shared_node_pt(d, nod_pt);
2765  node_shared[nod_pt] = true;
2766  }
2767 
2768  } // end loop over nodes
2769  }
2770 
2771  } // end loop over elements
2772 
2773  // Now get any left over nodes on the haloed elements
2774  Vector<GeneralisedElement*> haloed_elem_pt(this->haloed_element_pt(d));
2775  unsigned nhaloed_elem = haloed_elem_pt.size();
2776 
2777  for (unsigned e = 0; e < nhaloed_elem; e++)
2778  {
2779  // Get element
2780  FiniteElement* el_pt =
2781  dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
2782  if (el_pt != 0)
2783  {
2784  // Loop over the nodes
2785  unsigned nnod = el_pt->nnode();
2786  for (unsigned j = 0; j < nnod; j++)
2787  {
2788  Node* nod_pt = el_pt->node_pt(j);
2789 
2790  // Add it as a shared node from current domain
2791  if (!node_shared[nod_pt])
2792  {
2793  this->add_shared_node_pt(d, nod_pt);
2794  node_shared[nod_pt] = true;
2795  }
2796 
2797  } // end loop over nodes
2798  }
2799  } // end loop over elements
2800  }
2801 
2802  // If the domain is bigger than the current rank: Do haloed first
2803  // then halo, to ensure correct order in lookup scheme from
2804  // the other side
2805  if (d > my_rank)
2806  {
2807  // Get the nodes from the haloed elements first
2808  Vector<GeneralisedElement*> haloed_elem_pt(this->haloed_element_pt(d));
2809  unsigned nhaloed_elem = haloed_elem_pt.size();
2810 
2811  for (unsigned e = 0; e < nhaloed_elem; e++)
2812  {
2813  // Get element
2814  FiniteElement* el_pt =
2815  dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
2816  if (el_pt != 0)
2817  {
2818  // Loop over nodes
2819  unsigned nnod = el_pt->nnode();
2820  for (unsigned j = 0; j < nnod; j++)
2821  {
2822  Node* nod_pt = el_pt->node_pt(j);
2823 
2824  // Add it as a shared node from current domain
2825  if (!node_shared[nod_pt])
2826  {
2827  this->add_shared_node_pt(d, nod_pt);
2828  node_shared[nod_pt] = true;
2829  }
2830 
2831  } // end loop over nodes
2832  }
2833  } // end loop over elements
2834 
2835  // Now get the nodes from any halo elements left over
2836  Vector<GeneralisedElement*> halo_elem_pt(this->halo_element_pt(d));
2837  unsigned nhalo_elem = halo_elem_pt.size();
2838 
2839  for (unsigned e = 0; e < nhalo_elem; e++)
2840  {
2841  // Get element
2842  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
2843  if (el_pt != 0)
2844  {
2845  // Loop over nodes
2846  unsigned nnod = el_pt->nnode();
2847  for (unsigned j = 0; j < nnod; j++)
2848  {
2849  Node* nod_pt = el_pt->node_pt(j);
2850 
2851  // Add it as a shared node from current domain
2852  if (!node_shared[nod_pt])
2853  {
2854  this->add_shared_node_pt(d, nod_pt);
2855  node_shared[nod_pt] = true;
2856  }
2857 
2858  } // end loop over nodes
2859  }
2860  } // end loop over elements
2861 
2862  } // end if (d ...)
2863 
2864  } // end loop over processes
2865 
2866 
2867  double t_end = 0.0;
2868  if (Global_timings::Doc_comprehensive_timings)
2869  {
2870  t_end = TimingHelpers::timer();
2871  oomph_info << "Time for identification of shared nodes: "
2872  << t_end - t_start << std::endl;
2873  oomph_info.stream_pt()->flush();
2874  }
2875  }
2876 
2877  //========================================================================
2878  /// Synchronise shared node lookup schemes to cater for the
2879  /// case where:
2880  /// (1) a certain node on the current processor is halo with proc p
2881  /// (i.e. its non-halo counterpart lives on processor p)
2882  /// (2) that node also exists (also as a halo) on another processor
2883  /// (q, say) where its non-halo counterpart is also known to be
2884  /// on processor p.
2885  /// However, without calling this function the current processor does not
2886  /// necessarily know that it shares a node with processor q. This
2887  /// information can be required, e.g. when synchronising hanging node
2888  /// schemes over all processors.
2889  //========================================================================
2890  void Mesh::synchronise_shared_nodes(const bool& report_stats)
2891  {
2892  double t_start = 0.0;
2893  if (Global_timings::Doc_comprehensive_timings)
2894  {
2895  t_start = TimingHelpers::timer();
2896  }
2897 
2898  double tt_start = 0.0;
2899  double tt_end = 0.0;
2900  if (Global_timings::Doc_comprehensive_timings)
2901  {
2902  tt_start = TimingHelpers::timer();
2903  }
2904 
2905  // Storage for current processor and number of processors
2906  int n_proc = Comm_pt->nproc();
2907  int my_rank = Comm_pt->my_rank();
2908 
2909 
2910 #ifdef PARANOID
2911  // Has some twit filled in shared nodes with own process?!
2912  // Check at start of function
2913  if (Shared_node_pt[my_rank].size() != 0)
2914  {
2915  throw OomphLibError(
2916  "Processor has shared nodes with itself! Something's gone wrong!",
2917  OOMPH_CURRENT_FUNCTION,
2918  OOMPH_EXCEPTION_LOCATION);
2919  }
2920 #endif
2921 
2922 
2923  // Stage 1: Populate the set of processor IDs that
2924  // each haloed node on current processor is haloed by.
2925  std::map<Node*, std::set<int>> shared_domain_set;
2926 
2927  // Associate unique number with any haloed nodes on this processor
2928  std::map<Node*, unsigned> global_haloed_node_number_plus_one;
2929  unsigned global_haloed_count = 0;
2930 
2931  // Loop over domains
2932  for (int d = 0; d < n_proc; d++)
2933  {
2934  // Don't talk to yourself
2935  if (d != my_rank)
2936  {
2937  // Loop over haloed nodes
2938  unsigned nnod_haloed = this->nhaloed_node(d);
2939  for (unsigned j = 0; j < nnod_haloed; j++)
2940  {
2941  Node* nod_pt = this->haloed_node_pt(d, j);
2942  shared_domain_set[nod_pt].insert(d);
2943  if (global_haloed_node_number_plus_one[nod_pt] == 0)
2944  {
2945  global_haloed_node_number_plus_one[nod_pt] =
2946  global_haloed_count + 1;
2947  global_haloed_count++;
2948  }
2949  }
2950  }
2951  }
2952 
2953  if (Global_timings::Doc_comprehensive_timings)
2954  {
2955  tt_end = TimingHelpers::timer();
2956  oomph_info << "Time for initial classification in "
2957  "Mesh::synchronise_shared_nodes(): "
2958  << tt_end - tt_start << std::endl;
2959  tt_start = TimingHelpers::timer();
2960  }
2961 
2962 
2963  // Stage 2: All-to-all communication to inform all processors that
2964  // hold halo nodes with current processor about all domains that the current
2965  // processor shares nodes with [This will allow the other processors to add
2966  // these nodes to their shared node lookup schemes with processors
2967  // that they currently "can't see"].
2968 
2969  // Data to be sent to each processor
2970  Vector<int> send_n(n_proc, 0);
2971 
2972  // Storage for all values to be sent to all processors
2973  Vector<unsigned> send_data;
2974 
2975  // Start location within send_data for data to be sent to each processor
2976  Vector<int> send_displacement(n_proc, 0);
2977 
2978 
2979  // Loop over haloed nodes with other domains
2980  for (int domain = 0; domain < n_proc; domain++)
2981  {
2982  // Set the offset for the current processor
2983  send_displacement[domain] = send_data.size();
2984 
2985  // Every processor works on haloed nodes with proc "domain" and
2986  // sends associations across to that domain. No need to talk to
2987  // yourself...
2988  if (domain != my_rank)
2989  {
2990  // Send total number of global haloed nodes
2991  // send_data.push_back(global_haloed_count);
2992 
2993  // Loop over haloed nodes
2994  unsigned nnod_haloed = this->nhaloed_node(domain);
2995  for (unsigned j = 0; j < nnod_haloed; j++)
2996  {
2997  Node* nod_pt = this->haloed_node_pt(domain, j);
2998 
2999  // Send global ID of haloed node
3000  send_data.push_back(global_haloed_node_number_plus_one[nod_pt] - 1);
3001 
3002  // Get the set of domains that halo this node
3003  std::set<int> tmp_shared_domain_set = shared_domain_set[nod_pt];
3004 
3005  // Insert number of shared domains into send data
3006  unsigned n_shared_domains = tmp_shared_domain_set.size();
3007  send_data.push_back(n_shared_domains);
3008 
3009  // Add shared domains
3010  for (std::set<int>::iterator it = tmp_shared_domain_set.begin();
3011  it != tmp_shared_domain_set.end();
3012  it++)
3013  {
3014  send_data.push_back(*it);
3015  }
3016  }
3017  }
3018 
3019  // Find the number of data added to the vector
3020  send_n[domain] = send_data.size() - send_displacement[domain];
3021  }
3022 
3023 
3024  // Storage for the number of data to be received from each processor
3025  Vector<int> receive_n(n_proc, 0);
3026 
3027  // Now send numbers of data to be sent between all processors
3028  MPI_Alltoall(
3029  &send_n[0], 1, MPI_INT, &receive_n[0], 1, MPI_INT, Comm_pt->mpi_comm());
3030 
3031 
3032  // We now prepare the data to be received
3033  // by working out the displacements from the received data
3034  Vector<int> receive_displacement(n_proc, 0);
3035  int receive_data_count = 0;
3036  for (int rank = 0; rank < n_proc; ++rank)
3037  {
3038  // Displacement is number of data received so far
3039  receive_displacement[rank] = receive_data_count;
3040  receive_data_count += receive_n[rank];
3041  }
3042 
3043  // Now resize the receive buffer for all data from all processors
3044  // Make sure that it has a size of at least one
3045  if (receive_data_count == 0)
3046  {
3047  ++receive_data_count;
3048  }
3049  Vector<unsigned> receive_data(receive_data_count);
3050 
3051  // Make sure that the send buffer has size at least one
3052  // so that we don't get a segmentation fault
3053  if (send_data.size() == 0)
3054  {
3055  send_data.resize(1);
3056  }
3057 
3058  // Now send the data between all the processors
3059  MPI_Alltoallv(&send_data[0],
3060  &send_n[0],
3061  &send_displacement[0],
3062  MPI_UNSIGNED,
3063  &receive_data[0],
3064  &receive_n[0],
3065  &receive_displacement[0],
3066  MPI_UNSIGNED,
3067  Comm_pt->mpi_comm());
3068 
3069 
3070  if (Global_timings::Doc_comprehensive_timings)
3071  {
3072  tt_end = TimingHelpers::timer();
3073  oomph_info << "Time for alltoall in Mesh::synchronise_shared_nodes(): "
3074  << tt_end - tt_start << std::endl;
3075  tt_start = TimingHelpers::timer();
3076  }
3077 
3078 
3079  if (Global_timings::Doc_comprehensive_timings)
3080  {
3081  oomph_info << "Starting vector to set conversion in "
3082  << "Mesh::synchronise_shared_nodes() for a total of "
3083  << nshared_node() << " nodes\n";
3084  tt_start = TimingHelpers::timer();
3085  }
3086 
3087 
3088  // Copy vector-based representation of shared nodes into
3089  // sets for faster search
3090  Vector<std::set<Node*>> shared_node_set(n_proc);
3091  for (int d = 0; d < n_proc; d++)
3092  {
3093  unsigned n_vector = Shared_node_pt[d].size();
3094  for (unsigned i = 0; i < n_vector; i++)
3095  {
3096  shared_node_set[d].insert(Shared_node_pt[d][i]);
3097  }
3098  }
3099 
3100 
3101  if (Global_timings::Doc_comprehensive_timings)
3102  {
3103  tt_end = TimingHelpers::timer();
3104  oomph_info
3105  << "Time for vector to set in Mesh::synchronise_shared_nodes(): "
3106  << tt_end - tt_start << std::endl;
3107  tt_start = TimingHelpers::timer();
3108  }
3109 
3110 
3111  // Now use the received data
3112  for (int send_rank = 0; send_rank < n_proc; send_rank++)
3113  {
3114  // Don't bother to do anything for the processor corresponding to the
3115  // current processor or if no data were received from this processor
3116  if ((send_rank != my_rank) && (receive_n[send_rank] != 0))
3117  {
3118  // Counter for the data within the large array
3119  unsigned count = receive_displacement[send_rank];
3120 
3121  // Read total number of global haloed nodes
3122  // unsigned n_global_haloed_nodes_on_send_proc=receive_data[count++];
3123 
3124  // Storage for nodes and associated domains:
3125  // domains_map[global_haloed_node_number].first = node
3126  // domains_map[global_haloed_node_number].second = set of domains
3127  // this node is
3128  // associated with.
3129  std::map<unsigned, std::pair<Node*, std::set<unsigned>>> domains_map;
3130 
3131  // Loop over halo nodes with sending processor
3132  unsigned nnod_halo = this->nhalo_node(send_rank);
3133  for (unsigned j = 0; j < nnod_halo; j++)
3134  {
3135  Node* nod_pt = this->halo_node_pt(send_rank, j);
3136 
3137  // Read unique ID of haloed node on send proc
3138  unsigned haloed_node_id_on_send_proc = receive_data[count++];
3139 
3140  // Read out the number of shared domains from the received data
3141  unsigned n_shared_domains = receive_data[count++];
3142 
3143  // Prepare set of domains
3144  std::set<unsigned> domain_set;
3145 
3146  // Read 'em
3147  for (unsigned i = 0; i < n_shared_domains; i++)
3148  {
3149  int shared_domain = receive_data[count++];
3150 
3151  // Record in set
3152  domain_set.insert(shared_domain);
3153  }
3154 
3155  // Add entry:
3156  domains_map[haloed_node_id_on_send_proc] =
3157  std::make_pair(nod_pt, domain_set);
3158 
3159  } // end of loop over halo nodes
3160 
3161 
3162  // Now add new shared nodes in order
3163 #ifdef PARANOID
3164  int previous_one = -1;
3165 #endif
3166  for (std::map<unsigned, std::pair<Node*, std::set<unsigned>>>::iterator
3167  it = domains_map.begin();
3168  it != domains_map.end();
3169  it++)
3170  {
3171  // Super-paranoid: Check that the map actually sorted entries
3172  // by key (as it should)
3173 #ifdef PARANOID
3174  if (int((*it).first) < previous_one)
3175  {
3176  std::ostringstream error_stream;
3177  error_stream << "Map did not store entries in order of key\n "
3178  << "Current key: " << (*it).first
3179  << "; previous one: " << previous_one << "\n"
3180  << "Need to rewrite this...\n";
3181  throw OomphLibError(error_stream.str(),
3182  OOMPH_CURRENT_FUNCTION,
3183  OOMPH_EXCEPTION_LOCATION);
3184  }
3185  previous_one = (*it).first;
3186 #endif
3187 
3188  // Extract node
3189  Node* nod_pt = (*it).second.first;
3190 
3191  // Extract set of domains
3192  std::set<unsigned> domain_set((*it).second.second);
3193 
3194  // Read 'em
3195  for (std::set<unsigned>::iterator itt = domain_set.begin();
3196  itt != domain_set.end();
3197  itt++)
3198  {
3199  int shared_domain = (*itt);
3200 
3201  // No need to add shared nodes with oneself!
3202  if (shared_domain != my_rank)
3203  {
3204  // Is node already listed in shared node scheme? Find it
3205  // and get iterator to entry
3206  std::set<Node*>::iterator ittt =
3207  shared_node_set[shared_domain].find(nod_pt);
3208 
3209  // If it's not in there already, the iterator points to the
3210  // end of the set
3211  if (ittt == shared_node_set[shared_domain].end())
3212  {
3213  // Now add it
3214  add_shared_node_pt(shared_domain, nod_pt);
3215 
3216  // Update set
3217  shared_node_set[shared_domain].insert(nod_pt);
3218  }
3219  }
3220  }
3221  }
3222 
3223  } // end of any data is received and ignore own domain
3224 
3225  } // end of loop over send ranks
3226 
3227 
3228 #ifdef PARANOID
3229  // Has some twit filled in shared nodes with own process?!
3230  // Check at end of function.
3231  if (Shared_node_pt[my_rank].size() != 0)
3232  {
3233  throw OomphLibError(
3234  "Processor has shared nodes with itself! Something's gone wrong!",
3235  OOMPH_CURRENT_FUNCTION,
3236  OOMPH_EXCEPTION_LOCATION);
3237  }
3238 #endif
3239 
3240 
3241  if (Global_timings::Doc_comprehensive_timings)
3242  {
3243  tt_end = TimingHelpers::timer();
3244  oomph_info
3245  << "Time for final processing in Mesh::synchronise_shared_nodes(): "
3246  << tt_end - tt_start << std::endl;
3247  tt_start = TimingHelpers::timer();
3248  }
3249 
3250  if (Global_timings::Doc_comprehensive_timings)
3251  {
3252  double t_end = TimingHelpers::timer();
3253  oomph_info << "Total time for Mesh::synchronise_shared_nodes(): "
3254  << t_end - t_start << std::endl;
3255  }
3256  }
3257 
3258 
3259  //========================================================================
3260  /// Classify all halo and haloed information in the mesh
3261  //========================================================================
3262  void Mesh::classify_halo_and_haloed_nodes(DocInfo& doc_info,
3263  const bool& report_stats)
3264  {
3265  // MemoryUsage::doc_memory_usage(
3266  // "at beginning of Mesh::classify_halo_and_haloed_nodes");
3267 
3268  double t_start = 0.0;
3269  if (Global_timings::Doc_comprehensive_timings)
3270  {
3271  t_start = TimingHelpers::timer();
3272  }
3273 
3274  double tt_start = 0.0;
3275  if (Global_timings::Doc_comprehensive_timings)
3276  {
3277  tt_start = TimingHelpers::timer();
3278  }
3279 
3280  // Set up shared nodes scheme
3281  setup_shared_node_scheme();
3282 
3283  // MemoryUsage::doc_memory_usage("after setup shared node scheme");
3284 
3285  double tt_end = 0.0;
3286  if (Global_timings::Doc_comprehensive_timings)
3287  {
3288  tt_end = TimingHelpers::timer();
3289  oomph_info << "Time for Mesh::setup_shared_node_scheme() "
3290  << " Mesh::classify_halo_and_haloed_nodes(): "
3291  << tt_end - tt_start << std::endl;
3292  oomph_info.stream_pt()->flush();
3293  tt_start = TimingHelpers::timer();
3294  }
3295 
3296 
3297  // Wipe existing storage schemes for halo(ed) nodes
3298  Halo_node_pt.clear();
3299  Haloed_node_pt.clear();
3300 
3301  // Storage for number of processors and current processor
3302  int n_proc = Comm_pt->nproc();
3303  int my_rank = Comm_pt->my_rank();
3304  MPI_Status status;
3305 
3306  // Determine which processors the nodes are associated with
3307  // and hence who's in charge
3308  std::map<Data*, std::set<unsigned>> processors_associated_with_data;
3309  std::map<Data*, unsigned> processor_in_charge;
3310 
3311  // Loop over all processors and associate any nodes on the halo
3312  // elements involved with that processor
3313  for (int domain = 0; domain < n_proc; domain++)
3314  {
3315  // Get vector of halo elements by copy operation
3316  Vector<GeneralisedElement*> halo_elem_pt(this->halo_element_pt(domain));
3317 
3318  // Loop over halo elements associated with this adjacent domain
3319  unsigned nelem = halo_elem_pt.size();
3320  for (unsigned e = 0; e < nelem; e++)
3321  {
3322  // Get element (it only has nodes if it's a finite element)
3323  FiniteElement* finite_el_pt =
3324  dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
3325  if (finite_el_pt != 0)
3326  {
3327  // Loop over nodes
3328  unsigned nnod = finite_el_pt->nnode();
3329  for (unsigned j = 0; j < nnod; j++)
3330  {
3331  Node* nod_pt = finite_el_pt->node_pt(j);
3332  // Associate node with this domain
3333  processors_associated_with_data[nod_pt].insert(domain);
3334 
3335  // Do the same if the node is solid
3336  SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3337  if (solid_nod_pt != 0)
3338  {
3339  processors_associated_with_data[solid_nod_pt
3340  ->variable_position_pt()]
3341  .insert(domain);
3342  }
3343  }
3344  }
3345  }
3346  }
3347 
3348 
3349  // Loop over all [non-halo] elements and associate their nodes
3350  // with the current processor
3351  unsigned nelem = this->nelement();
3352  for (unsigned e = 0; e < nelem; e++)
3353  {
3354  FiniteElement* finite_el_pt =
3355  dynamic_cast<FiniteElement*>(this->element_pt(e));
3356 
3357  // Only visit non-halos and finite elements
3358  if ((finite_el_pt != 0) && (!finite_el_pt->is_halo()))
3359  {
3360  // Loop over nodes
3361  unsigned nnod = finite_el_pt->nnode();
3362  for (unsigned j = 0; j < nnod; j++)
3363  {
3364  Node* nod_pt = finite_el_pt->node_pt(j);
3365 
3366  // Associate this node with current processor
3367  processors_associated_with_data[nod_pt].insert(my_rank);
3368 
3369  // do the same if we have a SolidNode
3370  SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3371  if (solid_nod_pt != 0)
3372  {
3373  processors_associated_with_data[solid_nod_pt
3374  ->variable_position_pt()]
3375  .insert(my_rank);
3376  }
3377  }
3378  }
3379  }
3380 
3381 
3382  if (Global_timings::Doc_comprehensive_timings)
3383  {
3384  tt_end = TimingHelpers::timer();
3385  oomph_info
3386  << "Time for setup loops in Mesh::classify_halo_and_haloed_nodes: "
3387  << tt_end - tt_start << std::endl;
3388  oomph_info.stream_pt()->flush();
3389  tt_start = TimingHelpers::timer();
3390  }
3391 
3392  // MemoryUsage::doc_memory_usage("after setup loops");
3393 
3394  // At this point we need to "synchronise" the nodes on halo(ed) elements
3395  // so that the processors_associated_with_data agrees for the same node
3396  // on all processors. Strategy: All local nodes have just had their
3397  // association recorded. Now loop over all haloed elements
3398  // and send the association of their nodes to the corresponding
3399  // halo processors where they update/augment the association of the
3400  // nodes of the corresponding halo elements.
3401 
3402  // Loop over all domains
3403  for (int d = 0; d < n_proc; d++)
3404  {
3405  // Prepare vector to send/receive
3406  Vector<unsigned> processors_associated_with_data_on_other_proc;
3407 
3408  if (d != my_rank)
3409  {
3410  // Communicate the processors associated with data on haloed elements
3411 
3412  // Get haloed elements
3413  Vector<GeneralisedElement*> haloed_elem_pt(this->haloed_element_pt(d));
3414 
3415  // Initialise counter for this haloed layer
3416  unsigned count_data = 0;
3417 
3418  // Loop over haloed elements
3419  unsigned n_haloed_elem = haloed_elem_pt.size();
3420  for (unsigned e = 0; e < n_haloed_elem; e++)
3421  {
3422  // Only nodes in finite elements
3423  FiniteElement* haloed_el_pt =
3424  dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
3425  if (haloed_el_pt != 0)
3426  {
3427  // Loop over nodes
3428  unsigned n_node = haloed_el_pt->nnode();
3429  for (unsigned j = 0; j < n_node; j++)
3430  {
3431  Node* nod_pt = haloed_el_pt->node_pt(j);
3432 
3433  // Number of processors associated with this node
3434  unsigned n_assoc = processors_associated_with_data[nod_pt].size();
3435 
3436  // This number needs to be sent
3437  processors_associated_with_data_on_other_proc.push_back(n_assoc);
3438  count_data++;
3439 
3440  // Now add the associated process IDs to the vector to be sent
3441  std::set<unsigned> procs_set =
3442  processors_associated_with_data[nod_pt];
3443  for (std::set<unsigned>::iterator it = procs_set.begin();
3444  it != procs_set.end();
3445  it++)
3446  {
3447  processors_associated_with_data_on_other_proc.push_back(*it);
3448  count_data++;
3449  }
3450  }
3451  }
3452  }
3453 
3454 
3455  // Send the information
3456  MPI_Send(&count_data, 1, MPI_UNSIGNED, d, 0, Comm_pt->mpi_comm());
3457  if (count_data != 0)
3458  {
3459  MPI_Send(&processors_associated_with_data_on_other_proc[0],
3460  count_data,
3461  MPI_UNSIGNED,
3462  d,
3463  1,
3464  Comm_pt->mpi_comm());
3465  }
3466  }
3467  else
3468  {
3469  // Receive the processors associated with data onto halo elements
3470  for (int dd = 0; dd < n_proc; dd++)
3471  {
3472  if (dd != my_rank) // (my_rank=d)
3473  {
3474  // We will be looping over the halo elements with process dd
3475  Vector<GeneralisedElement*> halo_elem_pt(this->halo_element_pt(dd));
3476  unsigned n_halo_elem = halo_elem_pt.size();
3477  unsigned count_data = 0;
3478  MPI_Recv(&count_data,
3479  1,
3480  MPI_UNSIGNED,
3481  dd,
3482  0,
3483  Comm_pt->mpi_comm(),
3484  &status);
3485  if (count_data != 0)
3486  {
3487  processors_associated_with_data_on_other_proc.resize(count_data);
3488  MPI_Recv(&processors_associated_with_data_on_other_proc[0],
3489  count_data,
3490  MPI_UNSIGNED,
3491  dd,
3492  1,
3493  Comm_pt->mpi_comm(),
3494  &status);
3495 
3496  // Reset counter and loop through nodes on halo elements
3497  count_data = 0;
3498  for (unsigned e = 0; e < n_halo_elem; e++)
3499  {
3500  FiniteElement* halo_el_pt =
3501  dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
3502  if (halo_el_pt != 0)
3503  {
3504  unsigned n_node = halo_el_pt->nnode();
3505  for (unsigned j = 0; j < n_node; j++)
3506  {
3507  Node* nod_pt = halo_el_pt->node_pt(j);
3508 
3509  // Get number of processors associated with data that was
3510  // sent
3511  unsigned n_assoc =
3512  processors_associated_with_data_on_other_proc[count_data];
3513  count_data++;
3514 
3515  for (unsigned i_assoc = 0; i_assoc < n_assoc; i_assoc++)
3516  {
3517  // Get the process ID
3518  unsigned sent_domain =
3519  processors_associated_with_data_on_other_proc
3520  [count_data];
3521  count_data++;
3522 
3523  // Add it to this processor's list of IDs
3524  processors_associated_with_data[nod_pt].insert(
3525  sent_domain);
3526 
3527  // If the node is solid then add the ID to the solid data
3528  SolidNode* solid_nod_pt =
3529  dynamic_cast<SolidNode*>(nod_pt);
3530  if (solid_nod_pt != 0)
3531  {
3532  processors_associated_with_data
3533  [solid_nod_pt->variable_position_pt()]
3534  .insert(sent_domain);
3535  }
3536  }
3537  }
3538  }
3539  }
3540  }
3541  }
3542  }
3543  }
3544  }
3545 
3546  if (Global_timings::Doc_comprehensive_timings)
3547  {
3548  tt_end = TimingHelpers::timer();
3549  oomph_info
3550  << "Time for pt2pt send/recv in Mesh::classify_halo_and_haloed_nodes: "
3551  << tt_end - tt_start << std::endl;
3552  oomph_info.stream_pt()->flush();
3553  tt_start = TimingHelpers::timer();
3554  }
3555 
3556 
3557  // MemoryUsage::doc_memory_usage("after pt2pt send/recv");
3558 
3559  // Loop over all nodes on the present processor and put the highest-numbered
3560  // processor associated with each node "in charge" of the node
3561  unsigned nnod = this->nnode();
3562  for (unsigned j = 0; j < nnod; j++)
3563  {
3564  Node* nod_pt = this->node_pt(j);
3565 
3566  // Reset halo status of node to false
3567  nod_pt->set_nonhalo();
3568 
3569  // If it's a SolidNode then the halo status of the data
3570  // associated with that must also be reset to false
3571  SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3572  if (solid_nod_pt != 0)
3573  {
3574  solid_nod_pt->variable_position_pt()->set_nonhalo();
3575  }
3576 
3577  // Now put the highest-numbered one in charge
3578  unsigned proc_max = 0;
3579  std::set<unsigned> procs_set = processors_associated_with_data[nod_pt];
3580  for (std::set<unsigned>::iterator it = procs_set.begin();
3581  it != procs_set.end();
3582  it++)
3583  {
3584  if (*it > proc_max) proc_max = *it;
3585  }
3586  processor_in_charge[nod_pt] = proc_max;
3587 
3588  // Do the same if we have a SolidNode
3589  if (solid_nod_pt != 0)
3590  {
3591  // Now put the highest-numbered one in charge
3592  unsigned proc_max_solid = 0;
3593  std::set<unsigned> procs_set_solid =
3594  processors_associated_with_data[solid_nod_pt->variable_position_pt()];
3595  for (std::set<unsigned>::iterator it = procs_set_solid.begin();
3596  it != procs_set_solid.end();
3597  it++)
3598  {
3599  if (*it > proc_max_solid) proc_max_solid = *it;
3600  }
3601  processor_in_charge[solid_nod_pt->variable_position_pt()] =
3602  proc_max_solid;
3603  }
3604  }
3605 
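// Editorial note (not part of mesh.cc): std::set<unsigned> keeps its entries
// in ascending order, so for a non-empty set the "highest-numbered processor"
// found by the loop above is simply *procs_set.rbegin(); the explicit loop is
// equivalent and keeps the intent obvious.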
3606 
3607  // First stab at determining halo nodes: they are located on the halo
3608  // elements and their processor in charge differs from the
3609  // current processor.
3610 
3611  // Only count nodes once (map is initialised to 0 = false)
3612  std::map<Node*, bool> done;
3613 
3614  // Loop over all processors
3615  for (int domain = 0; domain < n_proc; domain++)
3616  {
3617  // Get vector of halo elements by copy operation
3618  Vector<GeneralisedElement*> halo_elem_pt(this->halo_element_pt(domain));
3619 
3620  // Loop over halo elements associated with this adjacent domain
3621  unsigned nelem = halo_elem_pt.size();
3622 
3623  for (unsigned e = 0; e < nelem; e++)
3624  {
3625  // Get element
3626  GeneralisedElement* el_pt = halo_elem_pt[e];
3627 
3628  // Can it be cast to a finite element?
3629  FiniteElement* finite_el_pt = dynamic_cast<FiniteElement*>(el_pt);
3630  if (finite_el_pt != 0)
3631  {
3632  // Loop over nodes
3633  unsigned nnod = finite_el_pt->nnode();
3634  for (unsigned j = 0; j < nnod; j++)
3635  {
3636  Node* nod_pt = finite_el_pt->node_pt(j);
3637 
3638  // Have we done this node already?
3639  if (!done[nod_pt])
3640  {
3641  // Is the other processor/domain in charge of this node?
3642  int proc_in_charge = processor_in_charge[nod_pt];
3643 
3644  if (proc_in_charge != my_rank)
3645  {
3646  // To keep the order of the nodes consistent with that
3647  // in the haloed node lookup scheme, only
3648  // allow it to be added when the domain being visited is the one in charge
3649  if (proc_in_charge == int(domain))
3650  {
3651  // Add it as being halo node whose non-halo counterpart
3652  // is located on processor proc_in_charge
3653  this->add_halo_node_pt(proc_in_charge, nod_pt);
3654 
3655  // The node itself needs to know it is a halo
3656  nod_pt->set_halo(proc_in_charge);
3657 
3658  // If it's a SolidNode then the data associated with that
3659  // must also be halo
3660  SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3661  if (solid_nod_pt != 0)
3662  {
3663  solid_nod_pt->variable_position_pt()->set_halo(
3664  proc_in_charge);
3665  }
3666 
3667  // We're done with this node
3668  done[nod_pt] = true;
3669  }
3670  }
3671  }
3672  }
3673 
3674  } // End of finite element case
3675 
3676  // Now make sure internal data on halo elements is also halo
3677  unsigned nintern_data = el_pt->ninternal_data();
3678  for (unsigned iintern = 0; iintern < nintern_data; iintern++)
3679  {
3680  el_pt->internal_data_pt(iintern)->set_halo(domain);
3681  }
3682  }
3683  }
3684 
3685 
3686  // First stab at determining haloed nodes. They are located on the haloed
3687  // elements and the processor in charge is the current processor
3688 
3689  // Loop over processors that share haloes with the current one
3690  for (int domain = 0; domain < n_proc; domain++)
3691  {
3692  // Only count nodes once (map is initialised to 0 = false)
3693  std::map<Node*, bool> node_done;
3694 
3695  // Get vector of haloed elements by copy operation
3696  Vector<GeneralisedElement*> haloed_elem_pt(
3697  this->haloed_element_pt(domain));
3698 
3699  // Loop over haloed elements associated with this adjacent domain
3700  unsigned nelem = haloed_elem_pt.size();
3701 
3702  for (unsigned e = 0; e < nelem; e++)
3703  {
3704  // Get element
3705  GeneralisedElement* el_pt = haloed_elem_pt[e];
3706 
3707  // Can it be cast to a finite element
3708  FiniteElement* finite_el_pt = dynamic_cast<FiniteElement*>(el_pt);
3709  if (finite_el_pt != 0)
3710  {
3711  // Loop over nodes
3712  unsigned nnod = finite_el_pt->nnode();
3713  for (unsigned j = 0; j < nnod; j++)
3714  {
3715  Node* nod_pt = finite_el_pt->node_pt(j);
3716 
3717  // Have we done this node already?
3718  if (!node_done[nod_pt])
3719  {
3720  // Is the current processor/domain in charge of this node?
3721  int proc_in_charge = processor_in_charge[nod_pt];
3722 
3723  if (proc_in_charge == my_rank)
3724  {
3725  // Add it as being haloed from specified domain
3726  this->add_haloed_node_pt(domain, nod_pt);
3727 
3728  // We're done with this node
3729  node_done[nod_pt] = true;
3730  }
3731  }
3732  }
3733  }
3734  }
3735  }
3736 
3737 
3738  if (Global_timings::Doc_comprehensive_timings)
3739  {
3740  tt_end = TimingHelpers::timer();
3741  oomph_info
3742  << "Time for first classific in Mesh::classify_halo_and_haloed_nodes: "
3743  << tt_end - tt_start << std::endl;
3744  oomph_info.stream_pt()->flush();
3745  tt_start = TimingHelpers::timer();
3746  }
3747 
3748  // MemoryUsage::doc_memory_usage("after first classific");
3749 
3750 
3751  // Find any overlooked halo nodes: These are any nodes on the halo/haloed
3752  // elements (i.e. precisely the nodes currently contained in the shared
3753  // node scheme) that have not been classified as haloes (yet) though they
3754  // should have been because another processor is in charge of them.
3755  // This arises when the "overlooked halo node" is not part of the
3756  // halo/haloed element lookup scheme between the current processor
3757  // and the processor that holds the non-halo counterpart. E.g. we're
3758  // on proc 3. A node at the very edge of its halo layer also exists
3759  // on procs 0 and 1 with 1 being "in charge". However, the node in
3760  // question is not part of the halo/haloed element lookup scheme between
3761  // processor 1 and 3 so in the classification performed above, we never
3762  // visit it so it's overlooked. The code below rectifies this by going
3763  // through the intermediate processor (here proc 0) that contains the node
3764  // in lookup schemes with the halo processor (here proc 3, this one) and the
3765  // one that contains the non-halo counterpart (here proc 1).
3766 
3767 
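// ------------------------------------------------------------------
// Editorial illustration (not part of mesh.cc): the relay used below to fix
// up an overlooked halo node N that exists on procs 0, 1 and 3, with proc 1
// in charge but no halo/haloed element lookup scheme between procs 1 and 3:
//
//   proc 3: finds N among its shared nodes with proc 0, sees that proc 1 is
//           in charge although N is not yet flagged as a halo; flags it and
//           sends (shared-node id with proc 0, in-charge rank 1) to proc 0
//   proc 0: translates that id into its shared-node scheme with proc 1 and
//           forwards (shared-node id with proc 1, rank 3) to proc 1
//   proc 1: records N as haloed with proc 3
//
// Finally both procs 1 and 3 append N to their shared-node lookup schemes in
// a consistent order (see the loop over processors further below).
// ------------------------------------------------------------------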
3768  // Counter for number of overlooked halos (if there aren't any we don't
3769  // need any comms below)
3770  unsigned n_overlooked_halo = 0;
3771 
3772  // Record previously overlooked halo nodes so they can be
3773  // added to the shared node lookup scheme (in a consistent order) below
3774  Vector<Vector<Node*>> over_looked_halo_node_pt(n_proc);
3775 
3776  // Record previously overlooked haloed nodes so they can be
3777  // added to the shared node lookup scheme (in a consistent order) below
3778  Vector<Vector<Node*>> over_looked_haloed_node_pt(n_proc);
3779 
3780  // Data to be sent to each processor
3781  Vector<int> send_n(n_proc, 0);
3782 
3783  // Storage for all values to be sent to all processors
3784  Vector<int> send_data;
3785 
3786  // Start location within send_data for data to be sent to each processor
3787  Vector<int> send_displacement(n_proc, 0);
3788 
3789  // Check missing ones
3790  for (int domain = 0; domain < n_proc; domain++)
3791  {
3792  // Set the offset for the current processor
3793  send_displacement[domain] = send_data.size();
3794 
3795  // Don't bother to do anything if the processor in the loop is the
3796  // current processor
3797  if (domain != my_rank)
3798  {
3799  unsigned nnod = nshared_node(domain);
3800  for (unsigned j = 0; j < nnod; j++)
3801  {
3802  Node* nod_pt = shared_node_pt(domain, j);
3803 
3804  // Is a different-numbered processor/domain in charge of this node?
3805  int proc_in_charge = processor_in_charge[nod_pt];
3806  if ((proc_in_charge != my_rank) && !(nod_pt->is_halo()))
3807  {
3808  // Add it as being halo node whose non-halo counterpart
3809  // is located on processor proc_in_charge
3810  this->add_halo_node_pt(proc_in_charge, nod_pt);
3811 
3812  // We have another one...
3813  n_overlooked_halo++;
3814  over_looked_halo_node_pt[proc_in_charge].push_back(nod_pt);
3815 
3816  // The node itself needs to know it is a halo
3817  nod_pt->set_halo(proc_in_charge);
3818 
3819  // If it's a SolidNode then the data associated with that
3820  // must also be halo
3821  SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3822  if (solid_nod_pt != 0)
3823  {
3824  solid_nod_pt->variable_position_pt()->set_halo(proc_in_charge);
3825  }
3826 
3827  // Send shared node ID and processor in charge info to
3828  // "intermediate" processor (i.e. the processor that has
3829  // the lookup schemes to talk to both)
3830  send_data.push_back(j);
3831  send_data.push_back(proc_in_charge);
3832  }
3833  }
3834  }
3835 
3836  // End of data
3837  send_data.push_back(-1);
3838 
3839  // Find the number of data added to the vector
3840  send_n[domain] = send_data.size() - send_displacement[domain];
3841  }
3842 
3843 
3844  // Check if any processor has stumbled across overlooked halos
3845  // (if not we can omit the comms below)
3846  unsigned global_max_n_overlooked_halo = 0;
3847  MPI_Allreduce(&n_overlooked_halo,
3848  &global_max_n_overlooked_halo,
3849  1,
3850  MPI_UNSIGNED,
3851  MPI_MAX,
3852  Comm_pt->mpi_comm());
3853 
3854 
3855  oomph_info << "Global max number of overlooked haloes: "
3856  << global_max_n_overlooked_halo << std::endl;
3857 
3858  if (Global_timings::Doc_comprehensive_timings)
3859  {
3860  tt_end = TimingHelpers::timer();
3861  oomph_info << "Time for setup 1st alltoalls in "
3862  "Mesh::classify_halo_and_haloed_nodes: "
3863  << tt_end - tt_start << std::endl;
3864  oomph_info.stream_pt()->flush();
3865  tt_start = TimingHelpers::timer();
3866  }
3867 
3868  // MemoryUsage::doc_memory_usage("after setup 1st alltoalls");
3869 
3870  // Any comms needed?
3871  if (global_max_n_overlooked_halo > 0)
3872  {
3873  // Storage for the number of data to be received from each processor
3874  Vector<int> receive_n(n_proc, 0);
3875 
3876  // Now send numbers of data to be sent between all processors
3877  MPI_Alltoall(
3878  &send_n[0], 1, MPI_INT, &receive_n[0], 1, MPI_INT, Comm_pt->mpi_comm());
3879 
3880 
3881  if (Global_timings::Doc_comprehensive_timings)
3882  {
3883  tt_end = TimingHelpers::timer();
3884  oomph_info
3885  << "Time for 1st alltoall in Mesh::classify_halo_and_haloed_nodes: "
3886  << tt_end - tt_start << std::endl;
3887  oomph_info.stream_pt()->flush();
3888  tt_start = TimingHelpers::timer();
3889  }
3890 
3891  // MemoryUsage::doc_memory_usage("after 1st alltoall");
3892 
3893 
3894  // We now prepare the data to be received
3895  // by working out the displacements from the received data
3896  Vector<int> receive_displacement(n_proc, 0);
3897  int receive_data_count = 0;
3898  for (int rank = 0; rank < n_proc; ++rank)
3899  {
3900  // Displacement is number of data received so far
3901  receive_displacement[rank] = receive_data_count;
3902  receive_data_count += receive_n[rank];
3903  }
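// Editorial note (not part of mesh.cc): with, say, three processors and
// receive_n = {3, 0, 2}, the loop above gives receive_displacement = {0, 3, 3}
// and receive_data_count = 5, i.e. each processor's chunk of the receive
// buffer starts where the previous one ends -- exactly the counts/displacement
// layout that MPI_Alltoallv expects.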
3904 
3905  // Now resize the receive buffer for all data from all processors
3906  // Make sure that it has a size of at least one
3907  if (receive_data_count == 0)
3908  {
3909  ++receive_data_count;
3910  }
3911  Vector<int> receive_data(receive_data_count);
3912 
3913  // Make sure that the send buffer has size at least one
3914  // so that we don't get a segmentation fault
3915  if (send_data.size() == 0)
3916  {
3917  send_data.resize(1);
3918  }
3919 
3920  // Now send the data between all the processors
3921  MPI_Alltoallv(&send_data[0],
3922  &send_n[0],
3923  &send_displacement[0],
3924  MPI_INT,
3925  &receive_data[0],
3926  &receive_n[0],
3927  &receive_displacement[0],
3928  MPI_INT,
3929  Comm_pt->mpi_comm());
3930 
3931 
3932  if (Global_timings::Doc_comprehensive_timings)
3933  {
3934  tt_end = TimingHelpers::timer();
3935  oomph_info
3936  << "Time for 2nd alltoall in Mesh::classify_halo_and_haloed_nodes: "
3937  << tt_end - tt_start << std::endl;
3938  oomph_info.stream_pt()->flush();
3939  tt_start = TimingHelpers::timer();
3940  }
3941 
3942  // MemoryUsage::doc_memory_usage("after 2nd alltoall");
3943 
3944  // Provide storage for data to be sent to processor that used to be
3945  // in charge
3946  Vector<Vector<int>> send_data_for_proc_in_charge(n_proc);
3947 
3948  // Now use the received data
3949  for (int send_rank = 0; send_rank < n_proc; send_rank++)
3950  {
3951  // Don't bother to do anything for the processor corresponding to the
3952  // current processor or if no data were received from this processor
3953  if ((send_rank != my_rank) && (receive_n[send_rank] != 0))
3954  {
3955  // Counter for the data within the large array
3956  unsigned count = receive_displacement[send_rank];
3957 
3958  // Unpack until we reach "end of data" indicator (-1)
3959  while (true)
3960  {
3961  // Read next entry
3962  int next_one = receive_data[count++];
3963 
3964  if (next_one == -1)
3965  {
3966  break;
3967  }
3968  else
3969  {
3970  // Shared halo node number in lookup scheme between intermediate
3971  // (i.e. this) processor and the one that has the overlooked halo
3972  unsigned j = unsigned(next_one);
3973 
3974  // Processor in charge:
3975  unsigned proc_in_charge = unsigned(receive_data[count++]);
3976 
3977  // Find actual node from shared node lookup scheme
3978  Node* nod_pt = shared_node_pt(send_rank, j);
3979 
3980 
3981  // Note: This search seems relatively cheap
3982  // and in the tests done, did not benefit
3983  // from conversion to map-based search
3984  // as in
3985  // TreeBasedRefineableMeshBase::synchronise_hanging_nodes()
3986 
3987 
3988  // Locate its index in lookup scheme with proc in charge
3989  bool found = false;
3990  unsigned nnod = nshared_node(proc_in_charge);
3991  for (unsigned jj = 0; jj < nnod; jj++)
3992  {
3993  if (nod_pt == shared_node_pt(proc_in_charge, jj))
3994  {
3995  found = true;
3996 
3997  // Shared node ID in lookup scheme with intermediate (i.e.
3998  // this) processor
3999  send_data_for_proc_in_charge[proc_in_charge].push_back(jj);
4000 
4001  // Processor that holds the overlooked halo node
4002  send_data_for_proc_in_charge[proc_in_charge].push_back(
4003  send_rank);
4004 
4005  break;
4006  }
4007  }
4008  if (!found)
4009  {
4010  std::ostringstream error_stream;
4011  error_stream
4012  << "Failed to find node that is shared node " << j
4013  << " (with processor " << send_rank
4014  << ") \n in shared node lookup scheme with processor "
4015  << proc_in_charge << " which is in charge.\n";
4016  throw OomphLibError(error_stream.str(),
4017  OOMPH_CURRENT_FUNCTION,
4018  OOMPH_EXCEPTION_LOCATION);
4019  }
4020  }
4021  }
4022  }
4023  } // End of data is received
4024 
4025 
4026  if (Global_timings::Doc_comprehensive_timings)
4027  {
4028  tt_end = TimingHelpers::timer();
4029  oomph_info << "Time for 1st setup 3rd alltoall in "
4030  "Mesh::classify_halo_and_haloed_nodes: "
4031  << tt_end - tt_start << std::endl;
4032  oomph_info.stream_pt()->flush();
4033  tt_start = TimingHelpers::timer();
4034  }
4035 
4036  // MemoryUsage::doc_memory_usage("after 1st setup for 3rd alltoall");
4037 
4038  // Data to be sent to each processor
4039  Vector<int> all_send_n(n_proc, 0);
4040 
4041  // Storage for all values to be sent to all processors
4042  Vector<int> all_send_data;
4043 
4044  // Start location within send_data for data to be sent to each processor
4045  Vector<int> all_send_displacement(n_proc, 0);
4046 
4047  // Collate data
4048  for (int domain = 0; domain < n_proc; domain++)
4049  {
4050  // Set the offset for the current processor
4051  all_send_displacement[domain] = all_send_data.size();
4052 
4053  // Don't bother to do anything if the processor in the loop is the
4054  // current processor
4055  if (domain != my_rank)
4056  {
4057  unsigned n = send_data_for_proc_in_charge[domain].size();
4058  for (unsigned j = 0; j < n; j++)
4059  {
4060  all_send_data.push_back(send_data_for_proc_in_charge[domain][j]);
4061  }
4062  }
4063 
4064  // End of data
4065  all_send_data.push_back(-1);
4066 
4067  // Find the number of data added to the vector
4068  all_send_n[domain] =
4069  all_send_data.size() - all_send_displacement[domain];
4070  }
4071 
4072 
4073  if (Global_timings::Doc_comprehensive_timings)
4074  {
4075  tt_end = TimingHelpers::timer();
4076  oomph_info << "Time for 2nd setup 3rd alltoall in "
4077  "Mesh::classify_halo_and_haloed_nodes: "
4078  << tt_end - tt_start << std::endl;
4079  oomph_info.stream_pt()->flush();
4080  tt_start = TimingHelpers::timer();
4081  }
4082 
4083  // MemoryUsage::doc_memory_usage("after 2nd setup 3rd alltoall");
4084 
4085  // Storage for the number of data to be received from each processor
4086  Vector<int> all_receive_n(n_proc, 0);
4087 
4088  // Now send numbers of data to be sent between all processors
4089  MPI_Alltoall(&all_send_n[0],
4090  1,
4091  MPI_INT,
4092  &all_receive_n[0],
4093  1,
4094  MPI_INT,
4095  Comm_pt->mpi_comm());
4096 
4097 
4098  if (Global_timings::Doc_comprehensive_timings)
4099  {
4100  tt_end = TimingHelpers::timer();
4101  oomph_info
4102  << "Time for 3rd alltoall in Mesh::classify_halo_and_haloed_nodes: "
4103  << tt_end - tt_start << std::endl;
4104  oomph_info.stream_pt()->flush();
4105  tt_start = TimingHelpers::timer();
4106  }
4107 
4108  // MemoryUsage::doc_memory_usage("after 3rd alltoall");
4109 
4110  // We now prepare the data to be received
4111  // by working out the displacements from the received data
4112  Vector<int> all_receive_displacement(n_proc, 0);
4113  int all_receive_data_count = 0;
4114 
4115  for (int rank = 0; rank < n_proc; ++rank)
4116  {
4117  // Displacement is number of data received so far
4118  all_receive_displacement[rank] = all_receive_data_count;
4119  all_receive_data_count += all_receive_n[rank];
4120  }
4121 
4122  // Now resize the receive buffer for all data from all processors
4123  // Make sure that it has a size of at least one
4124  if (all_receive_data_count == 0)
4125  {
4126  ++all_receive_data_count;
4127  }
4128  Vector<int> all_receive_data(all_receive_data_count);
4129 
4130  // Make sure that the send buffer has size at least one
4131  // so that we don't get a segmentation fault
4132  if (all_send_data.size() == 0)
4133  {
4134  all_send_data.resize(1);
4135  }
4136 
4137  // Now send the data between all the processors
4138  MPI_Alltoallv(&all_send_data[0],
4139  &all_send_n[0],
4140  &all_send_displacement[0],
4141  MPI_INT,
4142  &all_receive_data[0],
4143  &all_receive_n[0],
4144  &all_receive_displacement[0],
4145  MPI_INT,
4146  Comm_pt->mpi_comm());
4147 
4148 
4149  if (Global_timings::Doc_comprehensive_timings)
4150  {
4151  tt_end = TimingHelpers::timer();
4152  oomph_info
4153  << "Time for 4th alltoall in Mesh::classify_halo_and_haloed_nodes: "
4154  << tt_end - tt_start << std::endl;
4155  oomph_info.stream_pt()->flush();
4156  tt_start = TimingHelpers::timer();
4157  }
4158 
4159  // MemoryUsage::doc_memory_usage("after 4th alltoall");
4160 
4161 
4162  // Now use the received data
4163  for (int send_rank = 0; send_rank < n_proc; send_rank++)
4164  {
4165  // Don't bother to do anything for the processor corresponding to the
4166  // current processor or if no data were received from this processor
4167  if ((send_rank != my_rank) && (all_receive_n[send_rank] != 0))
4168  {
4169  // Counter for the data within the large array
4170  unsigned count = all_receive_displacement[send_rank];
4171 
4172  // Unpack until we reach "end of data" indicator (-1)
4173  while (true)
4174  {
4175  // Read next entry
4176  int next_one = all_receive_data[count++];
4177 
4178  if (next_one == -1)
4179  {
4180  break;
4181  }
4182  else
4183  {
4184  // Shared node ID in lookup scheme with intermediate (sending)
4185  // processor
4186  unsigned j = unsigned(next_one);
4187 
4188  // Get pointer to previously overlooked halo
4189  Node* nod_pt = shared_node_pt(send_rank, j);
4190 
4191  // Proc where overlooked halo is
4192  unsigned proc_with_overlooked_halo = all_receive_data[count++];
4193 
4194  // Add it as being haloed from specified domain
4195  this->add_haloed_node_pt(proc_with_overlooked_halo, nod_pt);
4196 
4197  // Record new haloed node so it can be added to the shared
4198  // node lookup scheme (in a consistent order) below.
4199  over_looked_haloed_node_pt[proc_with_overlooked_halo].push_back(
4200  nod_pt);
4201  }
4202  }
4203  }
4204  } // End of data is received
4205 
4206  if (Global_timings::Doc_comprehensive_timings)
4207  {
4208  tt_end = TimingHelpers::timer();
4209  oomph_info << "Time for postproc 4th alltoall in "
4210  "Mesh::classify_halo_and_haloed_nodes: "
4211  << tt_end - tt_start << std::endl;
4212  oomph_info.stream_pt()->flush();
4213  tt_start = TimingHelpers::timer();
4214  }
4215 
4216  // MemoryUsage::doc_memory_usage("after postprocess 4th alltoall");
4217 
4218  // Now add previously overlooked halo/haloed nodes to shared node
4219  // lookup scheme in consistent order
4220  for (int d = 0; d < n_proc; d++)
4221  {
4222  // For all domains lower than the current domain: Do halos first
4223  // then haloed, to ensure correct order in lookup scheme from
4224  // the other side
4225  if (d < my_rank)
4226  {
4227  unsigned nnod = over_looked_halo_node_pt[d].size();
4228  for (unsigned j = 0; j < nnod; j++)
4229  {
4230  this->add_shared_node_pt(d, over_looked_halo_node_pt[d][j]);
4231  }
4232  nnod = over_looked_haloed_node_pt[d].size();
4233  for (unsigned j = 0; j < nnod; j++)
4234  {
4235  this->add_shared_node_pt(d, over_looked_haloed_node_pt[d][j]);
4236  }
4237  }
4238  else if (d > my_rank)
4239  {
4240  unsigned nnod = over_looked_haloed_node_pt[d].size();
4241  for (unsigned j = 0; j < nnod; j++)
4242  {
4243  this->add_shared_node_pt(d, over_looked_haloed_node_pt[d][j]);
4244  }
4245  nnod = over_looked_halo_node_pt[d].size();
4246  for (unsigned j = 0; j < nnod; j++)
4247  {
4248  this->add_shared_node_pt(d, over_looked_halo_node_pt[d][j]);
4249  }
4250  }
4251  }
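// Editorial note (not part of mesh.cc): the asymmetric ordering above (halo
// before haloed for d < my_rank, haloed before halo for d > my_rank) ensures
// that, for any pair of processors, both sides append the same physical nodes
// to their shared-node lookup schemes in the same sequence, so entry j of the
// scheme refers to the same node on both processors.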
4252 
4253 
4254  // Doc stats
4255  if (report_stats)
4256  {
4257  // Report total number of halo(ed) and shared nodes for this process
4258  oomph_info << "BEFORE SYNCHRONISE SHARED NODES Processor " << my_rank
4259  << " holds " << this->nnode() << " nodes of which "
4260  << this->nhalo_node() << " are halo nodes \n while "
4261  << this->nhaloed_node() << " are haloed nodes, and "
4262  << this->nshared_node() << " are shared nodes." << std::endl;
4263 
4264  // Report number of halo(ed) and shared nodes with each domain
4265  // from the current process
4266  for (int iproc = 0; iproc < n_proc; iproc++)
4267  {
4268  // Get vector of halo elements by copy operation
4269  Vector<GeneralisedElement*> halo_elem_pt(
4270  this->halo_element_pt(iproc));
4271  Vector<GeneralisedElement*> haloed_elem_pt(
4272  this->haloed_element_pt(iproc));
4273  oomph_info << "With process " << iproc << ", there are "
4274  << this->nhalo_node(iproc) << " halo nodes, and "
4275  << std::endl
4276  << this->nhaloed_node(iproc) << " haloed nodes, and "
4277  << this->nshared_node(iproc) << " shared nodes"
4278  << std::endl
4279  << halo_elem_pt.size() << " halo elements and "
4280  << haloed_elem_pt.size() << " haloed elements\n";
4281  }
4282  }
4283 
4315 
4316  } // end if comms reqd because we encountered overlooked halo elements
4317 
4318 
4319  // MemoryUsage::doc_memory_usage("before sync halo nodes");
4320 
4321  // Synchronise shared nodes
4322  synchronise_shared_nodes(report_stats);
4323 
4324  // MemoryUsage::doc_memory_usage("after sync halo nodes");
4325 
4326 #ifdef PARANOID
4327  // Has some twit filled in haloed nodes with own process?!
4328  if (Haloed_node_pt[my_rank].size() != 0)
4329  {
4330  throw OomphLibError(
4331  "Processor has haloed nodes with itself! Something's gone wrong!",
4332  OOMPH_CURRENT_FUNCTION,
4333  OOMPH_EXCEPTION_LOCATION);
4334  }
4335  // Has some twit filled in halo nodes with own process?!
4336  if (Halo_node_pt[my_rank].size() != 0)
4337  {
4338  throw OomphLibError(
4339  "Processor has halo nodes with itself! Something's gone wrong!",
4340  OOMPH_CURRENT_FUNCTION,
4341  OOMPH_EXCEPTION_LOCATION);
4342  }
4343  // Has some twit filled in root haloed elements with own process?!
4344  if (Root_haloed_element_pt[my_rank].size() != 0)
4345  {
4346  throw OomphLibError("Processor has root haloed elements with itself! "
4347  "Something's gone wrong!",
4348  OOMPH_CURRENT_FUNCTION,
4349  OOMPH_EXCEPTION_LOCATION);
4350  }
4351  // Has some twit filled in root halo elements with own process?!
4352  if (Root_halo_element_pt[my_rank].size() != 0)
4353  {
4354  throw OomphLibError(
4355  "Processor has root halo elements with itself! Something's gone wrong!",
4356  OOMPH_CURRENT_FUNCTION,
4357  OOMPH_EXCEPTION_LOCATION);
4358  }
4359 #endif
4360 
4361  // Doc stats
4362  if (report_stats)
4363  {
4364  // Report total number of halo(ed) and shared nodes for this process
4365  oomph_info << "Processor " << my_rank << " holds " << this->nnode()
4366  << " nodes of which " << this->nhalo_node()
4367  << " are halo nodes \n while " << this->nhaloed_node()
4368  << " are haloed nodes, and " << this->nshared_node()
4369  << " are shared nodes." << std::endl;
4370 
4371  // Report number of halo(ed) and shared nodes with each domain
4372  // from the current process
4373  for (int iproc = 0; iproc < n_proc; iproc++)
4374  {
4375  // Get vector of halo elements by copy operation
4376  Vector<GeneralisedElement*> halo_elem_pt(this->halo_element_pt(iproc));
4377  Vector<GeneralisedElement*> haloed_elem_pt(
4378  this->haloed_element_pt(iproc));
4379  oomph_info << "With process " << iproc << ", there are "
4380  << this->nhalo_node(iproc) << " halo nodes, and "
4381  << std::endl
4382  << this->nhaloed_node(iproc) << " haloed nodes, and "
4383  << this->nshared_node(iproc) << " shared nodes" << std::endl
4384  << halo_elem_pt.size() << " halo elements and "
4385  << haloed_elem_pt.size() << " haloed elements\n";
4386  }
4387  }
4388 
4389 
4390  // MemoryUsage::doc_memory_usage("before resize halo nodes");
4391 
4392  // Now resize halo nodes if required (can be over-ruled from the outside,
4393  // either by the user (for a (risky!) efficiency saving) or from the
4394  // overloaded version of classify_... in the refineable version of that
4395  // function, where resize_halo_nodes() is called after synchronising hanging nodes).
4396  if (!Resize_halo_nodes_not_required)
4397  {
4398  resize_halo_nodes();
4399  }
4400 
4401  // MemoryUsage::doc_memory_usage("after resize halo nodes");
4402 
4403  if (Global_timings::Doc_comprehensive_timings)
4404  {
4405  double t_end = TimingHelpers::timer();
4406  oomph_info << "Total time for Mesh::classify_halo_and_halo_nodes(): "
4407  << t_end - t_start << std::endl;
4408  oomph_info.stream_pt()->flush();
4409  }
4410 
4411  // MemoryUsage::doc_memory_usage(
4412  // "at end of Mesh::classify_halo_and_halo_nodes()");
4413  }
4414 
4415 
4416  //========================================================================
4417  /// Helper function that resizes halo nodes to the same
4418  /// size as their non-halo counterparts if required. (A discrepancy
4419  /// can arise if a FaceElement that introduces additional unknowns
4420  /// is attached to a bulk element that shares a node with a haloed element.
4421  /// In that case the joint node between the haloed and non-haloed elements
4422  /// is resized on that processor but not on the one that holds the
4423  /// halo counterpart, because no FaceElement is attached to the halo
4424  /// element.)
4425  //=========================================================================
4426  void Mesh::resize_halo_nodes()
4427  {
4428  double t_start = 0.0;
4429  if (Global_timings::Doc_comprehensive_timings)
4430  {
4431  t_start = TimingHelpers::timer();
4432  }
4433 
4434  MPI_Status status;
4435 
4436  // Nothing needs to be done if the mesh isn't distributed
4437  if (is_mesh_distributed())
4438  {
4439  // Storage for current processor and number of processors
4440  int n_proc = Comm_pt->nproc();
4441  int my_rank = Comm_pt->my_rank();
4442 
4443  // Loop over domains on which the non-halo counterparts of my halo nodes live
4444  for (int d = 0; d < n_proc; d++)
4445  {
4446  // On current processor: Receive data for my halo nodes with proc d.
4447  // Elsewhere: Send haloed data with proc d.
4448  if (d == my_rank)
4449  {
4450  // Loop over domains that hold non-halo counterparts of my halo nodes
4451  for (int dd = 0; dd < n_proc; dd++)
4452  {
4453  // Don't talk to yourself
4454  if (dd != d)
4455  {
4456  // How many of my nodes are halo nodes whose non-halo
4457  // counterpart is located on processor dd?
4458  int nnod_halo = this->nhalo_node(dd);
4459  int nnod_ext_halo = this->nexternal_halo_node(dd);
4460  if ((nnod_halo + nnod_ext_halo) != 0)
4461  {
4462  // Receive from processor dd number of haloed nodes (entry 0),
4463  // external haloed nodes (entry 1) and total number of
4464  // unsigneds to be sent below (entry 2)
4465  Vector<int> tmp(3);
4466  MPI_Recv(
4467  &tmp[0], 3, MPI_INT, dd, 0, Comm_pt->mpi_comm(), &status);
4468 
4469 #ifdef PARANOID
4470  // Check that number of halo/haloed nodes match
4471  int nnod_haloed = tmp[0];
4472  if (nnod_haloed != nnod_halo)
4473  {
4474  std::ostringstream error_message;
4475  error_message << "Clash in numbers of halo and haloed nodes "
4476  << std::endl;
4477  error_message << " between procs " << dd << " and " << d
4478  << ": " << nnod_haloed << " " << nnod_halo
4479  << std::endl;
4480  throw OomphLibError(error_message.str(),
4481  OOMPH_CURRENT_FUNCTION,
4482  OOMPH_EXCEPTION_LOCATION);
4483  }
4484 
4485  // Check that number of external halo/haloed nodes match
4486  int nnod_ext_haloed = tmp[1];
4487  if (nnod_ext_haloed != nnod_ext_halo)
4488  {
4489  std::ostringstream error_message;
4490  error_message
4491  << "Clash in numbers of external halo and haloed nodes "
4492  << std::endl;
4493  error_message << " between procs " << dd << " and " << d
4494  << ": " << nnod_ext_haloed << " "
4495  << nnod_ext_halo << std::endl;
4496  throw OomphLibError(error_message.str(),
4497  OOMPH_CURRENT_FUNCTION,
4498  OOMPH_EXCEPTION_LOCATION);
4499  }
4500 #endif
4501 
4502  // How many unsigneds are we about to receive
4503  unsigned n_rec = tmp[2];
4504 
4505  // Get strung-together data from other proc
4506  Vector<unsigned> unsigned_rec_data(n_rec);
4507  MPI_Recv(&unsigned_rec_data[0],
4508  n_rec,
4509  MPI_UNSIGNED,
4510  dd,
4511  0,
4512  Comm_pt->mpi_comm(),
4513  &status);
4514 
4515  // Step through the flat-packed unsigneds
4516  unsigned count = 0;
4517 
4518  // Normal and external halo nodes
4519  for (unsigned loop = 0; loop < 2; loop++)
4520  {
4521  unsigned hi_nod = nnod_halo;
4522  if (loop == 1)
4523  {
4524  hi_nod = nnod_ext_halo;
4525  }
4526  for (int j = 0; j < int(hi_nod); j++)
4527  {
4528  // Which node are we dealing with
4529  Node* nod_pt = 0;
4530  if (loop == 0)
4531  {
4532  nod_pt = this->halo_node_pt(dd, j);
4533  }
4534  else
4535  {
4536  nod_pt = this->external_halo_node_pt(dd, j);
4537  }
4538 
4539  // How many values do we have locally?
4540  unsigned nval_local = nod_pt->nvalue();
4541 
4542  // Read number of values on other side
4543  unsigned nval_other = unsigned_rec_data[count++];
4544 
4545  if (nval_local != nval_other)
4546  {
4547  nod_pt->resize(nval_other);
4548  }
4549 
4550  // Read number of entries in resize map
4551  unsigned nentry = unsigned_rec_data[count++];
4552  if (nentry != 0)
4553  {
4554  // Is current node a boundary node?
4555  BoundaryNodeBase* bnod_pt =
4556  dynamic_cast<BoundaryNodeBase*>(nod_pt);
4557 #ifdef PARANOID
4558  if (bnod_pt == 0)
4559  {
4560  throw OomphLibError(
4561  "Failed to cast node to boundary node even though "
4562  "we've received data for boundary node",
4563  OOMPH_CURRENT_FUNCTION,
4564  OOMPH_EXCEPTION_LOCATION);
4565  }
4566 #endif
4567 
4568  // Create storage for map if it doesn't already exist
4569  bool already_existed = true;
4570  if (
4571  bnod_pt
4572  ->index_of_first_value_assigned_by_face_element_pt() ==
4573  0)
4574  {
4575  bnod_pt
4576  ->index_of_first_value_assigned_by_face_element_pt() =
4577  new std::map<unsigned, unsigned>;
4578  already_existed = false;
4579  }
4580 
4581  // Get pointer to the map of indices associated with
4582  // additional values created by face elements
4583  std::map<unsigned, unsigned>* map_pt =
4584  bnod_pt
4585  ->index_of_first_value_assigned_by_face_element_pt();
4586 
4587  // Loop over number of entries in map (as received)
4588  for (unsigned i = 0; i < nentry; i++)
4589  {
4590  // Read out pairs...
4591  unsigned first_received = unsigned_rec_data[count++];
4592  unsigned second_received = unsigned_rec_data[count++];
4593 
4594  // If it exists check that values are consistent:
4595  if (already_existed)
4596  {
4597 #ifdef PARANOID
4598  if ((*map_pt)[first_received] != second_received)
4599  {
4600  std::ostringstream error_message;
4601  error_message << "Existing map entry for map entry "
4602  << i << " for node located at ";
4603  unsigned n = nod_pt->ndim();
4604  for (unsigned ii = 0; ii < n; ii++)
4605  {
4606  error_message << nod_pt->position(ii) << " ";
4607  }
4608  error_message
4609  << "Key: " << first_received << " "
4610  << "Local value: " << (*map_pt)[first_received]
4611  << " "
4612  << "Received value: " << second_received
4613  << std::endl;
4614  throw OomphLibError(error_message.str(),
4615  OOMPH_CURRENT_FUNCTION,
4616  OOMPH_EXCEPTION_LOCATION);
4617  }
4618 #endif
4619  }
4620  // Else assign
4621  else
4622  {
4623  (*map_pt)[first_received] = second_received;
4624  }
4625  }
4626  }
4627  }
4628  }
4629  }
4630  }
4631  }
4632  }
4633  // Send my haloed nodes whose halo counterparts are located on processor
4634  // d
4635  else
4636  {
4637  // Storage for number of haloed nodes (entry 0), number of external
4638  // haloed nodes (entry 1) and total number of unsigneds to be sent
4639  // below (entry 2)
4640  Vector<int> tmp(3);
4641  int nnod_haloed = this->nhaloed_node(d);
4642  tmp[0] = nnod_haloed;
4643  int nnod_ext_haloed = this->nexternal_haloed_node(d);
4644  tmp[1] = nnod_ext_haloed;
4645  if ((nnod_haloed + nnod_ext_haloed) != 0)
4646  {
4647  // Now string together the data
4648  Vector<unsigned> unsigned_send_data;
4649 
4650  // Normal and external haloed nodes
4651  for (unsigned loop = 0; loop < 2; loop++)
4652  {
4653  unsigned hi_nod = nnod_haloed;
4654  if (loop == 1)
4655  {
4656  hi_nod = nnod_ext_haloed;
4657  }
4658  for (int j = 0; j < int(hi_nod); j++)
4659  {
4660  // Which node are we dealing with?
4661  Node* nod_pt = 0;
4662  if (loop == 0)
4663  {
4664  nod_pt = this->haloed_node_pt(d, j);
4665  }
4666  else
4667  {
4668  nod_pt = this->external_haloed_node_pt(d, j);
4669  }
4670 
4671  // Add number of values of node
4672  unsigned_send_data.push_back(nod_pt->nvalue());
4673 
4674  // Get pointer to the map of indices associated with
4675  // additional values created by face elements
4676 
4677  // Is it a boundary node?
4678  BoundaryNodeBase* bnod_pt =
4679  dynamic_cast<BoundaryNodeBase*>(nod_pt);
4680  if (bnod_pt == 0)
4681  {
4682  // Not a boundary node -- there are zero map entries to follow
4683  unsigned_send_data.push_back(0);
4684  }
4685  else
4686  {
4687  // It's a boundary node: Check if it's been resized
4688  std::map<unsigned, unsigned>* map_pt =
4689  bnod_pt->index_of_first_value_assigned_by_face_element_pt();
4690 
4691  // No additional values created -- there are zero map entries
4692  // to follow
4693  if (map_pt == 0)
4694  {
4695  unsigned_send_data.push_back(0);
4696  }
4697  // Created additional values
4698  else
4699  {
4700  // How many map entries were there
4701  unsigned_send_data.push_back(map_pt->size());
4702 
4703  // Loop over entries in map and add to send data
4704  for (std::map<unsigned, unsigned>::iterator p =
4705  map_pt->begin();
4706  p != map_pt->end();
4707  p++)
4708  {
4709  unsigned_send_data.push_back((*p).first);
4710  unsigned_send_data.push_back((*p).second);
4711  }
4712  }
4713  }
4714  }
4715  }
4716 
4717  // How many values are there in total?
4718  int n_send = unsigned_send_data.size();
4719  tmp[2] = n_send;
4720 
4721  // Send the counts across to the other processor
4722  MPI_Send(&tmp[0], 3, MPI_INT, d, 0, Comm_pt->mpi_comm());
4723 
4724  // Send it across to the processor
4725  MPI_Send(&unsigned_send_data[0],
4726  n_send,
4727  MPI_UNSIGNED,
4728  d,
4729  0,
4730  Comm_pt->mpi_comm());
4731  }
4732  }
4733  }
4734  }
4735 
4736  if (Global_timings::Doc_comprehensive_timings)
4737  {
4738  double t_end = TimingHelpers::timer();
4739  oomph_info << "Total time for Mesh::resize_halo_nodes(): "
4740  << t_end - t_start << std::endl;
4741  }
4742  }
4743 
4744 
4745  //========================================================================
4746  /// Get all the halo data stored in the mesh and add pointers to
4747  /// the data to the map, indexed by global equation number
4748  //=========================================================================
4749  void Mesh::get_all_halo_data(std::map<unsigned, double*>& map_of_halo_data)
4750  {
4751  // Loop over the map of Halo_nodes
4752  for (std::map<unsigned, Vector<Node*>>::iterator it = Halo_node_pt.begin();
4753  it != Halo_node_pt.end();
4754  ++it)
4755  {
4756  // Find the number of nodes
4757  unsigned n_node = (it->second).size();
4758  // Loop over them all
4759  for (unsigned n = 0; n < n_node; n++)
4760  {
4761  // Add the Node's values (including any solid values) to
4762  // the map
4763  (it->second)[n]->add_value_pt_to_map(map_of_halo_data);
4764  }
4765  } // End of loop over halo nodes
4766 
4767  // Now loop over all the halo elements and add their internal data
4768  // Loop over the map of root halo elements
4769  for (std::map<unsigned, Vector<GeneralisedElement*>>::iterator it =
4770  Root_halo_element_pt.begin();
4771  it != Root_halo_element_pt.end();
4772  ++it)
4773  {
4774  // Find the number of root elements
4775  unsigned n_element = (it->second).size();
4776  for (unsigned e = 0; e < n_element; e++)
4777  {
4778  GeneralisedElement* el_pt = (it->second)[e];
4779 
4780  // Is it a refineable element?
4781  RefineableElement* ref_el_pt = dynamic_cast<RefineableElement*>(el_pt);
4782  if (ref_el_pt != 0)
4783  {
4784  // Vector of pointers to leaves in tree emanating from
4785  // current root halo element
4786  Vector<Tree*> leaf_pt;
4787  ref_el_pt->tree_pt()->stick_leaves_into_vector(leaf_pt);
4788 
4789  // Loop over leaves and add their objects (the finite elements)
4790  // to vector
4791  unsigned nleaf = leaf_pt.size();
4792  for (unsigned l = 0; l < nleaf; l++)
4793  {
4794  leaf_pt[l]->object_pt()->add_internal_value_pt_to_map(
4795  map_of_halo_data);
4796  }
4797  }
4798  else
4799  {
4800  el_pt->add_internal_value_pt_to_map(map_of_halo_data);
4801  }
4802  }
4803  }
4804 
4805  // Repeat for the external data
4806  for (std::map<unsigned, Vector<Node*>>::iterator it =
4807  External_halo_node_pt.begin();
4808  it != External_halo_node_pt.end();
4809  ++it)
4810  {
4811  // Find the number of nodes
4812  unsigned n_node = (it->second).size();
4813  // Loop over them all
4814  for (unsigned n = 0; n < n_node; n++)
4815  {
4816  // Add the Node's values (including any solid values) to
4817  // the map
4818  (it->second)[n]->add_value_pt_to_map(map_of_halo_data);
4819  }
4820  } // End of loop over halo nodes
4821 
4822  // Now loop over all the external halo elements and add their internal data
4823  // Loop over the map of external halo elements
4824  for (std::map<unsigned, Vector<GeneralisedElement*>>::iterator it =
4825  External_halo_element_pt.begin();
4826  it != External_halo_element_pt.end();
4827  ++it)
4828  {
4829  // Find the number of root elements
4830  unsigned n_element = (it->second).size();
4831  for (unsigned e = 0; e < n_element; e++)
4832  {
4833  (it->second)[e]->add_internal_value_pt_to_map(map_of_halo_data);
4834  }
4835  }
4836  }
4837 
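// ------------------------------------------------------------------
// Editorial usage sketch (not part of mesh.cc), assuming mesh_pt points to a
// distributed Mesh:
//
//   std::map<unsigned, double*> halo_data;
//   mesh_pt->get_all_halo_data(halo_data);
//
//   // halo_data now maps global equation numbers to the storage of the
//   // corresponding halo values (nodal, solid-positional and internal)
//   // held on this processor.
// ------------------------------------------------------------------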
4838 
4839  //========================================================================
4840  /// Get halo node stats for this distributed mesh:
4841  /// Average/max/min number of halo nodes over all processors.
4842  /// \b Careful: Involves MPI Broadcasts and must therefore
4843  /// be called on all processors!
4844  //========================================================================
4845  void Mesh::get_halo_node_stats(double& av_number,
4846  unsigned& max_number,
4847  unsigned& min_number)
4848  {
4849  // Storage for number of processors and current processor
4850  int n_proc = Comm_pt->nproc();
4851  int my_rank = Comm_pt->my_rank();
4852 
4853  // Create vector to hold number of halo nodes
4854  Vector<int> nhalo_nodes(n_proc);
4855 
4856  // Stick own number of halo nodes into appropriate entry
4857  int nhalo_node_local = nhalo_node();
4858 
4859  // Gather information on root processor: First argument group
4860  // specifies what is to be sent (one int from each processor, indicating
4861  // its number of halo nodes), the second group indicates where
4862  // the results are to be gathered (in rank order) on the root processor.
4863  MPI_Gather(&nhalo_node_local,
4864  1,
4865  MPI_INT,
4866  &nhalo_nodes[0],
4867  1,
4868  MPI_INT,
4869  0,
4870  Comm_pt->mpi_comm());
4871 
4872  // Initialise stats
4873  av_number = 0.0;
4874  int max = -1;
4875  int min = 1000000000;
4876 
4877  if (my_rank == 0)
4878  {
4879  for (int i = 0; i < n_proc; i++)
4880  {
4881  av_number += double(nhalo_nodes[i]);
4882  if (int(nhalo_nodes[i]) > max) max = nhalo_nodes[i];
4883  if (int(nhalo_nodes[i]) < min) min = nhalo_nodes[i];
4884  }
4885  av_number /= double(n_proc);
4886  }
4887 
4888  // Now broadcast the result back out
4889  MPI_Bcast(&max, 1, MPI_INT, 0, Comm_pt->mpi_comm());
4890  MPI_Bcast(&min, 1, MPI_INT, 0, Comm_pt->mpi_comm());
4891  MPI_Bcast(&av_number, 1, MPI_DOUBLE, 0, Comm_pt->mpi_comm());
4892 
4893  max_number = max;
4894  min_number = min;
4895  }
4896 
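// ------------------------------------------------------------------
// Editorial usage sketch (not part of mesh.cc): because of the collective
// MPI_Gather/MPI_Bcast calls, this must be executed on every processor
// (mesh_pt assumed to point to a distributed Mesh):
//
//   double av = 0.0;
//   unsigned max_n = 0;
//   unsigned min_n = 0;
//   mesh_pt->get_halo_node_stats(av, max_n, min_n);
//   oomph_info << "Halo nodes: av " << av << ", max " << max_n
//              << ", min " << min_n << std::endl;
// ------------------------------------------------------------------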
4897 
4898  //========================================================================
4899  /// Get haloed node stats for this distributed mesh:
4900  /// Average/max/min number of haloed nodes over all processors.
4901  /// \b Careful: Involves MPI Broadcasts and must therefore
4902  /// be called on all processors!
4903  //========================================================================
4904  void Mesh::get_haloed_node_stats(double& av_number,
4905  unsigned& max_number,
4906  unsigned& min_number)
4907  {
4908  // Storage for number of processors and current processor
4909  int n_proc = Comm_pt->nproc();
4910  int my_rank = Comm_pt->my_rank();
4911 
4912  // Create vector to hold number of haloed nodes
4913  Vector<int> nhaloed_nodes(n_proc);
4914 
4915  // Stick own number of haloed nodes into appropriate entry
4916  int nhaloed_node_local = nhaloed_node();
4917 
4918  // Gather information on root processor: First argument group
4919  // specifies what is to be sent (one int from each processor, indicating
4920  // its number of haloed nodes), the second group indicates where
4921  // the results are to be gathered (in rank order) on the root processor.
4922  MPI_Gather(&nhaloed_node_local,
4923  1,
4924  MPI_INT,
4925  &nhaloed_nodes[0],
4926  1,
4927  MPI_INT,
4928  0,
4929  Comm_pt->mpi_comm());
4930 
4931  // Initialise stats
4932  av_number = 0.0;
4933  int max = -1;
4934  int min = 1000000000;
4935 
4936  if (my_rank == 0)
4937  {
4938  for (int i = 0; i < n_proc; i++)
4939  {
4940  av_number += double(nhaloed_nodes[i]);
4941  if (int(nhaloed_nodes[i]) > max) max = nhaloed_nodes[i];
4942  if (int(nhaloed_nodes[i]) < min) min = nhaloed_nodes[i];
4943  }
4944  av_number /= double(n_proc);
4945  }
4946 
4947  // Now broadcast the result back out
4948  MPI_Bcast(&max, 1, MPI_INT, 0, Comm_pt->mpi_comm());
4949  MPI_Bcast(&min, 1, MPI_INT, 0, Comm_pt->mpi_comm());
4950  MPI_Bcast(&av_number, 1, MPI_DOUBLE, 0, Comm_pt->mpi_comm());
4951 
4952  max_number = max;
4953  min_number = min;
4954  }
4955 
4956  //========================================================================
4957  /// Distribute the mesh. Add to vector of deleted elements.
4958  //========================================================================
4959  void Mesh::distribute(OomphCommunicator* comm_pt,
4960  const Vector<unsigned>& element_domain,
4961  Vector<GeneralisedElement*>& deleted_element_pt,
4962  DocInfo& doc_info,
4963  const bool& report_stats,
4964  const bool& overrule_keep_as_halo_element_status)
4965  {
4966  // Store communicator
4967  Comm_pt = comm_pt;
4968 
4969  // Storage for number of processors and current processor
4970  int n_proc = comm_pt->nproc();
4971  int my_rank = comm_pt->my_rank();
4972 
4973  // Storage for number of elements and number of nodes on this mesh
4974  unsigned nelem = this->nelement();
4975  unsigned nnod = this->nnode();
4976 
4977  std::ostringstream filename;
4978 
4979  // Doc the partitioning (only on processor 0)
4980  //-------------------------------------------
4981  if (doc_info.is_doc_enabled())
4982  {
4983  if (my_rank == 0)
4984  {
4985  // Open files for doc of element partitioning
4986  Vector<std::ofstream*> domain_file(n_proc);
4987  for (int d = 0; d < n_proc; d++)
4988  {
4989  // Note: doc_info.number() was set in Problem::distribute(...) to
4990  // reflect the submesh number
4991  // Clear the filename
4992  filename.str("");
4993  filename << doc_info.directory() << "/domain" << d << "-"
4994  << doc_info.number() << ".dat";
4995  domain_file[d] = new std::ofstream(filename.str().c_str());
4996  }
4997 
4998  // Doc
4999  for (unsigned e = 0; e < nelem; e++)
5000  {
5001  // If we can't cast to a finite element, we can't output because
5002  // there is no output function
5003  FiniteElement* f_el_pt =
5004  dynamic_cast<FiniteElement*>(this->element_pt(e));
5005  if (f_el_pt != 0)
5006  {
5007  f_el_pt->output(*domain_file[element_domain[e]], 5);
5008  }
5009  }
5010 
5011  for (int d = 0; d < n_proc; d++)
5012  {
5013  domain_file[d]->close();
5014  delete domain_file[d];
5015  domain_file[d] = 0;
5016  }
5017  }
5018  }
5019 
5020  // Loop over all elements, associate all
5021  //--------------------------------------
5022  // nodes with the highest-numbered processor and record all
5023  //---------------------------------------------------------
5024  // processors the node is associated with
5025  //---------------------------------------
5026 
5027  // Storage for processors in charge and processors associated with data
5028  std::map<Data*, std::set<unsigned>> processors_associated_with_data;
5029  std::map<Data*, unsigned> processor_in_charge;
5030 
5031  // For all nodes set the processor in charge to zero
5032  for (unsigned j = 0; j < nnod; j++)
5033  {
5034  Node* nod_pt = this->node_pt(j);
5035  processor_in_charge[nod_pt] = 0;
5036  }
5037 
5038  // Loop over elements
5039  for (unsigned e = 0; e < nelem; e++)
5040  {
5041  // Get an element and its domain
5042  FiniteElement* el_pt = dynamic_cast<FiniteElement*>(this->element_pt(e));
5043  // Element will only have nodes if it is a finite element
5044  if (el_pt != 0)
5045  {
5046  unsigned el_domain = element_domain[e];
5047 
5048  // Associate nodes with highest numbered processor
5049  unsigned nnod = el_pt->nnode();
5050  for (unsigned j = 0; j < nnod; j++)
5051  {
5052  Node* nod_pt = el_pt->node_pt(j);
5053 
5054  // processor in charge was initialised to 0 above
5055  if (el_domain > processor_in_charge[nod_pt])
5056  {
5057  processor_in_charge[nod_pt] = el_domain;
5058  }
5059  processors_associated_with_data[nod_pt].insert(el_domain);
5060  }
5061  }
5062  }
5063 
5064  // Doc the partitioning (only on processor 0)
5065  //-------------------------------------------
5066  if (doc_info.is_doc_enabled())
5067  {
5068  if (my_rank == 0)
5069  {
5070  // Open files for doc of node partitioning
5071  Vector<std::ofstream*> node_file(n_proc);
5072  for (int d = 0; d < n_proc; d++)
5073  {
5074  // Note: doc_info.number() was set in Problem::distribute(...) to
5075  // reflect the submesh number...
5076  // Clear the filename
5077  filename.str("");
5078  filename << doc_info.directory() << "/node" << d << "-"
5079  << doc_info.number() << ".dat";
5080  node_file[d] = new std::ofstream(filename.str().c_str());
5081  }
5082 
5083  // Doc
5084  for (unsigned j = 0; j < nnod; j++)
5085  {
5086  Node* nod_pt = this->node_pt(j);
5087  const unsigned n_dim = nod_pt->ndim();
5088  for (unsigned i = 0; i < n_dim; i++)
5089  {
5090  *node_file[processor_in_charge[nod_pt]] << nod_pt->x(i) << " ";
5091  }
5092  *node_file[processor_in_charge[nod_pt]] << "\n";
5093  }
5094  for (int d = 0; d < n_proc; d++)
5095  {
5096  node_file[d]->close();
5097  delete node_file[d];
5098  node_file[d] = 0;
5099  }
5100  }
5101  }
5102 
5103  // Declare all nodes as obsolete. We'll
5104  // change this setting for all nodes that must be retained
5105  // further down
5106  for (unsigned j = 0; j < nnod; j++)
5107  {
5108  this->node_pt(j)->set_obsolete();
5109  }
5110 
5111 
5112  // Backup old mesh data and flush mesh
5113  //-------------------------------------
5114 
5115  // Backup pointers to elements in this mesh
5116  Vector<GeneralisedElement*> backed_up_el_pt(nelem);
5117  for (unsigned e = 0; e < nelem; e++)
5118  {
5119  backed_up_el_pt[e] = this->element_pt(e);
5120  }
5121 
5122  // Info. used to re-generate the boundary element scheme after the
5123  // deletion of elements not belonging to the current processor.
5124 
5125  // Get the number of boundary elements before the deletion of non
5126  // retained elements
5127  const unsigned tmp_nboundary = this->nboundary();
5128  Vector<unsigned> ntmp_boundary_elements(tmp_nboundary);
5129  // In case that there are regions, also get the number of boundary
5130  // elements in each region
5131  Vector<Vector<unsigned>> ntmp_boundary_elements_in_region(tmp_nboundary);
5132  // Also get the finite element version of the elements and back them
5133  // up
5134  Vector<FiniteElement*> backed_up_f_el_pt(nelem);
5135 
5136  // Only do this if the mesh is a TriangleMeshBase
5137  TriangleMeshBase* triangle_mesh_pt = dynamic_cast<TriangleMeshBase*>(this);
5138  bool is_a_triangle_mesh_base_mesh = false;
5139  if (triangle_mesh_pt != 0)
5140  {
5141  // Set the flag to indicate we are working with a TriangleMeshBase
5142  // mesh
5143  is_a_triangle_mesh_base_mesh = true;
5144 
5145  // Are there regions?
5146  const unsigned n_regions = triangle_mesh_pt->nregion();
5147 
5148  // Loop over the boundaries
5149  for (unsigned ib = 0; ib < tmp_nboundary; ib++)
5150  {
5151  // Get the number of boundary elements
5152  ntmp_boundary_elements[ib] = this->nboundary_element(ib);
5153 
5154  // Resize the container
5155  ntmp_boundary_elements_in_region[ib].resize(n_regions);
5156 
5157  // Loop over the regions
5158  for (unsigned rr = 0; rr < n_regions; rr++)
5159  {
5160  // Get the region id
5161  const unsigned region_id =
5162  static_cast<unsigned>(triangle_mesh_pt->region_attribute(rr));
5163 
5164  // Store the number of element in the region (notice we are
5165  // using the region index not the region id to refer to the
5166  // region)
5167  ntmp_boundary_elements_in_region[ib][rr] =
5168  triangle_mesh_pt->nboundary_element_in_region(ib, region_id);
5169 
5170  } // for (rr < n_regions)
5171 
5172  } // for (ib < tmp_nboundary)
5173 
5174  for (unsigned e = 0; e < nelem; e++)
5175  {
5176  // Get the finite element version of the elements and back them
5177  // up
5178  backed_up_f_el_pt[e] = this->finite_element_pt(e);
5179  }
5180 
5181  } // if (triangle_mesh_pt!=0)
5182 
5183  // Flush the mesh storage
5184  this->flush_element_storage();
5185 
5186  // Delete any storage of external elements and nodes
5187  this->delete_all_external_storage();
5188 
5189  // Boolean to indicate which element is to be retained
5190  std::vector<bool> element_retained(nelem, false);
5191 
5192  // Storage for element numbers of root halo elements that will be
5193  // retained on current processor: root_halo_element[p][j]
5194  // stores the element number (in the order in which the elements are stored
5195  // in backed_up_el_pt) of the j-th root halo element with processor
5196  // p.
5197  Vector<Vector<int>> root_halo_element(n_proc);
5198 
5199  // Dummy entry to make sure we always have something to send
5200  for (int p = 0; p < n_proc; p++)
5201  {
5202  root_halo_element[p].push_back(-1);
5203  }
5204 
5205  // Determine which elements are going to end up on which processor
5206  //----------------------------------------------------------------
5207  unsigned number_of_retained_elements = 0;
5208 
5209  // Loop over all backed up elements
5210  nelem = backed_up_el_pt.size();
5211  for (unsigned e = 0; e < nelem; e++)
5212  {
5213  // Get element and its domain
5214  GeneralisedElement* el_pt = backed_up_el_pt[e];
5215  unsigned el_domain = element_domain[e];
5216 
5217  // If element is located on current processor add it back to the mesh
5218  if (el_domain == unsigned(my_rank))
5219  {
5220  // Add element to current processor
5221  element_retained[e] = true;
5222  number_of_retained_elements++;
5223  }
5224  // Otherwise we may still need it if it's a halo element:
5225  else
5226  {
5227  // If the current mesh has been told to keep all elements as halos,
5228  // OR the element itself knows that it must be kept, then
5229  // keep it
5230  if ((this->Keep_all_elements_as_halos) ||
5231  (el_pt->must_be_kept_as_halo()))
5232  {
5233  if (!overrule_keep_as_halo_element_status)
5234  {
5235  // Add as root halo element whose non-halo counterpart is
5236  // located on processor el_domain
5237  if (!element_retained[e])
5238  {
5239  root_halo_element[el_domain].push_back(e);
5240  element_retained[e] = true;
5241  number_of_retained_elements++;
5242  }
5243  }
5244  }
5245  // Otherwise: Is one of the nodes associated with the current processor?
5246  else
5247  {
5248  // Can only have nodes if this is a finite element
5249  FiniteElement* finite_el_pt = dynamic_cast<FiniteElement*>(el_pt);
5250  if (finite_el_pt != 0)
5251  {
5252  unsigned n_node = finite_el_pt->nnode();
5253  for (unsigned n = 0; n < n_node; n++)
5254  {
5255  Node* nod_pt = finite_el_pt->node_pt(n);
5256 
5257  // Keep element? (use stl find? -- see the sketch after this loop)
5258  bool keep_it = false;
5259  for (std::set<unsigned>::iterator it =
5260  processors_associated_with_data[nod_pt].begin();
5261  it != processors_associated_with_data[nod_pt].end();
5262  it++)
5263  {
5264  if (*it == unsigned(my_rank))
5265  {
5266  keep_it = true;
5267  // Break out of the loop over processors
5268  break;
5269  }
5270  }
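 // Illustrative aside (not part of the code): the loop above is
 // equivalent to a direct lookup in the std::set, along the lines of
 //
 //   bool keep_it =
 //     (processors_associated_with_data[nod_pt].count(unsigned(my_rank)) > 0);
 //
 // which is what the "stl find" note hints at; the explicit loop is kept
 // here to match the original implementation.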
5271 
5272  // Add as root halo element if keep_it is true
5273  if (keep_it)
5274  {
5275  // Add as root halo element whose non-halo counterpart is
5276  // located on processor el_domain
5277  if (!element_retained[e])
5278  {
5279  root_halo_element[el_domain].push_back(e);
5280  element_retained[e] = true;
5281  number_of_retained_elements++;
5282  }
5283  // Now break out of loop over nodes
5284  break;
5285  }
5286  }
5287  }
5288  } // End of testing for halo by virtue of shared nodes
5289  } // End of halo element conditions
5290  } // end of loop over elements
5291 
5292  // First check that the number of elements is greater than zero; when
5293  // working with submeshes it is possible that some of them have
5294  // no elements (face element meshes) since those may have been
5295  // deleted in "Problem::actions_before_distribute()"
5296  if (nelem > 0)
5297  {
5298  // Check whether we are working with a TriangleMeshBase mesh; if
5299  // that is the case then we need to create shared boundaries
5300  if (is_a_triangle_mesh_base_mesh)
5301  {
5302  // Creation of shared boundaries
5303  // ------------------------------
5304  // All processors share the same boundary id for the created
5305  // shared boundary. We need all the elements on all processors,
5306  // which is why this step is performed before the deletion of the
5307  // elements not associated with the current processor.
5308  // N.B.: This applies only to unstructured meshes
5309  this->create_shared_boundaries(comm_pt,
5310  element_domain,
5311  backed_up_el_pt,
5312  backed_up_f_el_pt,
5313  processors_associated_with_data,
5314  overrule_keep_as_halo_element_status);
5315  } // if (is_a_triangle_mesh_base_mesh)
5316  } // if (nelem > 0)
5317 
5318  // NOTE: No need to add an additional layer of halo elements.
5319  // The procedure for removing "overlooked" halo nodes in
5320  // classify_halo_and_haloed_nodes() deals
5321  // with the problem addressed here. [code that dealt with this
5322  // problem at distribution stage has been removed]
5323 
5324  // Store the finite element pointer version of the elements that are
5325  // about to be deleted, used to reset the boundary elements info
5326  Vector<FiniteElement*> deleted_f_element_pt;
5327 
5328  // Copy the elements associated with the actual
5329  // current processor into its own permanent storage.
5330  // Do it in the order in which the elements appeared originally
5331  nelem = backed_up_el_pt.size();
5332  for (unsigned e = 0; e < nelem; e++)
5333  {
5334  GeneralisedElement* el_pt = backed_up_el_pt[e];
5335  if (element_retained[e])
5336  {
5337  this->add_element_pt(el_pt);
5338  }
5339  else
5340  {
5341  // Flush the object attached to the tree for this element?
5342  RefineableElement* ref_el_pt = dynamic_cast<RefineableElement*>(el_pt);
5343  if (ref_el_pt != 0)
5344  {
5345  ref_el_pt->tree_pt()->flush_object();
5346  }
5347 
5348 
5349  // Store pointer to the element that's about to be deleted.
5350 
5351  // Only for structured meshes, since this "deleted_element_pt"
5352  // vector is used in the "problem" class to set null pointers for
5353  // the deleted elements in the Base_mesh_element_pt structure
5354  if (!is_a_triangle_mesh_base_mesh)
5355  {
5356  deleted_element_pt.push_back(el_pt);
5357  } // if (!is_a_triangle_mesh_base_mesh)
5358 
5359  if (is_a_triangle_mesh_base_mesh)
5360  {
5361  // Store pointer to the finite element that's about to be deleted
5362  deleted_f_element_pt.push_back(backed_up_f_el_pt[e]);
5363  }
5364 
5365  // Delete the element
5366  delete el_pt;
5367  }
5368  }
5369 
5370  // Copy the root halo elements associated with the actual
5371  // current processor into its own permanent storage; the order
5372  // here is somewhat random but we compensate for that by
5373  // ensuring that the corresponding haloed elements are
5374  // added in the same order below
5375 #ifdef PARANOID
5376  std::map<unsigned, bool> done;
5377 #endif
5378  for (int d = 0; d < n_proc; d++)
5379  {
5380  nelem = root_halo_element[d].size();
5381  for (unsigned e = 0; e < nelem; e++)
5382  {
5383  int number = root_halo_element[d][e];
5384  if (number >= 0)
5385  {
5386 #ifdef PARANOID
5387  if (done[number])
5388  {
5389  std::ostringstream error_message;
5390  error_message << "Have already added element " << number
5391  << " as root halo element\n"
5392  << std::endl;
5393  throw OomphLibError(error_message.str(),
5394  OOMPH_CURRENT_FUNCTION,
5395  OOMPH_EXCEPTION_LOCATION);
5396  }
5397  done[number] = true;
5398 #endif
5399  this->add_root_halo_element_pt(d, backed_up_el_pt[number]);
5400  }
5401  }
5402  }
5403 
5404 
5405  // Now get root haloed elements: root_haloed_element[p][j] stores
5406  // the element number (in the order in which the elements are stored
5407  // in backed_up_el_pt) of the j-th root haloed element with processor
5408  // p. On processor my_proc this is the same as root_halo_element[my_proc][j]
5409  // on processor p, so get the information by gatherv operations.
5410  Vector<Vector<unsigned>> root_haloed_element(n_proc);
5411 
5412  // Find out number of root halo elements with each other proc
5413  Vector<int> nhalo(n_proc, 0);
5414  Vector<int> nhaloed(n_proc, 0);
5415  for (int p = 0; p < n_proc; p++)
5416  {
5417  nhalo[p] = root_halo_element[p].size();
5418  }
5419 
5420  // Each processor sends the number of halo elements it has with processor
5421  // p to processor p, where this information is stored in nhaloed[...]
5422  for (int p = 0; p < n_proc; p++)
5423  {
5424  // Gather the p-th entries in nhalo from every processor on
5425  // processor p and store them in nhaloed consecutively
5426  // starting at beginning
5427  MPI_Gather(
5428  &nhalo[p], 1, MPI_INT, &nhaloed[0], 1, MPI_INT, p, comm_pt->mpi_comm());
5429  }
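 // Illustrative aside (not part of the code): after this loop, on
 // processor p the entry nhaloed[q] holds the length of processor q's
 // root_halo_element[p] list, i.e. the number of p's own elements that
 // are halo elements on q, plus one for the dummy -1 padding entry
 // (which is filtered out later).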
5430 
5431  // In the big sequence of concatenated root halo elements (enumerated
5432  // individually on the various processors) where do the root halo
5433  // elements from a given processor start? Also figure out how many
5434  // root haloed elements there are in total by summing up their numbers
5435  Vector<int> start_index(n_proc, 0);
5436  unsigned total_number_of_root_haloed_elements = 0;
5437  for (int i_proc = 0; i_proc < n_proc; i_proc++)
5438  {
5439  total_number_of_root_haloed_elements += nhaloed[i_proc];
5440  if (i_proc != 0)
5441  {
5442  start_index[i_proc] =
5443  total_number_of_root_haloed_elements - nhaloed[i_proc];
5444  }
5445  else
5446  {
5447  start_index[0] = 0;
5448  }
5449  }
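 // Illustrative aside (not part of the code): start_index is simply the
 // exclusive prefix sum of nhaloed. For example, with three processors
 // and nhaloed = {2, 3, 1} one obtains
 //
 //   start_index = {0, 2, 5},  total_number_of_root_haloed_elements = 6,
 //
 // so the data received from processor 1 occupies entries 2,...,4 of the
 // concatenated vector assembled below.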
5450 
5451  // Storage for all root haloed elements from the various processors, one
5452  // after the other, with some padding from negative entries to avoid
5453  // zero length vectors
5454  Vector<int> all_root_haloed_element(total_number_of_root_haloed_elements,
5455  0);
5456 
5457  // Now send the ids of the relevant elements via gatherv
5458  for (int p = 0; p < n_proc; p++)
5459  {
5460  // Gather the p-th vectors of root halo element numbers from every
5461  // processor on processor p and store them consecutively in
5462  // all_root_haloed_element, starting at the appropriate offsets
5463  MPI_Gatherv(&root_halo_element[p][0], // pointer to first entry in vector
5464  // to be gathered on processor p
5465  nhalo[p], // Number of entries to be sent
5466  MPI_INT,
5467  &all_root_haloed_element[0], // Target -- this will store
5468  // the element numbers of
5469  // all root haloed elements
5470  // received from other processors
5471  // in order
5472  &nhaloed[0], // Pointer to the vector containing the lengths
5473  // of the vectors received from elsewhere
5474  &start_index[0], // "offset" for storage of vector received
5475  // from various processors in the global
5476  // concatenated vector
5477  MPI_INT,
5478  p, // processor that gathers the information
5479  comm_pt->mpi_comm());
5480  }
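 // Illustrative aside (not part of the code): after the gather, the
 // root processor p holds, for every sending processor q,
 //
 //   all_root_haloed_element[start_index[q] + j]
 //     == root_halo_element[p][j] on processor q,
 //
 // for each of the nhaloed[q] entries j, i.e. p now knows which of its
 // own elements are halo elements (and in which order) on every other
 // processor.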
5481 
5482 
5483  // Determine root haloed elements
5484  //-------------------------------
5485 
5486  // Loop over all processors
5487  unsigned count = 0;
5488  for (int d = 0; d < n_proc; d++)
5489  {
5490 #ifdef PARANOID
5491  std::map<unsigned, bool> done;
5492 #endif
5493 
5494  // Loop over root haloed elements with specified processor
5495  unsigned n = nhaloed[d];
5496  for (unsigned e = 0; e < n; e++)
5497  {
5498  int number = all_root_haloed_element[count];
5499  count++;
5500 
5501  // Ignore padded -1s that were only inserted to avoid
5502  // zero sized vectors
5503  if (number >= 0)
5504  {
5505  // Get pointer to element
5506  GeneralisedElement* el_pt = backed_up_el_pt[number];
5507 
5508  // Halo elements can't be haloed themselves
5509  if (!el_pt->is_halo())
5510  {
5511 #ifdef PARANOID
5512  if (done[number])
5513  {
5514  std::ostringstream error_message;
5515  error_message << "Have already added element " << number
5516  << " as root haloed element\n"
5517  << std::endl;
5518  throw OomphLibError(error_message.str(),
5519  OOMPH_CURRENT_FUNCTION,
5520  OOMPH_EXCEPTION_LOCATION);
5521  }
5522  done[number] = true;
5523 #endif
5524 
5525  // Current element is haloed by other processor
5526  this->add_root_haloed_element_pt(d, el_pt);
5527  }
5528  }
5529  }
5530  }
5531 
5532 
5533  // Doc stats
5534  if (report_stats)
5535  {
5536  oomph_info << "Processor " << my_rank << " holds " << this->nelement()
5537  << " elements of which " << this->nroot_halo_element()
5538  << " are root halo elements \n while "
5539  << this->nroot_haloed_element() << " are root haloed elements"
5540  << std::endl;
5541  }
5542 
5543 
5544  // Loop over all retained elements and mark their nodes
5545  //-----------------------------------------------------
5546  // as to be retained too (some double counting going on here)
5547  //-----------------------------------------------------------
5548  nelem = this->nelement();
5549  for (unsigned e = 0; e < nelem; e++)
5550  {
5551  FiniteElement* f_el_pt =
5552  dynamic_cast<FiniteElement*>(this->element_pt(e));
5553 
5554  // If we have a finite element
5555  if (f_el_pt != 0)
5556  {
5557  // Loop over nodes
5558  unsigned nnod = f_el_pt->nnode();
5559  for (unsigned j = 0; j < nnod; j++)
5560  {
5561  Node* nod_pt = f_el_pt->node_pt(j);
5562  nod_pt->set_non_obsolete();
5563  }
5564  }
5565  }
5566 
5567 
5568  // Now remove the obsolete ("dead") nodes
5569  this->prune_dead_nodes();
5570 
5571 
5572 #ifdef OOMPH_HAS_TRIANGLE_LIB
5573  if (is_a_triangle_mesh_base_mesh)
5574  {
5575  triangle_mesh_pt->reset_boundary_element_info(
5576  ntmp_boundary_elements,
5577  ntmp_boundary_elements_in_region,
5578  deleted_f_element_pt);
5579  } // if (is_a_triangle_mesh_base_mesh)
5580  else
5581  {
5582 #endif // #ifdef OOMPH_HAS_TRIANGLE_LIB
5583 
5584  // Temporarily set the mesh as distributed
5586 
5587 #ifdef OOMPH_HAS_TRIANGLE_LIB
5588  }
5589 #endif
5590 
5591  // Re-setup tree forest. (Call this every time even if
5592  // a (distributed) mesh has no elements on this processor.
5593  // We still need to participate in communication.)
5594  TreeBasedRefineableMeshBase* ref_mesh_pt =
5595  dynamic_cast<TreeBasedRefineableMeshBase*>(this);
5596  if (ref_mesh_pt != 0)
5597  {
5598  ref_mesh_pt->setup_tree_forest();
5599  }
5600 
5601  // Classify nodes
5602  classify_halo_and_haloed_nodes(doc_info, report_stats);
5603 
5604  // Doc?
5605  //-----
5606  if (doc_info.is_doc_enabled())
5607  {
5608  doc_mesh_distribution(doc_info);
5609  }
5610  }
5611 
5612 
5613  //========================================================================
5614  /// (Irreversibly) prune halo(ed) elements and nodes, usually
5615  /// after another round of refinement, to get rid of
5616  /// excessively wide halo layers. Note that the current
5617  /// mesh will now be regarded as the base mesh and no unrefinement
5618  /// relative to it will be possible once this function
5619  /// has been called.
5620  //========================================================================
5621  void Mesh::prune_halo_elements_and_nodes(
5622  Vector<GeneralisedElement*>& deleted_element_pt,
5623  DocInfo& doc_info,
5624  const bool& report_stats)
5625  {
5626  // MemoryUsage::doc_memory_usage("at start of mesh-level prunes");
5627 
5628 
5629 #ifdef OOMPH_HAS_MPI
5630  // Delete any external element storage before performing the redistribution
5631  // (in particular, external halo nodes that are on mesh boundaries)
5633 #endif
5634 
5635  TreeBasedRefineableMeshBase* ref_mesh_pt =
5636  dynamic_cast<TreeBasedRefineableMeshBase*>(this);
5637  if (ref_mesh_pt != 0)
5638  {
5639  // Storage for number of processors and current processor
5640  int n_proc = Comm_pt->nproc();
5641  int my_rank = Comm_pt->my_rank();
5642 
5643  // Doc stats
5644  if (report_stats)
5645  {
5646  oomph_info
5647  << "\n----------------------------------------------------\n";
5648  oomph_info << "Before pruning: Processor " << my_rank << " holds "
5649  << this->nelement() << " elements of which "
5650  << this->nroot_halo_element()
5651  << " are root halo elements \n while "
5652  << this->nroot_haloed_element()
5653  << " are root haloed elements" << std::endl;
5654 
5655  // Report total number of halo(ed) and shared nodes for this process
5656  oomph_info << "Before pruning: Processor " << my_rank << " holds "
5657  << this->nnode() << " nodes of which " << this->nhalo_node()
5658  << " are halo nodes \n while " << this->nhaloed_node()
5659  << " are haloed nodes, and " << this->nshared_node()
5660  << " are shared nodes." << std::endl;
5661 
5662  // Report number of halo(ed) and shared nodes with each domain
5663  // from the current process
5664  for (int iproc = 0; iproc < n_proc; iproc++)
5665  {
5666  oomph_info << "Before pruning: With process " << iproc
5667  << ", there are " << this->nhalo_node(iproc)
5668  << " halo nodes, and " << std::endl
5669  << this->nhaloed_node(iproc) << " haloed nodes, and "
5670  << this->nshared_node(iproc) << " shared nodes"
5671  << std::endl;
5672  }
5673  oomph_info
5674  << "----------------------------------------------------\n\n";
5675  }
5676 
5677 
5678  double t_start = 0.0;
5679  if (Global_timings::Doc_comprehensive_timings)
5680  {
5681  t_start = TimingHelpers::timer();
5682  }
5683 
5684  // Declare all nodes as obsolete. We'll
5685  // change this setting for all nodes that must be retained
5686  // further down
5687  unsigned nnod = this->nnode();
5688  for (unsigned j = 0; j < nnod; j++)
5689  {
5690  this->node_pt(j)->set_obsolete();
5691  }
5692 
5693  // Backup pointers to elements in this mesh (they must be finite elements
5694  // because it's a refineable mesh)
5695  unsigned nelem = this->nelement();
5696  Vector<FiniteElement*> backed_up_el_pt(nelem);
5697  std::map<RefineableElement*, bool> keep_element;
5698  for (unsigned e = 0; e < nelem; e++)
5699  {
5700  backed_up_el_pt[e] = this->finite_element_pt(e);
5701  }
5702 
5703  // MemoryUsage::doc_memory_usage("after backed up elements");
5704 
5705  // Get the min and max refinement level, and current refinement pattern
5706  unsigned min_ref = 0;
5707  unsigned max_ref = 0;
5708 
5709  // Get min and max refinement level
5710  unsigned local_min_ref = 0;
5711  unsigned local_max_ref = 0;
5712  ref_mesh_pt->get_refinement_levels(local_min_ref, local_max_ref);
5713 
5714  // Reconcile between processors: If (e.g. following distribution/pruning)
5715  // the mesh has no elements on this processor, then ignore its
5716  // contribution to the pool of max/min refinement levels
5717  int int_local_min_ref = local_min_ref;
5718  int int_local_max_ref = local_max_ref;
5719 
5720  if (nelem == 0)
5721  {
5722  int_local_min_ref = INT_MAX;
5723  int_local_max_ref = INT_MIN;
5724  }
5725 
5726  int int_min_ref = 0;
5727  int int_max_ref = 0;
5728 
5729  MPI_Allreduce(&int_local_min_ref,
5730  &int_min_ref,
5731  1,
5732  MPI_INT,
5733  MPI_MIN,
5734  Comm_pt->mpi_comm());
5735  MPI_Allreduce(&int_local_max_ref,
5736  &int_max_ref,
5737  1,
5738  MPI_INT,
5739  MPI_MAX,
5740  Comm_pt->mpi_comm());
5741 
5742  min_ref = unsigned(int_min_ref);
5743  max_ref = unsigned(int_max_ref);
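 // Illustrative aside (not part of the code): the INT_MAX/INT_MIN
 // sentinels make processors with empty meshes neutral in the reductions.
 // E.g. if two processors report local (min,max) refinement levels of
 // (2,4) and (3,5) and a third, empty one contributes (INT_MAX,INT_MIN),
 // the MPI_MIN/MPI_MAX reductions still yield min_ref = 2 and max_ref = 5.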
5744 
5745  // Get refinement pattern
5746  Vector<Vector<unsigned>> current_refined;
5747  ref_mesh_pt->get_refinement_pattern(current_refined);
5748 
5749  // get_refinement_pattern refers to the elements at each level
5750  // that were refined when proceeding to the next level
5751  int local_n_ref = current_refined.size();
5752 
5753  // Bypass if no elements
5754  if (nelem == 0)
5755  {
5756  local_n_ref = INT_MIN;
5757  }
5758 
5759  // Reconcile between processors
5760  int int_n_ref = 0;
5761  MPI_Allreduce(
5762  &local_n_ref, &int_n_ref, 1, MPI_INT, MPI_MAX, Comm_pt->mpi_comm());
5763  unsigned n_ref = int(int_n_ref);
5764 
5765 
5766  double t_end = 0.0;
5767  if (Global_timings::Doc_comprehensive_timings)
5768  {
5769  t_end = TimingHelpers::timer();
5770  oomph_info
5771  << "Time for establishing refinement levels in "
5772  << " Mesh::prune_halo_elements_and_nodes() [includes comms]: "
5773  << t_end - t_start << std::endl;
5774  t_start = TimingHelpers::timer();
5775  }
5776 
5777  // MemoryUsage::doc_memory_usage("after establishing refinement levels");
5778 
5779  // Bypass everything until next comms if no elements
5780  if (nelem > 0)
5781  {
5782  // Loop over all elements; keep those on the min refinement level
5783  // Need to go back to the level indicated by min_ref
5784  unsigned base_level = n_ref - (max_ref - min_ref);
5785 
5786  // This is the new minimum uniform refinement level
5787  // relative to total unrefined base mesh
5788  ref_mesh_pt->uniform_refinement_level_when_pruned() = min_ref;
5789 
5790  // Get the elements at the specified "base" refinement level
5791  Vector<RefineableElement*> base_level_elements_pt;
5792  ref_mesh_pt->get_elements_at_refinement_level(base_level,
5793  base_level_elements_pt);
5794 
5795  // Loop over the elements at this level
5796  unsigned n_base_el = base_level_elements_pt.size();
5797  for (unsigned e = 0; e < n_base_el; e++)
5798  {
5799  // Extract correct element...
5800  RefineableElement* ref_el_pt = base_level_elements_pt[e];
5801 
5802 
5803  // Check it exists
5804  if (ref_el_pt != 0)
5805  {
5806  // Keep all non-halo elements, remove excess halos
5807  if ((!ref_el_pt->is_halo()) || (ref_el_pt->must_be_kept_as_halo()))
5808  {
5809  keep_element[ref_el_pt] = true;
5810 
5811  // Loop over this non-halo element's nodes and retain them too
5812  unsigned nnod = ref_el_pt->nnode();
5813  for (unsigned j = 0; j < nnod; j++)
5814  {
5815  ref_el_pt->node_pt(j)->set_non_obsolete();
5816  }
5817  }
5818  }
5819 
5820  } // end loop over base level elements
5821  }
5822 
5823  // Synchronise refinement level when pruned over all meshes even if they
5824  // were empty (in which case the uniform refinement level is still zero,
5825  // so go for the max)
5826  unsigned n_unif = 0;
5827  unsigned n_unif_local =
5828  ref_mesh_pt->uniform_refinement_level_when_pruned();
5829  MPI_Allreduce(
5830  &n_unif_local, &n_unif, 1, MPI_UNSIGNED, MPI_MAX, Comm_pt->mpi_comm());
5831  ref_mesh_pt->uniform_refinement_level_when_pruned() = n_unif;
5832 
5833 
5834  t_end = 0.0;
5835  if (Global_timings::Doc_comprehensive_timings)
5836  {
5837  t_end = TimingHelpers::timer();
5838  oomph_info
5839  << "Time for synchronising refinement levels in "
5840  << " Mesh::prune_halo_elements_and_nodes() [includes comms]: "
5841  << t_end - t_start << std::endl;
5842  t_start = TimingHelpers::timer();
5843  }
5844 
5845 
5846  // MemoryUsage::doc_memory_usage("after synchronising refinement levels");
5847 
5848 
5849  // Now work out which "root" halo elements to keep at this level.
5850  // We can't use the current set directly; however,
5851  // we know the refinement level of the current halo elements, so
5852  // it is possible to go from that backwards to find the "father
5853  // halo element" necessary to complete this step
5854 
5855  // Temp map of vectors holding the pointers to the root halo elements
5856  std::map<unsigned, Vector<FiniteElement*>> tmp_root_halo_element_pt;
5857 
5858  // Temp map of vectors holding the pointers to the root haloed elements
5859  std::map<unsigned, Vector<FiniteElement*>> tmp_root_haloed_element_pt;
5860 
5861  // Make sure we only push back each element once
5862  std::map<unsigned, std::map<FiniteElement*, bool>>
5863  tmp_root_halo_element_is_retained;
5864 
5865  // Map to store if a halo element survives
5866  std::map<FiniteElement*, bool> halo_element_is_retained;
5867 
5868  for (int domain = 0; domain < n_proc; domain++)
5869  {
5870  // Get vector of halo elements with processor domain by copy operation
5871  Vector<GeneralisedElement*> halo_elem_pt(this->halo_element_pt(domain));
5872 
5873  // Loop over halo elements associated with this adjacent domain
5874  unsigned nelem = halo_elem_pt.size();
5875  for (unsigned e = 0; e < nelem; e++)
5876  {
5877  // Get element
5878  RefineableElement* ref_el_pt =
5879  dynamic_cast<RefineableElement*>(halo_elem_pt[e]);
5880 
5881  // An element should only be kept if its refinement
5882  // level is the same as the minimum refinement level
5883  unsigned halo_el_level = ref_el_pt->refinement_level();
5884 
5885  RefineableElement* el_pt = 0;
5886  if (halo_el_level == min_ref)
5887  {
5888  // Already at the correct level
5889  el_pt = ref_el_pt;
5890  }
5891  else
5892  {
5893  // Need to go up the tree to the father element at min_ref
5894  RefineableElement* father_el_pt;
5895  ref_el_pt->get_father_at_refinement_level(min_ref, father_el_pt);
5896  el_pt = father_el_pt;
5897  }
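 // Illustrative aside (not part of the code): get_father_at_refinement_level()
 // simply walks up the element's tree. E.g. a halo element at refinement
 // level 4 with min_ref = 2 is replaced by its grandfather, so the
 // retention decision below is made for the coarse element that will
 // survive the prune.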
5898 
5899  // Now loop over nodes in the halo element and retain it as
5900  // halo element if any of its nodes are shared with one of the
5901  // non-halo-elements that we've already identified earlier -- these
5902  // were set to non-obsolete above.
5903  unsigned nnod = el_pt->nnode();
5904  for (unsigned j = 0; j < nnod; j++)
5905  {
5906  // Node has been reclaimed by one of the non-halo elements above
5907  Node* nod_pt = el_pt->node_pt(j);
5908  if (!nod_pt->is_obsolete())
5909  {
5910  // Keep element and add it to preliminary storage for
5911  // halo elements associated with current neighbouring domain
5912  if (!tmp_root_halo_element_is_retained[domain][el_pt])
5913  {
5914  tmp_root_halo_element_pt[domain].push_back(el_pt);
5915  tmp_root_halo_element_is_retained[domain][el_pt] = true;
5916  }
5917  keep_element[el_pt] = true;
5918  halo_element_is_retained[el_pt] = true;
5919  break;
5920  }
5921  }
5922  }
5923  }
5924 
5925  if (Global_timings::Doc_comprehensive_timings)
5926  {
5927  t_end = TimingHelpers::timer();
5928  oomph_info << "Time for setup of retention pattern in "
5929  << " Mesh::prune_halo_elements_and_nodes(): "
5930  << t_end - t_start << std::endl;
5931  t_start = TimingHelpers::timer();
5932  }
5933 
5934  // MemoryUsage::doc_memory_usage("after setup of retention pattern");
5935 
5936  // Make sure everybody finishes this part
5937  MPI_Barrier(Comm_pt->mpi_comm());
5938 
5939  // Now all processors have decided (independently) which of their
5940  // (to-be root) halo elements they wish to retain. Now we need to figure
5941  // out which of their elements are haloed and add them in the appropriate
5942  // order into the haloed element scheme. For this we exploit that
5943  // the halo and haloed elements are accessed in the same order on
5944  // all processors!
5945 
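 // Illustrative aside (not part of the code): the handshake below relies
 // on the invariant that, for a pair of processors (d, dd),
 //
 //   this->haloed_element_pt(dd) on processor d
 //   this->halo_element_pt(d)    on processor dd
 //
 // list the same elements in the same order, so a plain vector of 0/1
 // flags (halo_kept) can be matched up entry by entry without sending
 // any element identifiers.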
5946  // Identify haloed elements on domain d
5947  for (int d = 0; d < n_proc; d++)
5948  {
5949  // Loop over domains that halo this domain
5950  for (int dd = 0; dd < n_proc; dd++)
5951  {
5952  // Don't talk to yourself
5953  if (d != dd)
5954  {
5955  // If we're identifying my haloed elements:
5956  if (d == my_rank)
5957  {
5958  // Get vector all elements that are currently haloed by domain dd
5959  Vector<GeneralisedElement*> haloed_elem_pt(
5960  this->haloed_element_pt(dd));
5961 
5962  // Create a vector of ints to indicate if the halo element
5963  // on processor dd was kept
5964  unsigned nelem = haloed_elem_pt.size();
5965  Vector<int> halo_kept(nelem);
5966 
5967  // Receive this vector from processor dd
5968  if (nelem != 0)
5969  {
5970  MPI_Status status;
5971  MPI_Recv(&halo_kept[0],
5972  nelem,
5973  MPI_INT,
5974  dd,
5975  0,
5976  Comm_pt->mpi_comm(),
5977  &status);
5978 
5979  // Classify haloed element accordingly
5980  for (unsigned e = 0; e < nelem; e++)
5981  {
5982  RefineableElement* ref_el_pt =
5983  dynamic_cast<RefineableElement*>(haloed_elem_pt[e]);
5984 
5985  // An element should only be kept if its refinement
5986  // level is the same as the minimum refinement level
5987  unsigned haloed_el_level = ref_el_pt->refinement_level();
5988 
5989  // Go up the tree to the correct level
5990  RefineableElement* el_pt;
5991 
5992  if (haloed_el_level == min_ref)
5993  {
5994  // Already at the correct level
5995  el_pt = ref_el_pt;
5996  }
5997  else
5998  {
5999  // Need to go up the tree to the father element at min_ref
6000  RefineableElement* father_el_pt;
6001  ref_el_pt->get_father_at_refinement_level(min_ref,
6002  father_el_pt);
6003  el_pt = father_el_pt;
6004  }
6005 
6006  if (halo_kept[e] == 1)
6007  {
6008  // I am being haloed by processor dd
6009  // Only keep it if it's not already in the storage
6010  bool already_root_haloed = false;
6011  unsigned n_root_haloed =
6012  tmp_root_haloed_element_pt[dd].size();
6013  for (unsigned e_root = 0; e_root < n_root_haloed; e_root++)
6014  {
6015  if (el_pt == tmp_root_haloed_element_pt[dd][e_root])
6016  {
6017  already_root_haloed = true;
6018  break;
6019  }
6020  }
6021  if (!already_root_haloed)
6022  {
6023  tmp_root_haloed_element_pt[dd].push_back(el_pt);
6024  }
6025  }
6026  }
6027  }
6028  }
6029  else
6030  {
6031  // If we're dealing with my halo elements:
6032  if (dd == my_rank)
6033  {
6034  // Find (current) halo elements on processor dd whose non-halo
6035  // counterpart is on processor d
6036  Vector<GeneralisedElement*> halo_elem_pt(
6037  this->halo_element_pt(d));
6038 
6039  // Create a vector of ints to indicate if the halo
6040  // element was kept
6041  unsigned nelem = halo_elem_pt.size();
6042  Vector<int> halo_kept(nelem, 0);
6043  for (unsigned e = 0; e < nelem; e++)
6044  {
6045  RefineableElement* ref_el_pt =
6046  dynamic_cast<RefineableElement*>(halo_elem_pt[e]);
6047 
6048  // An element should only be kept if its refinement
6049  // level is the same as the minimum refinement level
6050  unsigned halo_el_level = ref_el_pt->refinement_level();
6051 
6052  // Go up the tree to the correct level
6053  RefineableElement* el_pt;
6054  if (halo_el_level == min_ref)
6055  {
6056  // Already at the correct level
6057  el_pt = ref_el_pt;
6058  }
6059  else
6060  {
6061  // Need to go up the tree to the father element at min_ref
6062  RefineableElement* father_el_pt;
6063  ref_el_pt->get_father_at_refinement_level(min_ref,
6064  father_el_pt);
6065  el_pt = father_el_pt;
6066  }
6067 
6068  if (halo_element_is_retained[el_pt])
6069  {
6070  halo_kept[e] = 1;
6071  }
6072  }
6073 
6074  // Now send this vector to processor d to tell it which of
6075  // the haloed elements (which are listed in the same order)
6076  // are to be retained as haloed elements.
6077  if (nelem != 0)
6078  {
6079  MPI_Send(
6080  &halo_kept[0], nelem, MPI_INT, d, 0, Comm_pt->mpi_comm());
6081  }
6082  }
6083  }
6084  }
6085  }
6086  }
6087 
6088  if (Global_timings::Doc_comprehensive_timings)
6089  {
6090  t_end = TimingHelpers::timer();
6091  oomph_info << "Time for pt2pt comms of retention pattern in "
6092  << " Mesh::prune_halo_elements_and_nodes(): "
6093  << t_end - t_start << std::endl;
6094  t_start = TimingHelpers::timer();
6095  }
6096 
6097  // MemoryUsage::doc_memory_usage("after pt2pt comms of retention
6098  // pattern");
6099 
6100 
6101  // Backup pointers to nodes in this mesh
6102  nnod = this->nnode();
6103  Vector<Node*> backed_up_nod_pt(nnod);
6104  for (unsigned j = 0; j < nnod; j++)
6105  {
6106  backed_up_nod_pt[j] = this->node_pt(j);
6107  }
6108 
6109  // Flush the mesh storage
6111 
6112  // Storage for non-active trees/elements that are to be deleted
6113  std::set<Tree*> trees_to_be_deleted_pt;
6114 
6115  // Loop over all backed-up elements
6116  nelem = backed_up_el_pt.size();
6117  for (unsigned e = 0; e < nelem; e++)
6118  {
6119  RefineableElement* ref_el_pt =
6120  dynamic_cast<RefineableElement*>(backed_up_el_pt[e]);
6121 
6122  // Get refinement level
6123  unsigned level = ref_el_pt->refinement_level();
6124 
6125  // Go up the tree to the correct level
6126  RefineableElement* el_pt;
6127 
6128  if (level == min_ref)
6129  {
6130  // Already at the correct level
6131  el_pt = ref_el_pt;
6132  }
6133  else
6134  {
6135  // Need to go up the tree to the father element at min_ref
6136  RefineableElement* father_el_pt;
6137  ref_el_pt->get_father_at_refinement_level(min_ref, father_el_pt);
6138  el_pt = father_el_pt;
6139  }
6140 
6141  // If the base element is going to be kept, then add the current element
6142  // to the "new" mesh
6143  if (keep_element[el_pt])
6144  {
6145  this->add_element_pt(ref_el_pt);
6146  }
6147  else
6148  {
6149  // Flush the object attached to the tree for this element?
6150  RefineableElement* my_el_pt =
6151  dynamic_cast<RefineableElement*>(ref_el_pt);
6152  if (my_el_pt != 0)
6153  {
6154  my_el_pt->tree_pt()->flush_object();
6155  }
6156 
6157  // Get associated tree root
6158  Tree* tmp_tree_root_pt = my_el_pt->tree_pt()->root_pt();
6159 
6160  // Get all the tree nodes
6161  Vector<Tree*> tmp_all_tree_nodes_pt;
6162  tmp_tree_root_pt->stick_all_tree_nodes_into_vector(
6163  tmp_all_tree_nodes_pt);
6164 
6165  // Loop over all of them and record those whose associated objects
6166  // are to be deleted (and flushed from their trees) below
6167  unsigned n_tree = tmp_all_tree_nodes_pt.size();
6168  for (unsigned j = 0; j < n_tree; j++)
6169  {
6170  if (tmp_all_tree_nodes_pt[j]->object_pt() != 0)
6171  {
6172  unsigned lev =
6173  tmp_all_tree_nodes_pt[j]->object_pt()->refinement_level();
6174  if (lev <= min_ref)
6175  {
6176  if (!keep_element[tmp_all_tree_nodes_pt[j]->object_pt()])
6177  {
6178  trees_to_be_deleted_pt.insert(tmp_all_tree_nodes_pt[j]);
6179  }
6180  }
6181  }
6182  }
6183 
6184  // Delete the element
6185  deleted_element_pt.push_back(ref_el_pt);
6186  delete ref_el_pt;
6187  }
6188  }
6189 
6190  // MemoryUsage::doc_memory_usage("before deleting superfluous elements");
6191 
6192  // Delete superfluous elements
6193  for (std::set<Tree*>::iterator it = trees_to_be_deleted_pt.begin();
6194  it != trees_to_be_deleted_pt.end();
6195  it++)
6196  {
6197  Tree* tree_pt = (*it);
6198  if (tree_pt->object_pt() != 0)
6199  {
6200  deleted_element_pt.push_back(tree_pt->object_pt());
6201  delete tree_pt->object_pt();
6202  tree_pt->flush_object();
6203  }
6204  }
6205 
6206  // MemoryUsage::doc_memory_usage("after deleting superfluous elements");
6207 
6208 
6209  // Wipe the storage scheme for (root) halo(ed) elements and then re-assign
6210  Root_haloed_element_pt.clear();
6211  Root_halo_element_pt.clear();
6212  for (int domain = 0; domain < n_proc; domain++)
6213  {
6214  unsigned nelem = tmp_root_halo_element_pt[domain].size();
6215  for (unsigned e = 0; e < nelem; e++)
6216  {
6217  Root_halo_element_pt[domain].push_back(
6218  tmp_root_halo_element_pt[domain][e]);
6219  }
6220 
6221  nelem = tmp_root_haloed_element_pt[domain].size();
6222  for (unsigned e = 0; e < nelem; e++)
6223  {
6224  Root_haloed_element_pt[domain].push_back(
6225  tmp_root_haloed_element_pt[domain][e]);
6226  }
6227  }
6228 
6229  // MemoryUsage::doc_memory_usage(
6230  // "after wiping storage scheme for root halo/ed els");
6231 
6232  // Loop over all retained elements at this level and mark their nodes
6233  //-------------------------------------------------------------------
6234  // as to be retained too (some double counting going on here)
6235  //-----------------------------------------------------------
6236  nelem = this->nelement();
6237  for (unsigned e = 0; e < nelem; e++)
6238  {
6239  FiniteElement* el_pt = this->finite_element_pt(e);
6240 
6241  // Loop over nodes
6242  unsigned nnod = el_pt->nnode();
6243  for (unsigned j = 0; j < nnod; j++)
6244  {
6245  Node* nod_pt = el_pt->node_pt(j);
6246  nod_pt->set_non_obsolete();
6247  }
6248  }
6249 
6250  // Complete rebuild of mesh by adding retained nodes
6251  // Note that they are added in the order in which they
6252  // occurred in the original mesh as this guarantees the
6253  // synchronisation between the serialised access to halo
6254  // and haloed nodes from different processors.
6255  nnod = backed_up_nod_pt.size();
6256  for (unsigned j = 0; j < nnod; j++)
6257  {
6258  Node* nod_pt = backed_up_nod_pt[j];
6259  if (!nod_pt->is_obsolete())
6260  {
6261  // Not obsolete so add it back to the mesh
6262  this->add_node_pt(nod_pt);
6263  }
6264  }
6265 
6266  // MemoryUsage::doc_memory_usage("after adding nodes back in");
6267 
6268  // Prune and rebuild mesh
6269  //-----------------------
6270 
6271  // Now remove the obsolete ("dead") nodes from the mesh and the boundary lookup scheme
6272  this->prune_dead_nodes();
6273 
6274  // MemoryUsage::doc_memory_usage("after prune dead nodes");
6275 
6276  // And finally re-setup the boundary lookup scheme for elements
6278 
6279  // MemoryUsage::doc_memory_usage("after setup boundary info");
6280 
6281  // Re-setup tree forest if needed. (Call this every time even if
6282  // a (distributed) mesh has no elements on this processor.
6283  // We still need to participate in communication.)
6284  TreeBasedRefineableMeshBase* ref_mesh_pt =
6285  dynamic_cast<TreeBasedRefineableMeshBase*>(this);
6286  if (ref_mesh_pt != 0)
6287  {
6288  ref_mesh_pt->setup_tree_forest();
6289  }
6290 
6291  // MemoryUsage::doc_memory_usage("after setup tree forest");
6292 
6293  if (Global_timings::Doc_comprehensive_timings)
6294  {
6295  t_end = TimingHelpers::timer();
6296  oomph_info
6297  << "Time for local rebuild of mesh from retained elements in "
6298  << " Mesh::prune_halo_elements_and_nodes(): " << t_end - t_start
6299  << std::endl;
6300  t_start = TimingHelpers::timer();
6301  }
6302 
6303  // Classify nodes
6304  classify_halo_and_haloed_nodes(doc_info, report_stats);
6305 
6306  if (Global_timings::Doc_comprehensive_timings)
6307  {
6308  t_end = TimingHelpers::timer();
6309  oomph_info << "Time for Mesh::classify_halo_and_haloed_nodes() "
6310  << " Mesh::prune_halo_elements_and_nodes(): "
6311  << t_end - t_start << std::endl;
6312  t_start = TimingHelpers::timer();
6313  }
6314 
6315  // MemoryUsage::doc_memory_usage("after
6316  // classify_halo_and_haloed_nodes()");
6317 
6318  // Doc?
6319  //-----
6320  if (doc_info.is_doc_enabled())
6321  {
6322  oomph_info << "Outputting distribution in " << doc_info.directory()
6323  << " " << doc_info.number() << std::endl;
6324  doc_mesh_distribution(doc_info);
6325  }
6326 
6327  // Finally: Reorder the nodes within the mesh's node vector
6328  // to establish a standard ordering regardless of the sequence
6329  // of mesh refinements -- this is required to allow dump/restart
6330  // on refined meshes
6331  this->reorder_nodes();
6332 
6333 
6334  if (Global_timings::Doc_comprehensive_timings)
6335  {
6336  t_end = TimingHelpers::timer();
6337  oomph_info << "Time for Mesh::reorder_nodes() "
6338  << " Mesh::prune_halo_elements_and_nodes(): "
6339  << t_end - t_start << std::endl;
6340  t_start = TimingHelpers::timer();
6341  }
6342 
6343  // MemoryUsage::doc_memory_usage("after reorder nodes");
6344 
6345  // Doc stats
6346  if (report_stats)
6347  {
6348  oomph_info
6349  << "\n----------------------------------------------------\n";
6350  oomph_info << "After pruning: Processor " << my_rank << " holds "
6351  << this->nelement() << " elements of which "
6352  << this->nroot_halo_element()
6353  << " are root halo elements \n while "
6354  << this->nroot_haloed_element()
6355  << " are root haloed elements" << std::endl;
6356 
6357  // Report total number of halo(ed) and shared nodes for this process
6358  oomph_info << "After pruning: Processor " << my_rank << " holds "
6359  << this->nnode() << " nodes of which " << this->nhalo_node()
6360  << " are halo nodes \n while " << this->nhaloed_node()
6361  << " are haloed nodes, and " << this->nshared_node()
6362  << " are shared nodes." << std::endl;
6363 
6364  // Report number of halo(ed) and shared nodes with each domain
6365  // from the current process
6366  for (int iproc = 0; iproc < n_proc; iproc++)
6367  {
6368  oomph_info << "After pruning: With process " << iproc
6369  << ", there are " << this->nhalo_node(iproc)
6370  << " halo nodes, and " << std::endl
6371  << this->nhaloed_node(iproc) << " haloed nodes, and "
6372  << this->nshared_node(iproc) << " shared nodes"
6373  << std::endl;
6374  }
6375  oomph_info
6376  << "----------------------------------------------------\n\n";
6377  }
6378  }
6379 
6380  // MemoryUsage::doc_memory_usage("end of mesh level prune");
6381  }
6382 
6383 
6384  //========================================================================
6385  /// Get efficiency of mesh distribution: In an ideal distribution
6386  /// without halo overhead, each processor would only hold its own
6387  /// elements. Efficiency per processor = (number of non-halo elements)/
6388  /// (total number of elements).
6389  //========================================================================
6390  void Mesh::get_efficiency_of_mesh_distribution(double& av_efficiency,
6391  double& max_efficiency,
6392  double& min_efficiency)
6393  {
6394  // Storage for number of processors and current processor
6395  int n_proc = Comm_pt->nproc();
6396