block_preconditioner.cc
1 // LIC// ====================================================================
2 // LIC// This file forms part of oomph-lib, the object-oriented,
3 // LIC// multi-physics finite-element library, available
4 // LIC// at http://www.oomph-lib.org.
5 // LIC//
6 // LIC// Copyright (C) 2006-2024 Matthias Heil and Andrew Hazel
7 // LIC//
8 // LIC// This library is free software; you can redistribute it and/or
9 // LIC// modify it under the terms of the GNU Lesser General Public
10 // LIC// License as published by the Free Software Foundation; either
11 // LIC// version 2.1 of the License, or (at your option) any later version.
12 // LIC//
13 // LIC// This library is distributed in the hope that it will be useful,
14 // LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // LIC// Lesser General Public License for more details.
17 // LIC//
18 // LIC// You should have received a copy of the GNU Lesser General Public
19 // LIC// License along with this library; if not, write to the Free Software
20 // LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 // LIC// 02110-1301 USA.
22 // LIC//
23 // LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk.
24 // LIC//
25 // LIC//====================================================================
26 #include "block_preconditioner.h"
27 
28 namespace oomph
29 {
30  /// Static boolean to allow block_matrix_test(...) to be run.
31  /// Defaults to false.
32  template<typename MATRIX>
33  bool BlockPreconditioner<MATRIX>::Run_block_matrix_test = false;
34 
35 
36  //============================================================================
37  /// Determine the size of the matrix blocks and setup the
38  /// lookup schemes relating the global degrees of freedom with
39  /// their "blocks" and their indices (row/column numbers) in those
40  /// blocks.
41  /// The distributions of the preconditioner and the blocks are
42  /// automatically specified (and assumed to be uniform) at this
43  /// stage.
44  /// This method should be used if any block contains more than one
45  /// type of DOF. The argument vector dof_to_block_map should be of length
46  /// ndof. Each element should contain an integer indicating the block number
47  /// corresponding to that type of DOF.
48  //============================================================================
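// Illustrative usage sketch (hypothetical caller; the dof type names u, v,
// w, p and the grouping are assumptions for illustration only): a master
// preconditioner with four dof types that wants the three velocities in
// block 0 and the pressure in block 1 would call this function roughly as
//
//   Vector<unsigned> dof_to_block_map(4);
//   dof_to_block_map[0] = 0; // u -> block 0
//   dof_to_block_map[1] = 0; // v -> block 0
//   dof_to_block_map[2] = 0; // w -> block 0
//   dof_to_block_map[3] = 1; // p -> block 1
//   this->block_setup(dof_to_block_map);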
49  template<typename MATRIX>
50  void BlockPreconditioner<MATRIX>::block_setup(
51  const Vector<unsigned>& dof_to_block_map_in)
52  {
53 #ifdef PARANOID
54  // Subsidiary preconditioners don't really need the meshes
55  if (this->is_master_block_preconditioner())
56  {
57  std::ostringstream err_msg;
58  unsigned n = nmesh();
59  if (n == 0)
60  {
61  err_msg << "No meshes have been set for this block preconditioner!\n"
62  << "Set one with set_nmesh(...), set_mesh(...)" << std::endl;
63  throw OomphLibError(
64  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
65  }
66  for (unsigned m = 0; m < n; m++)
67  {
68  if (Mesh_pt[m] == 0)
69  {
70  err_msg << "The mesh pointer to mesh " << m << " is null!\n"
71  << "Set a non-null one with set_mesh(...)" << std::endl;
72  throw OomphLibError(
73  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
74  }
75  }
76  }
77 #endif
78 
79  // Create a copy of the vector input so that we can modify it below
80  Vector<unsigned> dof_to_block_map = dof_to_block_map_in;
81 
82  if (is_subsidiary_block_preconditioner())
83  {
84 #ifdef PARANOID
85  // Get the size of the Doftype_in_master_preconditioner_coarse.
86  unsigned para_doftype_in_master_preconditioner_coarse_size =
87  Doftype_in_master_preconditioner_coarse.size();
88 
89  // Check that the Doftype_in_master_preconditioner_coarse vector is not
90  // empty. This must be set (via the function
91  // turn_into_subsidiary_block_preconditioner) if this is a
92  // subsidiary block preconditioner.
93  if (para_doftype_in_master_preconditioner_coarse_size == 0)
94  {
95  std::ostringstream err_msg;
96  err_msg << "The mapping from the dof types of the master "
97  << "block preconditioner \n"
98  << "to the subsidiary block preconditioner is empty.\n"
99  << "Doftype_in_master_preconditioner_coarse.size() == 0 \n"
100  << "has turn_into_subsidiary_block_preconditioner(...)\n"
101  << "been called with the correct parameters?\n"
102  << std::endl;
103  throw OomphLibError(
104  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
105  }
106 
107 
108  // PARANOID checks for Doftype_coarsen_map_coarse
109  // This is also set in the function
110  // turn_into_subsidiary_block_preconditioner(...).
111  //
112  // The Doftype_coarsen_map_coarse vector must satisfy two conditions
113  // for it to be valid.
114  //
115  // 1) The dof type numbers in the dof_coarsen_map vector must be
116  // unique. For example, it does not make sense to have the vector
117  // [[0,1][1,2]] because the first inner vector says
118  // "treat dof types 0 and 1 as dof type 0" and the second inner vector
119  // says "treat dof type 1 and 2 as dof type 1", but dof type 1 is
120  // already being treated as dof type 0.
121  //
122  // 2) Every SUBSIDIARY dof type must be mapped to a dof type in the
123  // Doftype_coarsen_map_coarse vector.
124  // For example, if there are 5 dof types (passed down from the master
 125  // block preconditioner), and this subsidiary block
126  // preconditioner only deals with 3 dof types, then all 5 dof types
127  // must be mapped to a dof type in the subsidiary preconditioner. For
128  // example if the dof_map is [1,2,3,4,5], then the subsidiary block
129  // preconditioner knows that 5 dof types have been passed down. But if
130  // it only works with three dof types, we MUST have three inner vectors
 131  // in the doftype_coarsen_map vector (which correspond to dof types 0,
 132  // 1 and 2), and the union of the dof types in the three inner vectors must
133  // contain dof types 0, 1, 2, 3 and 4 exactly once. It cannot contain,
134  // say, 0, 1, 5, 7, 9, even though it passes the uniqueness check. We
135  // ensure this by two conditions:
136  //
137  // 2.1) The Doftype_coarsen_map_coarse vector must contain the same
138  // number of dof types as the dof_map vector.
 139  // In addition, recall that Doftype_coarsen_map_coarse is a
 140  // 2D vector; it must also contain the same number of inner vectors as
 141  // there are elements in the dof_to_block_map_in vector.
142  //
143  // 2.2) The maximum element in the doftype_coarsen_map_coarse vector
144  // is the length of the dof_map vector minus 1.
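 //
 // For example, if 5 dof types are passed down, then
 // Doftype_coarsen_map_coarse = [[0,2][1,3][4]] is valid (the values
 // 0,...,4 each appear exactly once), whereas [[0,1][1,2][3,4]] violates
 // condition (1) (1 appears twice), and [[0,1][2,3]] violates (2.1) and
 // (2.2) (only four values, and the maximum is 3 rather than 4).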
145 
 146  // A set is ideal for checking the above three conditions; we shall insert
147  // all the elements in the doftype_coarsen_map_coarse vector into this
148  // set.
149  std::set<unsigned> doftype_map_set;
150 
151  // Condition (1): Check for uniqueness by inserting all the values of
152  // Doftype_coarsen_map_coarse into a set.
153  unsigned para_doftype_coarsen_map_coarse_size =
154  Doftype_coarsen_map_coarse.size();
155 
156  // Loop through the outer vector of Doftype_coarsen_map_coarse
157  // then loop through the inner vectors and attempt to insert each
158  // element of Doftype_coarsen_map_coarse into doftype_map_set.
159  //
 160  // The inner for loop will throw an error if we cannot insert the
 161  // element; this means that it is already inserted and thus not unique.
162  for (unsigned i = 0; i < para_doftype_coarsen_map_coarse_size; i++)
163  {
164  // Loop through the inner vector
165  unsigned para_doftype_coarsen_map_coarse_i_size =
166  Doftype_coarsen_map_coarse[i].size();
167  for (unsigned j = 0; j < para_doftype_coarsen_map_coarse_i_size; j++)
168  {
169  // Attempt to insert all the values of the inner vector into a set.
170  std::pair<std::set<unsigned>::iterator, bool> doftype_map_ret =
171  doftype_map_set.insert(Doftype_coarsen_map_coarse[i][j]);
172 
173  if (!doftype_map_ret.second)
174  {
175  std::ostringstream err_msg;
176  err_msg << "Error: the doftype number "
177  << Doftype_coarsen_map_coarse[i][j]
178  << " is already inserted." << std::endl;
179  throw OomphLibError(
180  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
181  }
182  }
183  }
184 
185  // Condition (2.1): Check that the doftype_map_set describes as many
186  // values as doftype_in_master_preconditioner_coarse. I.e. if dof_map
187  // contains 5 dof types, then the doftype_coarsen_map_coarse vector must
188  // also contain 5 dof types.
189  if (para_doftype_in_master_preconditioner_coarse_size !=
190  doftype_map_set.size())
191  {
192  std::ostringstream err_msg;
193  err_msg << "The size of doftype_in_master_preconditioner_coarse "
194  << "must be the same as the total\n"
195  << "number of values in the doftype_coarsen_map_coarse vector."
196  << std::endl;
197  throw OomphLibError(
198  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
199  }
200 
201  // Condition (2.2): Check that the maximum element in the
202  // doftype_coarsen_map_coarse vector is the length of the
203  // doftype_in_master_preconditioner_coarse minus 1.
204  unsigned para_doftype_in_master_preconditioner_coarse_size_minus_one =
205  para_doftype_in_master_preconditioner_coarse_size - 1;
206  if (para_doftype_in_master_preconditioner_coarse_size_minus_one !=
207  *doftype_map_set.rbegin())
208  {
209  std::ostringstream err_msg;
210  err_msg << "The maximum dof type number in the "
211  << "doftype_coarsen_map vector must be "
212  << para_doftype_in_master_preconditioner_coarse_size_minus_one
213  << std::endl;
214  throw OomphLibError(
215  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
216  }
217 #endif
218 
219  // Set the mapping from the master preconditioner DOF types to the
220  // subsidiary preconditioner DOF types.
221  //
222  // IMPORTANT: Since DOF types may be coarsened in the master block
223  // preconditioner, this may no longer reflect the actual underlying dof
224  // types. We must get the actual underlying dof types for the
 225  // block_setup(...) function to work properly, so that all the look-up
 226  // schemes for this (subsidiary) block preconditioner are correct and work
 227  // properly. This is for backwards compatibility and to make sure that
 228  // Richard Muddle's code still works at this (subsidiary) level, although
 229  // it may not be used.
230  //
231  // If we do not want to make it backwards compatible, we may as well
232  // kill the block_setup(...) for subsidiary block preconditioners -
 233  // but other things may break. Do it at your own risk (take time to
234  // fully understand the whole block preconditioning framework code).
235 
236  // Create the corresponding Doftype_in_master_preconditioner_fine and
237  // Doftype_coarsen_map_fine vectors.
238 
239  // First resize the vectors.
240  Doftype_in_master_preconditioner_fine.resize(0);
241  Doftype_coarsen_map_fine.resize(0);
242 
243  // The Doftype_in_master_preconditioner_fine vector is easy. We know that
244  // the Doftype_coarsen_map_fine in the master preconditioner must be
245  // constructed already. So we simply loop through the values in
246  // doftype_in_master_preconditioner_coarse, then get the most fine grain
247  // dof types from the master preconditioner's Doftype_coarsen_map_fine
248  // vector.
249  //
250  // For example, if the master preconditioner has the vector:
251  // Doftype_coarsen_map_fine = [0,1,2,3][4,5,6,7][8,9,10,11][12,13][14,15]
252  //
253  // and passes the two vectors
254  // doftype_in_master_preconditioner_coarse = [1,2,3]
255  // doftype_coarsen_map_coarse = [[0][1,2]]
256  //
257  // Then we want
258  // Doftype_in_master_preconditioner_fine = [4,5,6,7,8,9,10,11,12,13]
259  //
260  // We achieve this by looking up the corresponding fine dof types in the
 261  // master's Doftype_coarsen_map_fine vector which corresponds to the
262  // values in Doftype_in_master_preconditioner_coarse.
263  //
 264  // That is, the values in Doftype_in_master_preconditioner_coarse give us
 265  // the indices of the sub vectors we want in the master's
266  // Doftype_coarsen_map_fine vector.
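 //
 // Concretely, for the example above this amounts to concatenating
 // get_fine_grain_dof_types_in(1) = [4,5,6,7],
 // get_fine_grain_dof_types_in(2) = [8,9,10,11] and
 // get_fine_grain_dof_types_in(3) = [12,13]
 // (calls made on the master block preconditioner), giving
 // Doftype_in_master_preconditioner_fine = [4,5,6,7,8,9,10,11,12,13].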
267 
268 #ifdef PARANOID
269  // Check that the master block preconditioner's Doftype_coarsen_map_fine
270  // is set up. Under the current implementation, this would always be set
271  // up properly, but we check it just in case!
272  if (master_block_preconditioner_pt()->doftype_coarsen_map_fine().size() ==
273  0)
274  {
275  std::ostringstream err_msg;
276  err_msg << "The master block preconditioner's "
277  << "Doftype_coarsen_map_fine is not\n"
278  << "set up properly.\n"
279  << "\n"
280  << "This vector is constructed in the function "
281  << "block_setup(...).\n"
282  << std::endl;
283  throw OomphLibError(
284  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
285  }
286 #endif
287 
288  unsigned doftype_in_master_preconditioner_coarse_size =
289  Doftype_in_master_preconditioner_coarse.size();
290  for (unsigned i = 0; i < doftype_in_master_preconditioner_coarse_size;
291  i++)
292  {
293  // The index of the sub vector we want.
294  unsigned subvec_index = Doftype_in_master_preconditioner_coarse[i];
295 
296  // Get the corresponding most fine grain sub vector from the master
297  // block preconditioner
298  Vector<unsigned> tmp_master_dof_subvec =
299  Master_block_preconditioner_pt->get_fine_grain_dof_types_in(
300  subvec_index);
301 
302  Doftype_in_master_preconditioner_fine.insert(
303  Doftype_in_master_preconditioner_fine.end(),
304  tmp_master_dof_subvec.begin(),
305  tmp_master_dof_subvec.end());
306  }
307 
308  // The Doftype_coarsen_map_fine vector is a bit more tricky.
309  // The Doftype_coarsen_map_coarse vector describes which coarse dof types
310  // of THIS preconditioner are grouped together. We have to translate this
311  // into the most fine grain dof types.
312  //
313  // For example, if
314  // Doftype_coarsen_map_coarse = [[0][1,2]]
315  // Doftype_in_master_preconditioner_coarse = [1,2,3]
316  //
317  // and the MASTER preconditioner has:
318  // Doftype_coarsen_map_fine= [[0,1,2,3][4,5,6,7][8,9,10,11][12,13][14,15]]
319  //
 320  // Then [[0][1,2]] tells us that the most fine grain DOF types behind
 321  // dof type 1 of the master preconditioner must be grouped together, and
 322  // the most fine grain dof types behind dof types 2 and 3 of the master
 323  // preconditioner must be grouped together.
324  //
325  // This gives the vector [[4,5,6,7] [8,9,10,11,12,13]], translating this
326  // into the local DOF types of this preconditioner we have
327  // Doftype_coarsen_map_fine = [[0,1,2,3][4,5,6,7,8,9]]. This corresponds
328  // with the Doftype_in_master_preconditioner_fine vector we created above:
329  // Doftype_in_master_preconditioner_fine = [4,5,6,7,8,9,10,11,12,13]
330  //
331  // Together, the master block preconditioner says to THIS subsidiary block
332  // preconditioner "work on my DOF types [4,5,6,7,8,9,10,11,12,13], but
 333  // group your DOF types [0,1,2,3] together as DOF type 0 and [4,5,6,7,8,9]
 334  // together as DOF type 1".
335  //
 336  // Think of it like this: for each dof type in
 337  // Doftype_in_master_preconditioner_coarse we look at how many fine dof
 338  // types it corresponds to in the master preconditioner. In this case:
339  //
340  // 1 - corresponds to fine DOF types 0,1,2,3 in this preconditioner,
341  // and 4,5,6,7 in the master preconditioner;
342  //
343  // 2 - corresponds to fine DOF types 4,5,6,7 in this preconditioner,
344  // and 8,9,10,11 in the master preconditioner;
345  //
346  // 3 - corresponds to fine DOF types 8,9 in this preconditioner,
347  // and 12,13 in the master preconditioner.
348  //
349  // Thus Doftype_coarsen_map_fine = [[0,1,2,3][4,5,6,7,8,9]]
350  //
351  /// /////////////////////////////////////////////////////////////////////
352  //
 353  // How to do this: First we create a 2D vector which corresponds
354  // to the fine dof types in the master preconditioner but starting from
355  // 0. For example, take the above example (repeated below):
356  // Passed to this prec by the master prec:
357  // Doftype_coarsen_map_coarse = [[0][1,2]]
358  // Doftype_in_master_preconditioner_coarse = [1,2,3]
359  //
360  // and the MASTER preconditioner has:
361  // Doftype_coarsen_map_fine= [[0,1,2,3][4,5,6,7][8,9,10,11][12,13][14,15]]
362  //
363  // Step 1:
364  // Then, the temp 2D vector we want to create is:
 365  // master_fine_doftype_translated = [[0,1,2,3], [4,5,6,7], [8,9]]
 366  // This comes from looping through Doftype_in_master_preconditioner_coarse
 367  // and getting the number of fine dof types in the master for each entry.
368  //
369  // Step 2:
370  // Then:
371  // Loop through the vector Doftype_coarsen_map_coarse,
372  // Loop over the inner vectors in Doftype_coarsen_map_coarse
373  // Each element in the inner vector corresponds to a vector in
 374  // master_fine_doftype_translated. We push the vectors of
 375  // master_fine_doftype_translated into Doftype_coarsen_map_fine.
376  //
377 
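 // For the running example, Step 1 gives
 // master_fine_doftype_translated = [[0,1,2,3][4,5,6,7][8,9]]
 // (4, 4 and 2 fine dof types behind master dof types 1, 2 and 3), and
 // Step 2 with Doftype_coarsen_map_coarse = [[0][1,2]] then yields
 // Doftype_coarsen_map_fine = [[0,1,2,3][4,5,6,7,8,9]].
 //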
378  Vector<Vector<unsigned>> master_fine_doftype_translated;
379  unsigned dof_type_index = 0;
380  for (unsigned i = 0; i < doftype_in_master_preconditioner_coarse_size;
381  i++)
382  {
383  // How many fine DOF types are in the master's
384  // Doftype_in_master_preconditioner_coarse[i]?
385  unsigned coarse_dof = Doftype_in_master_preconditioner_coarse[i];
386 
387  unsigned n_master_fine_doftypes =
388  Master_block_preconditioner_pt->nfine_grain_dof_types_in(coarse_dof);
389 
390  Vector<unsigned> tmp_sub_vec;
391  for (unsigned j = 0; j < n_master_fine_doftypes; j++)
392  {
393  tmp_sub_vec.push_back(dof_type_index);
394  dof_type_index++;
395  }
396  master_fine_doftype_translated.push_back(tmp_sub_vec);
397  }
398 
399 
 400  // master_fine_doftype_translated now contains vectors whose values run
 401  // from 0, 1, 2, ...
402  //
403  // Now read out the values of master_fine_doftype_translated and place
404  // them in order according to Doftype_coarsen_map_coarse.
405  unsigned doftype_coarsen_map_coarse_size =
406  Doftype_coarsen_map_coarse.size();
407  for (unsigned i = 0; i < doftype_coarsen_map_coarse_size; i++)
408  {
409  Vector<unsigned> tmp_vec;
410  unsigned doftype_coarsen_map_coarse_i_size =
411  Doftype_coarsen_map_coarse[i].size();
412  for (unsigned j = 0; j < doftype_coarsen_map_coarse_i_size; j++)
413  {
414  unsigned subvec_i = Doftype_coarsen_map_coarse[i][j];
415 
416  tmp_vec.insert(tmp_vec.end(),
417  master_fine_doftype_translated[subvec_i].begin(),
418  master_fine_doftype_translated[subvec_i].end());
419  }
420 
421  Doftype_coarsen_map_fine.push_back(tmp_vec);
422  }
423 
424  // Get the number of block types (and DOF types) in this preconditioner
425  // from the length of the dof_map vector.
426  Internal_ndof_types = Doftype_in_master_preconditioner_fine.size();
427 
428  // Nblock_types is later updated in block_setup(...)
429  Internal_nblock_types = Internal_ndof_types;
430 
431  // Compute number of rows in this (sub) preconditioner using data from
432  // the master.
433  Nrow = 0;
434  for (unsigned b = 0; b < Internal_ndof_types; b++)
435  {
436  Nrow += this->internal_dof_block_dimension(b);
437  }
438 
439 #ifdef PARANOID
440  if (Nrow == 0)
441  {
442  std::ostringstream error_message;
443  error_message
444  << "Nrow=0 in subsidiary preconditioner. This seems fishy and\n"
445  << "suggests that block_setup() was not called for the \n"
446  << "master block preconditioner yet.";
447  throw OomphLibWarning(error_message.str(),
448  OOMPH_CURRENT_FUNCTION,
449  OOMPH_EXCEPTION_LOCATION);
450  }
451 #endif
452  }
453 
454  // If this is a master block preconditioner, then set the
455  // Doftype_coarsen_map_fine and Doftype_coarsen_map_coarse to the
456  // identity. Recall that the Doftype_coarsen_map_fine maps the dof types
457  // that this preconditioner requires with the most fine grain dof types (the
458  // internal dof types) and the Doftype_coarsen_map_coarse maps the dof
459  // types that this preconditioner requires with the dof types which this
460  // preconditioner is given from a master preconditioner (these dof types may
461  // or may not be coarsened). In the case of the master preconditioner, these
462  // are the same (since dof types are not coarsened), furthermore the
463  // identity mapping is provided to say that dof type 0 maps to dof type 0,
464  // dof type 1 maps to dof type 1,
465  // dof type 2 maps to dof type 2,
466  // etc...
467  //
468  // If this is not a master block preconditioner, then the vectors
 469  // Doftype_coarsen_map_fine and Doftype_coarsen_map_coarse are handled
470  // by the turn_into_subsidiary_block_preconditioner(...) function.
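 // For example, a master preconditioner with four (uncoarsened) dof types
 // simply ends up with
 // Doftype_coarsen_map_fine = Doftype_coarsen_map_coarse = [[0][1][2][3]].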
471  if (is_master_block_preconditioner())
472  {
473  // How many dof types does this preconditioner work with?
474  unsigned n_external_dof_types = dof_to_block_map.size();
475 
476  // Note: at the master level, the n_external_dof_types should be the same
477  // as the internal_ndof_types(), since the dof_to_block_map MUST describe
478  // the mapping between every dof type (not yet coarsened - so it is the
479  // same number as the internal dof types) to the block types. But we
480  // distinguish them for clarity. We also check that this is the case.
481 #ifdef PARANOID
482  unsigned n_internal_dof_types = internal_ndof_types();
483 
484  if (n_internal_dof_types != n_external_dof_types)
485  {
486  std::ostringstream err_msg;
487  err_msg
488  << "The internal ndof types and the length of the dof_to_block_map\n"
489  << "vector is not the same. Since this is the master block "
490  << "preconditioner,\n"
491  << "you must describe which block each DOF type belongs to,\n"
492  << "no more, no less."
493  << "internal_ndof_types = " << n_internal_dof_types << "\n"
494  << "dof_to_block_map.size() = " << n_external_dof_types << "\n";
495  throw OomphLibWarning(
496  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
497  }
498 #endif
499 
500  // Clear and reserve space.
501  Doftype_coarsen_map_fine.clear();
502  Doftype_coarsen_map_coarse.clear();
503  Doftype_coarsen_map_fine.reserve(n_external_dof_types);
504  Doftype_coarsen_map_coarse.reserve(n_external_dof_types);
505 
506  // Now push back the identity mapping.
507  for (unsigned i = 0; i < n_external_dof_types; i++)
508  {
509  // Create a vector and push it in.
510  Vector<unsigned> tmp_vec(1, i);
511  Doftype_coarsen_map_fine.push_back(tmp_vec);
512  Doftype_coarsen_map_coarse.push_back(tmp_vec);
513  }
514  }
515  else
516  // Else this is a subsidiary block preconditioner.
517  {
518  // Both the Doftype_coarsen_map_fine and Doftype_coarsen_map_coarse
 519  // vectors must already have been set up by the
520  // turn_into_subsidiary_block_preconditioner(...) function. We check this.
521 #ifdef PARANOID
522  if ((Doftype_coarsen_map_fine.size() == 0) ||
523  (Doftype_coarsen_map_coarse.size() == 0))
524  {
525  std::ostringstream err_msg;
526  err_msg << "Either the Doftype_coarsen_map_fine or the \n"
527  << "Doftype_coarsen_map_coarse vectors is of size 0.\n"
528  << "Did you remember to call the function "
529  << "turn_into_subsidiary_block_preconditioner(...)?";
530  throw OomphLibWarning(
531  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
532  }
533 #endif
534  }
535 
536 
537  // Now we create the vector Block_to_dof_map_coarse.
 538  // Recall that this vector describes which dof types are in which block with
539  // the relationship:
540  //
541  // Block_to_dof_map_coarse[block_number] = Vector[dof types];
542  //
543  // Note that this is not the internal (underlying) dof type.
544  // Nor is this in relation to the parent block preconditioner's dof type.
 545  // The total number of dof types stored in it equals the length of the
 546  // dof_to_block_map vector.
546  //
 547  // Since the dof type coarsening feature was added later, we encapsulate this
548  // bit of the code so it does not affect things below.
549  {
550  // Check that the dof_to_block map "makes sense" for the
551  // Doftype_coarsen_map_coarse.
552  // The Doftype_coarsen_map_coarse describes which doftypes should be
553  // considered as a single doftype in THIS preconditioner.
554  //
555  // For example, if this preconditioner is the LSC block preconditioner
556  // applied to a 3D problem, it expects 4 doftypes:
557  // 3 velocity, [u, v, w] and 1 pressure [p],
558  // giving us the dof type ordering
559  // [u v w p].
560  //
561  // The LSC preconditioner groups the velocity and pressure doftypes
562  // separately, thus the dof_to_block_map will be:
563  // [0 0 0 1]
564  //
565  // Then the Doftype_coarsen_map_coarse MUST have length 4, to identify
566  // which of the OTHER (possibly coarsened) dof types should be grouped
 567  // together to be considered as a single dof type of THIS preconditioner.
568  //
569  // For example, if the preconditioner above this one has the dof type
570  // ordering:
571  // 0 1 2 3 4 5 6 7 8 9
572  // ub vb wb up vp wp ut vt wt p
 573  // Then we want to tell THIS preconditioner which dof types belong to
574  // u, v, w and p, by providing the optional argument
575  // Doftype_coarsen_map_coarse to the
576  // turn_into_subsidiary_block_preconditioner(...) function
577  // [[0 3 6] [1 4 7] [2 5 8] [9]]
578  //
579  // So, it is important that the length of dof_to_block_map is the same as
580  // the length of Doftype_coarsen_map_coarse. We check this.
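 // As a hypothetical sketch (the variable names are illustrative only),
 // the master preconditioner in the example above could hand all ten of
 // its dof types to the LSC subsidiary preconditioner, together with the
 // grouping [[0,3,6][1,4,7][2,5,8][9]], roughly as follows:
 //
 //   Vector<unsigned> dof_map(10);
 //   for (unsigned i = 0; i < 10; i++) { dof_map[i] = i; }
 //   Vector<Vector<unsigned>> coarsen_map;  // filled with the 4 groups above
 //   lsc_prec_pt->turn_into_subsidiary_block_preconditioner(
 //     this, dof_map, coarsen_map);
 //
 // after which the LSC preconditioner's own call to block_setup(...) would
 // pass the dof_to_block_map [0 0 0 1] discussed above.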
581  unsigned dof_to_block_map_size = dof_to_block_map.size();
582 
583 #ifdef PARANOID
584  if (dof_to_block_map_size != Doftype_coarsen_map_coarse.size())
585  {
586  std::ostringstream err_msg;
587  err_msg
588  << "The size of dof_to_block_map and Doftype_coarsen_map_coarse is "
589  "not "
590  << "the same.\n"
591  << "dof_to_block_map.size() = " << dof_to_block_map_size << "\n"
592  << "Doftype_coarsen_map_coarse.size() = "
593  << Doftype_coarsen_map_coarse.size() << ".\n"
594  << "One of the two list is incorrect, please look at the comments\n"
595  << "in the source code for more details.";
596  throw OomphLibWarning(
597  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
598  }
599 #endif
600 
601  // Create the Block_to_dof_map_coarse from
602  // the dof_to_block_map and Doftype_coarsen_map_coarse.
603 
604  // find the maximum block number
605  unsigned max_block_number =
606  *std::max_element(dof_to_block_map.begin(), dof_to_block_map.end());
607 
608  // Now we do the following:
 609  // Let's say the Doftype_coarsen_map_coarse is:
610  // [0 3 6]
611  // [1 4 7]
612  // [2 5 8]
613  // [9]
614  //
615  // (this is the same as the above example)
616  //
617  // and the dof_to_block_map is [0 0 0 1].
618  //
 619  // Then Block_to_dof_map_coarse (in terms of THIS preconditioner's
 620  // coarse dof types) is [0 1 2] [3]; via Doftype_coarsen_map_coarse these
 621  // blocks group the parent's dof types [0 3 6 1 4 7 2 5 8] and [9].
622 
623  // Clear anything in the Block_to_dof_map_coarse
624  Block_to_dof_map_coarse.clear();
625 
626  const unsigned tmp_nblock = max_block_number + 1;
627 
628  Block_to_dof_map_coarse.resize(tmp_nblock);
629 
630  for (unsigned i = 0; i < dof_to_block_map_size; i++)
631  {
632  Block_to_dof_map_coarse[dof_to_block_map[i]].push_back(i);
633  }
634 
635  Block_to_dof_map_fine.clear();
636  Block_to_dof_map_fine.resize(tmp_nblock);
637  for (unsigned block_i = 0; block_i < tmp_nblock; block_i++)
638  {
639  // get the dof types in this block.
640  const unsigned ndof_in_block = Block_to_dof_map_coarse[block_i].size();
641  for (unsigned dof_i = 0; dof_i < ndof_in_block; dof_i++)
642  {
643  const unsigned coarsened_dof_i =
644  Block_to_dof_map_coarse[block_i][dof_i];
645 
646  // Insert the most fine grain dofs which this dof_i corresponds to
647  // into block_i
648  Vector<unsigned> dof_i_dofs =
649  Doftype_coarsen_map_fine[coarsened_dof_i];
650 
651  Block_to_dof_map_fine[block_i].insert(
652  Block_to_dof_map_fine[block_i].end(),
653  dof_i_dofs.begin(),
654  dof_i_dofs.end());
655  }
656  }
657 
 658  // Now set the dof_to_block_map to the identity.
659  // NOTE: We are now using the internal n dof types. This is because the
660  // dof type coarsening feature was built on top of the existing block
661  // preconditioning framework which does not handle coarsening of dof
662  // types. Hence, under the hood, it still works with the most fine grain
663  // dof types and does not do any coarsening.
664 
665  // Locally cache the internal ndof types (using access function because
 666  // the Internal_ndof_types variable may not be set up yet if this is a
667  // master preconditioner).
668  unsigned tmp_internal_ndof_types = internal_ndof_types();
669 
670  dof_to_block_map.resize(tmp_internal_ndof_types, 0);
671 
672  for (unsigned i = 0; i < tmp_internal_ndof_types; i++)
673  {
674  dof_to_block_map[i] = i;
675  }
676  } // end of Block_to_dof_map_coarse encapsulation
677 
678 #ifdef PARANOID
679 
680  // Check that the meshes are ok. This only needs to be done in the master
681  // because subsidiary preconditioners don't do anything with the meshes
682  // here.
683  if (is_master_block_preconditioner())
684  {
685  // This is declared as local_nmesh because there are other variables
 686  // called nmesh floating about... but this will not exist if PARANOID is
 687  // switched off.
688  unsigned local_nmesh = nmesh();
689 
690  // Check that some mesh pointers have been assigned.
691  if (local_nmesh == 0)
692  {
693  std::ostringstream error_msg;
694  error_msg << "Cannot setup blocks because no meshes have been set.";
695  throw OomphLibError(
696  error_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
697  }
698 
 699  // Each mesh must contain elements with the same number of dof types.
 700  // A stricter check is to ensure that the mesh contains only one type of
 701  // element. This is relaxed in some cases (see the sketch below).
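 // (For example, a mesh that deliberately mixes element types -- say bulk
 // elements and the face elements attached to them -- can be permitted by
 // setting the optional boolean when the mesh is handed over, e.g.,
 // hypothetically, prec_pt->set_mesh(imesh, mesh_pt, true); the elements
 // must then still agree on the number of dof types, as checked below.)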
702  for (unsigned mesh_i = 0; mesh_i < local_nmesh; mesh_i++)
703  {
704  // The number of elements in the current mesh.
705  unsigned n_element = mesh_pt(mesh_i)->nelement();
706 
707  // When the bulk mesh is distributed, there may not be any elements
708  // in the surface mesh(es).
709  if (n_element > 0)
710  {
711  // The string of the first element in the current mesh.
712  std::string first_element_string =
713  typeid(*(mesh_pt(mesh_i)->element_pt(0))).name();
714 
715  // If there are multiple element types in the current mesh,
 716  // we can at least make sure that they contain the same number of DOF types.
717  if (bool(Allow_multiple_element_type_in_mesh[mesh_i]))
718  {
719  // The ndof types of the first element.
720  unsigned first_element_ndof_type =
721  mesh_pt(mesh_i)->element_pt(0)->ndof_types();
722 
 723  // Loop through the remaining elements and compare the number of types of DOFs.
724  for (unsigned el_i = 1; el_i < n_element; el_i++)
725  {
726  // The ndof type of the current element.
727  unsigned current_element_ndof_type =
728  mesh_pt(mesh_i)->element_pt(el_i)->ndof_types();
729 
730  // The string of the current element.
731  std::string current_element_string =
732  typeid(*(mesh_pt(mesh_i)->element_pt(el_i))).name();
733 
734  // Compare against the first element.
735  if (current_element_ndof_type != first_element_ndof_type)
736  {
737  std::ostringstream error_message;
738  error_message
739  << "Elements in the same mesh MUST have the same number of "
740  "types "
741  << "of DOFs.\n"
742  << "The element in mesh " << mesh_i << ", at position "
743  << el_i << " is: \n"
744  << current_element_string << ", \n"
745  << "with ndof types: " << current_element_ndof_type << ".\n"
746  << "The first element in the same mesh is: \n"
747  << first_element_string << ", \n"
748  << "with ndof types: " << first_element_ndof_type << ".\n";
749  throw OomphLibError(error_message.str(),
750  OOMPH_CURRENT_FUNCTION,
751  OOMPH_EXCEPTION_LOCATION);
752  }
753  }
754  }
755  else
756  // There should be only one type of elements in the current mesh.
757  // Check that this is the case!
758  {
759  // Loop through the elements in the current mesh.
760  for (unsigned el_i = 1; el_i < n_element; el_i++)
761  {
762  // The string of the current element.
763  std::string current_element_string =
764  typeid(*(mesh_pt(mesh_i)->element_pt(el_i))).name();
765 
766  // Compare against the first element.
767  if (current_element_string.compare(first_element_string) != 0)
768  {
769  std::ostringstream error_message;
770  error_message
771  << "By default, a mesh containing block preconditionable "
772  << "elements must contain only one type of element.\n"
773  << "The element in mesh " << mesh_i << ", at position "
774  << el_i << " is: \n"
775  << current_element_string << "\n"
776  << "The first element in the same mesh is: \n"
777  << first_element_string << "\n"
778  << "If this is correct, consider calling the set_mesh(...) "
779  "with\n"
780  << "the optional argument set true to allow multiple "
781  "element\n"
782  << "types in the same mesh.\n"
783  << "Note: A minimal requirement is that the elements in the "
784  "same\n"
785  << "mesh MUST have the same number of DOF types.";
786  throw OomphLibError(error_message.str(),
787  OOMPH_CURRENT_FUNCTION,
788  OOMPH_EXCEPTION_LOCATION);
789  }
790  }
791  }
792  }
793  }
794  }
795 
796 #endif
797  // clear the memory
798  this->clear_block_preconditioner_base();
799 
800  // get my_rank and nproc
801 #ifdef OOMPH_HAS_MPI
802  unsigned my_rank = comm_pt()->my_rank();
803  unsigned nproc = comm_pt()->nproc();
804 #endif
805 
806 
807  /// //////////////////////////////////////////////////////////////////////////
808  // start of master block preconditioner only operations
809  /// //////////////////////////////////////////////////////////////////////////
810 #ifdef OOMPH_HAS_MPI
811  unsigned* nreq_sparse = new unsigned[nproc]();
812  unsigned* nreq_sparse_for_proc = new unsigned[nproc]();
813  unsigned** index_in_dof_block_sparse_send = new unsigned*[nproc]();
814  unsigned** dof_number_sparse_send = new unsigned*[nproc]();
815  Vector<MPI_Request> send_requests_sparse;
816  Vector<MPI_Request> recv_requests_sparse;
817 #endif
818 
819  // If this preconditioner is the master preconditioner then we need
820  // to assemble the vectors : Dof_number
821  // Index_in_dof_block
822  if (is_master_block_preconditioner())
823  {
824  // Get the number of dof types in each mesh.
825  Ndof_types_in_mesh.assign(nmesh(), 0);
826  for (unsigned i = 0; i < nmesh(); i++)
827  {
828  Ndof_types_in_mesh[i] = mesh_pt(i)->ndof_types();
829  }
830  // Setup the distribution of this preconditioner, assumed to be the same
831  // as the matrix if the matrix is distributable.
832  if (dynamic_cast<DistributableLinearAlgebraObject*>(matrix_pt()))
833  {
834  this->build_distribution(
835  dynamic_cast<DistributableLinearAlgebraObject*>(matrix_pt())
836  ->distribution_pt());
837  }
838  else
839  {
840  LinearAlgebraDistribution dist(comm_pt(), matrix_pt()->nrow(), false);
841  this->build_distribution(dist);
842  }
843  Nrow = matrix_pt()->nrow();
844 
845  // Boolean to indicate whether the matrix is actually distributed,
846  // ie distributed and on more than one processor.
847  bool matrix_distributed =
848  (this->distribution_pt()->distributed() &&
849  this->distribution_pt()->communicator_pt()->nproc() > 1);
850 
851 
852  // Matrix must be a CR matrix.
853  CRDoubleMatrix* cr_matrix_pt = dynamic_cast<CRDoubleMatrix*>(matrix_pt());
854 
855  if (cr_matrix_pt == 0)
856  {
857  std::ostringstream error_message;
858  error_message << "Block setup for distributed matrices only works "
859  << "for CRDoubleMatrices";
860  throw OomphLibError(error_message.str(),
861  OOMPH_CURRENT_FUNCTION,
862  OOMPH_EXCEPTION_LOCATION);
863  }
864 
865 
866  // Get distribution.
867  unsigned first_row = this->distribution_pt()->first_row();
868  unsigned nrow_local = this->distribution_pt()->nrow_local();
869  unsigned last_row = first_row + nrow_local - 1;
870 
871 #ifdef OOMPH_HAS_MPI
872  // storage for the rows required by each processor in the dense
873  // block lookup storage scheme
874  // dense_required_rows(p,0) is the minimum global index required by proc p
875  // ...(p,1) is the maximum global index required by proc p
876  DenseMatrix<unsigned> dense_required_rows(nproc, 2);
877  for (unsigned p = 0; p < nproc; p++)
878  {
879  dense_required_rows(p, 0) = this->distribution_pt()->first_row(p);
880  dense_required_rows(p, 1) = this->distribution_pt()->first_row(p) +
881  this->distribution_pt()->nrow_local(p) - 1;
882  }
883 
 884  // determine the global rows that are not in the range first_row to
 885  // first_row+nrow_local for which we should store the
 886  // Dof_index and Index_in_dof_block,
 887  // then send the lists to other processors
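 // (For example, if this processor owns global rows 100-199 but its rows
 // contain entries in columns 7 and 205, then 7 and 205 are recorded here
 // and their classification is later requested from the processors that
 // own them.)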
888  std::set<unsigned> sparse_global_rows_for_block_lookup;
889  if (matrix_distributed)
890  {
891  unsigned nnz = cr_matrix_pt->nnz();
892  int* column_index = cr_matrix_pt->column_index();
893  for (unsigned i = 0; i < nnz; i++)
894  {
895  unsigned ci = column_index[i];
896  if (ci < first_row || ci > last_row)
897  {
898  sparse_global_rows_for_block_lookup.insert(ci);
899  }
900  }
901  }
902 
903  int nsparse = sparse_global_rows_for_block_lookup.size();
904 
905  Global_index_sparse.resize(0);
906  std::copy(sparse_global_rows_for_block_lookup.begin(),
907  sparse_global_rows_for_block_lookup.end(),
908  std::back_inserter(Global_index_sparse));
909 
910  Index_in_dof_block_sparse.resize(nsparse);
911  Dof_number_sparse.resize(nsparse);
912  sparse_global_rows_for_block_lookup.clear();
913 
914  Vector<MPI_Request> recv_requests_sparse_nreq;
915  if (matrix_distributed)
916  {
917  MPI_Aint base_displacement_sparse;
918  MPI_Get_address(nreq_sparse, &base_displacement_sparse);
919 
920  int zero = 0;
921  for (unsigned p = 0; p < nproc; p++)
922  {
923  // determine the global eqn numbers required by this processor
924  // that can be classified by processor p
925  int begin = 0;
926  for (int i = 0; i < nsparse; ++i)
927  {
928  if (Global_index_sparse[i] < dense_required_rows(p, 0))
929  {
930  ++begin;
931  }
932  else
933  {
934  if (Global_index_sparse[i] <= dense_required_rows(p, 1))
935  {
936  ++nreq_sparse[p];
937  }
938  else
939  {
940  break;
941  }
942  }
943  }
944 
945  // if this processor has rows to be classified by proc p
946  if (nreq_sparse[p] > 0)
947  {
948  // send the number of global eqn numbers
949  MPI_Request req1;
950  MPI_Isend(&nreq_sparse[p],
951  1,
952  MPI_UNSIGNED,
953  p,
954  31,
955  comm_pt()->mpi_comm(),
956  &req1);
957  send_requests_sparse.push_back(req1);
958 
959  // send the global eqn numbers
960  MPI_Request req2;
961  MPI_Isend(&Global_index_sparse[begin],
962  nreq_sparse[p],
963  MPI_UNSIGNED,
964  p,
965  32,
966  comm_pt()->mpi_comm(),
967  &req2);
968  send_requests_sparse.push_back(req2);
969 
970  // post the recvs for the data that will be returned
971 
972  // the datatypes, displacements, lengths for the two datatypes
973  MPI_Datatype types[2];
974  MPI_Aint displacements[2];
975  int lengths[2];
976 
977  // index in dof block
978  MPI_Type_contiguous(nreq_sparse[p], MPI_UNSIGNED, &types[0]);
979  MPI_Type_commit(&types[0]);
980  MPI_Get_address(&Index_in_dof_block_sparse[begin],
981  &displacements[0]);
982  displacements[0] -= base_displacement_sparse;
983  lengths[0] = 1;
984 
985  // dof number
986  MPI_Type_contiguous(nreq_sparse[p], MPI_UNSIGNED, &types[1]);
987  MPI_Type_commit(&types[1]);
988  MPI_Get_address(&Dof_number_sparse[begin], &displacements[1]);
989  displacements[1] -= base_displacement_sparse;
990  lengths[1] = 1;
991 
992  // build the final type
993  MPI_Datatype recv_type;
994  MPI_Type_create_struct(
995  2, lengths, displacements, types, &recv_type);
996  MPI_Type_commit(&recv_type);
997  MPI_Type_free(&types[0]);
998  MPI_Type_free(&types[1]);
999 
1000  // and recv
1001  MPI_Request req;
1002  MPI_Irecv(
1003  nreq_sparse, 1, recv_type, p, 33, comm_pt()->mpi_comm(), &req);
1004  recv_requests_sparse.push_back(req);
1005  MPI_Type_free(&recv_type);
1006  }
1007 
1008  // if no communication required, confirm this
1009  if (nreq_sparse[p] == 0)
1010  {
1011  MPI_Request req1;
1012  MPI_Isend(
1013  &zero, 1, MPI_UNSIGNED, p, 31, comm_pt()->mpi_comm(), &req1);
1014  send_requests_sparse.push_back(req1);
1015  }
1016 
1017  //
1018  MPI_Request req;
1019  MPI_Irecv(&nreq_sparse_for_proc[p],
1020  1,
1021  MPI_UNSIGNED,
1022  p,
1023  31,
1024  comm_pt()->mpi_comm(),
1025  &req);
1026  recv_requests_sparse_nreq.push_back(req);
1027  }
1028  }
1029 #endif
1030 
1031  // resize the storage
1032  Dof_number_dense.resize(nrow_local);
1033  Index_in_dof_block_dense.resize(nrow_local);
1034 
1035  // zero the number of dof types
1036  Internal_ndof_types = 0;
1037 
1038 #ifdef PARANOID
1039  // Vector to keep track of previously assigned block numbers
1040  // to check consistency between multiple assignments.
 1041  Vector<int> previously_assigned_block_number(nrow_local,
 1042  Data::Is_unclassified);
 1043 #endif
1044 
 1045  // determine whether the problem is distributed
1046  bool problem_distributed = false;
1047 
1048  // the problem method distributed() is only accessible with MPI
1049 #ifdef OOMPH_HAS_MPI
1050  problem_distributed = any_mesh_distributed();
1051 #endif
1052 
1053  // if the problem is not distributed
1054  if (!problem_distributed)
1055  {
1056  // Offset for the block type in the overall system.
1057  // Different meshes contain different block-preconditionable
1058  // elements -- their blocks are added one after the other.
1059  unsigned dof_offset = 0;
1060 
1061  // Loop over all meshes.
1062  for (unsigned m = 0; m < nmesh(); m++)
1063  {
1064  // Number of elements in this mesh.
1065  unsigned n_element = mesh_pt(m)->nelement();
1066 
1067  // Find the number of block types that the elements in this mesh
1068  // are in charge of.
1069  unsigned ndof_in_element = ndof_types_in_mesh(m);
1070  Internal_ndof_types += ndof_in_element;
1071 
1072  for (unsigned e = 0; e < n_element; e++)
1073  {
1074  // List containing pairs of global equation number and
1075  // dof number for each global dof in an element.
1076  std::list<std::pair<unsigned long, unsigned>> dof_lookup_list;
1077 
1078  // Get list of blocks associated with the element's global unknowns.
1079  mesh_pt(m)->element_pt(e)->get_dof_numbers_for_unknowns(
1080  dof_lookup_list);
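 // (Each entry pairs a global equation number with the element's local
 // dof type; e.g. a hypothetical element with dof types 0 and 1 for the
 // velocities and 2 for the pressure might add pairs such as (42,0),
 // (43,1) and (91,2) for its unknowns.)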
1081 
1082  // Loop over all entries in the list
1083  // and store the block number.
1084  typedef std::list<std::pair<unsigned long, unsigned>>::iterator IT;
1085  for (IT it = dof_lookup_list.begin(); it != dof_lookup_list.end();
1086  it++)
1087  {
1088  unsigned long global_dof = it->first;
1089  if (global_dof >= unsigned(first_row) &&
1090  global_dof <= unsigned(last_row))
1091  {
1092  unsigned dof_number = (it->second) + dof_offset;
1093  Dof_number_dense[global_dof - first_row] = dof_number;
1094 
1095 #ifdef PARANOID
1096  // Check consistency of block numbers if assigned multiple times
1097  if (previously_assigned_block_number[global_dof - first_row] <
1098  0)
1099  {
1100  previously_assigned_block_number[global_dof - first_row] =
1101  dof_number;
1102  }
1103 #endif
1104  }
1105  }
1106  }
1107 
1108  // About to do the next mesh which contains block preconditionable
1109  // elements of a different type; all the dofs that these elements are
1110  // "in charge of" differ from the ones considered so far.
1111  // Bump up the block counter to make sure we're not overwriting
1112  // anything here
1113  dof_offset += ndof_in_element;
1114  }
1115 
1116 #ifdef PARANOID
1117  // check that every global equation number has been allocated a dof type
1118  for (unsigned i = 0; i < nrow_local; i++)
1119  {
1120  if (previously_assigned_block_number[i] < 0)
1121  {
1122  std::ostringstream error_message;
1123  error_message << "Not all degrees of freedom have had DOF type "
1124  << "numbers allocated. Dof number " << i
1125  << " is unallocated.";
1126  throw OomphLibError(error_message.str(),
1127  OOMPH_CURRENT_FUNCTION,
1128  OOMPH_EXCEPTION_LOCATION);
1129  }
1130  }
1131 #endif
1132  }
1133  // else the problem is distributed
1134  else
1135  {
1136 #ifdef OOMPH_HAS_MPI
1137  // Offset for the block type in the overall system.
1138  // Different meshes contain different block-preconditionable
1139  // elements -- their blocks are added one after the other...
1140  unsigned dof_offset = 0;
1141 
1142  // the set of global degrees of freedom and their corresponding dof
1143  // number on this processor
1144  std::map<unsigned long, unsigned> my_dof_map;
1145 
1146  // Loop over all meshes
1147  for (unsigned m = 0; m < nmesh(); m++)
1148  {
1149  // Number of elements in this mesh
1150  unsigned n_element = this->mesh_pt(m)->nelement();
1151 
1152  // Find the number of block types that the elements in this mesh
1153  // are in charge of
1154  unsigned ndof_in_element = ndof_types_in_mesh(m);
1155  Internal_ndof_types += ndof_in_element;
1156 
1157  // Loop over all elements
1158  for (unsigned e = 0; e < n_element; e++)
1159  {
1160  // if the element is not a halo element
1161  if (!this->mesh_pt(m)->element_pt(e)->is_halo())
1162  {
1163  // List containing pairs of global equation number and
1164  // dof number for each global dof in an element
1165  std::list<std::pair<unsigned long, unsigned>> dof_lookup_list;
1166 
1167  // Get list of blocks associated with the element's global
1168  // unknowns
1169  this->mesh_pt(m)->element_pt(e)->get_dof_numbers_for_unknowns(
1170  dof_lookup_list);
1171 
1172  // update the block numbers and put it in the map.
1173  typedef std::list<std::pair<unsigned long, unsigned>>::iterator
1174  IT;
1175  for (IT it = dof_lookup_list.begin(); it != dof_lookup_list.end();
1176  it++)
1177  {
1178  it->second = (it->second) + dof_offset;
1179  my_dof_map[it->first] = it->second;
1180  }
1181  }
1182  }
1183 
1184  // About to do the next mesh which contains block preconditionable
1185  // elements of a different type; all the dofs that these elements are
1186  // "in charge of" differ from the ones considered so far.
1187  // Bump up the block counter to make sure we're not overwriting
1188  // anything here
1189  dof_offset += ndof_in_element;
1190  }
1191 
1192  // next copy the map of my dofs to two vectors to send
1193  unsigned my_ndof = my_dof_map.size();
1194  unsigned long* my_global_dofs = new unsigned long[my_ndof];
1195  unsigned* my_dof_numbers = new unsigned[my_ndof];
1196  typedef std::map<unsigned long, unsigned>::iterator IT;
1197  unsigned pt = 0;
1198  for (IT it = my_dof_map.begin(); it != my_dof_map.end(); it++)
1199  {
1200  my_global_dofs[pt] = it->first;
1201  my_dof_numbers[pt] = it->second;
1202  pt++;
1203  }
1204 
1205  // and then clear the map
1206  my_dof_map.clear();
1207 
1208  // count up how many DOFs need to be sent to each processor
1209  int* first_dof_to_send = new int[nproc];
1210  int* ndof_to_send = new int[nproc];
1211  unsigned ptr = 0;
1212  for (unsigned p = 0; p < nproc; p++)
1213  {
1214  first_dof_to_send[p] = 0;
1215  ndof_to_send[p] = 0;
1216  while (ptr < my_ndof &&
1217  my_global_dofs[ptr] < dense_required_rows(p, 0))
1218  {
1219  ptr++;
1220  }
1221  first_dof_to_send[p] = ptr;
1222  while (ptr < my_ndof &&
1223  my_global_dofs[ptr] <= dense_required_rows(p, 1))
1224  {
1225  ndof_to_send[p]++;
1226  ptr++;
1227  }
1228  }
1229 
1230  // next communicate to each processor how many dofs it expects to recv
1231  int* ndof_to_recv = new int[nproc];
1232  MPI_Alltoall(ndof_to_send,
1233  1,
1234  MPI_INT,
1235  ndof_to_recv,
1236  1,
1237  MPI_INT,
1238  comm_pt()->mpi_comm());
1239 
1240  // the base displacements for the sends
1241  MPI_Aint base_displacement;
1242  MPI_Get_address(my_global_dofs, &base_displacement);
1243 
1244 #ifdef PARANOID
 1245  // storage for paranoid check to ensure that every row has been
1246  // imported
1247  std::vector<bool> dof_recv(nrow_local, false);
1248 #endif
1249 
1250  // next send and recv
1251  Vector<MPI_Request> send_requests;
1252  Vector<MPI_Request> recv_requests;
1253  Vector<unsigned long*> global_dofs_recv(nproc, 0);
1254  Vector<unsigned*> dof_numbers_recv(nproc, 0);
1255  Vector<unsigned> proc;
1256  for (unsigned p = 0; p < nproc; p++)
1257  {
1258  if (p != my_rank)
1259  {
1260  // send
1261  if (ndof_to_send[p] > 0)
1262  {
1263  // the datatypes, displacements, lengths for the two datatypes
1264  MPI_Datatype types[2];
1265  MPI_Aint displacements[2];
1266  int lengths[2];
1267 
1268  // my global dofs
1269  MPI_Type_contiguous(
1270  ndof_to_send[p], MPI_UNSIGNED_LONG, &types[0]);
1271  MPI_Type_commit(&types[0]);
1272  MPI_Get_address(my_global_dofs + first_dof_to_send[p],
1273  &displacements[0]);
1274  displacements[0] -= base_displacement;
1275  lengths[0] = 1;
1276 
1277  // my dof numbers
1278  MPI_Type_contiguous(ndof_to_send[p], MPI_UNSIGNED, &types[1]);
1279  MPI_Type_commit(&types[1]);
1280  MPI_Get_address(my_dof_numbers + first_dof_to_send[p],
1281  &displacements[1]);
1282  displacements[1] -= base_displacement;
1283  lengths[1] = 1;
1284 
1285  // build the final type
1286  MPI_Datatype send_type;
1287  MPI_Type_create_struct(
1288  2, lengths, displacements, types, &send_type);
1289  MPI_Type_commit(&send_type);
1290  MPI_Type_free(&types[0]);
1291  MPI_Type_free(&types[1]);
1292 
1293  // and send
1294  MPI_Request req;
1295  MPI_Isend(my_global_dofs,
1296  1,
1297  send_type,
1298  p,
1299  2,
1300  comm_pt()->mpi_comm(),
1301  &req);
1302  send_requests.push_back(req);
1303  MPI_Type_free(&send_type);
1304  }
1305 
1306  // and recv
1307  if (ndof_to_recv[p] > 0)
1308  {
1309  // resize the storage
1310  global_dofs_recv[p] = new unsigned long[ndof_to_recv[p]];
1311  dof_numbers_recv[p] = new unsigned[ndof_to_recv[p]];
1312  proc.push_back(p);
1313 
1314  // the datatypes, displacements, lengths for the two datatypes
1315  MPI_Datatype types[2];
1316  MPI_Aint displacements[2];
1317  int lengths[2];
1318 
1319  // my global dofs
1320  MPI_Type_contiguous(
1321  ndof_to_recv[p], MPI_UNSIGNED_LONG, &types[0]);
1322  MPI_Type_commit(&types[0]);
1323  MPI_Get_address(global_dofs_recv[p], &displacements[0]);
1324  displacements[0] -= base_displacement;
1325  lengths[0] = 1;
1326 
1327  // my dof numbers
1328  MPI_Type_contiguous(ndof_to_recv[p], MPI_UNSIGNED, &types[1]);
1329  MPI_Type_commit(&types[1]);
1330  MPI_Get_address(dof_numbers_recv[p], &displacements[1]);
1331  displacements[1] -= base_displacement;
1332  lengths[1] = 1;
1333 
1334  // build the final type
1335  MPI_Datatype recv_type;
1336  MPI_Type_create_struct(
1337  2, lengths, displacements, types, &recv_type);
1338  MPI_Type_commit(&recv_type);
1339  MPI_Type_free(&types[0]);
1340  MPI_Type_free(&types[1]);
1341 
1342  // and recv
1343  MPI_Request req;
1344  MPI_Irecv(my_global_dofs,
1345  1,
1346  recv_type,
1347  p,
1348  2,
1349  comm_pt()->mpi_comm(),
1350  &req);
1351  recv_requests.push_back(req);
1352  MPI_Type_free(&recv_type);
1353  }
1354  }
1355  // send to self
1356  else
1357  {
1358  unsigned u = first_dof_to_send[p] + ndof_to_recv[p];
1359  for (unsigned i = first_dof_to_send[p]; i < u; i++)
1360  {
1361 #ifdef PARANOID
 1362  // indicate that this dof has been received
1363  dof_recv[my_global_dofs[i] - first_row] = true;
1364 #endif
1365  Dof_number_dense[my_global_dofs[i] - first_row] =
1366  my_dof_numbers[i];
1367  }
1368  }
1369  }
1370 
1371  // recv and store the data
1372  unsigned c_recv = recv_requests.size();
1373  while (c_recv > 0)
1374  {
1375  // wait for any communication to finish
1376  int req_number;
1377  MPI_Waitany(
1378  c_recv, &recv_requests[0], &req_number, MPI_STATUS_IGNORE);
1379  recv_requests.erase(recv_requests.begin() + req_number);
1380  c_recv--;
1381 
1382  // determine the source processor
1383  unsigned p = proc[req_number];
1384  proc.erase(proc.begin() + req_number);
1385 
1386  // import the data
1387  for (int i = 0; i < ndof_to_recv[p]; i++)
1388  {
1389 #ifdef PARANOID
 1390  // indicate that this dof has been received
1391  dof_recv[global_dofs_recv[p][i] - first_row] = true;
1392 #endif
1393  Dof_number_dense[global_dofs_recv[p][i] - first_row] =
1394  dof_numbers_recv[p][i];
1395  }
1396 
1397  // delete the data
1398  delete[] global_dofs_recv[p];
1399  delete[] dof_numbers_recv[p];
1400  }
1401 
1402  // finally wait for the send requests to complete as we are leaving
1403  // an MPI block of code
1404  unsigned csr = send_requests.size();
1405  if (csr)
1406  {
 1407  MPI_Waitall(csr, &send_requests[0], MPI_STATUSES_IGNORE);
1408  }
1409 
1410  // clean up
1411  delete[] ndof_to_send;
1412  delete[] first_dof_to_send;
1413  delete[] ndof_to_recv;
1414  delete[] my_global_dofs;
1415  delete[] my_dof_numbers;
1416 #ifdef PARANOID
1417  unsigned all_recv = true;
1418  for (unsigned i = 0; i < nrow_local; i++)
1419  {
1420  if (!dof_recv[i])
1421  {
1422  all_recv = false;
1423  }
1424  }
1425  if (!all_recv)
1426  {
1427  std::ostringstream error_message;
1428  error_message << "Not all the DOF numbers required were received";
1429  throw OomphLibError(error_message.str(),
1430  OOMPH_CURRENT_FUNCTION,
1431  OOMPH_EXCEPTION_LOCATION);
1432  }
1433 #endif
1434 #else
1435  std::ostringstream error_message;
1436  error_message
1437  << "The problem appears to be distributed, MPI is required";
1438  throw OomphLibError(error_message.str(),
1439  OOMPH_CURRENT_FUNCTION,
1440  OOMPH_EXCEPTION_LOCATION);
1441 #endif
1442  }
1443 #ifdef OOMPH_HAS_MPI
1444  Vector<unsigned*> sparse_rows_for_proc(nproc, 0);
1445  Vector<MPI_Request> sparse_rows_for_proc_requests;
1446  if (matrix_distributed)
1447  {
 1448  // wait for the number of sparse rows each processor requires,
 1449  // then post recvs for that data
1450  if (recv_requests_sparse_nreq.size() > 0)
1451  {
1452  MPI_Waitall(recv_requests_sparse_nreq.size(),
1453  &recv_requests_sparse_nreq[0],
 1454  MPI_STATUSES_IGNORE);
1455  }
1456  for (unsigned p = 0; p < nproc; ++p)
1457  {
1458  if (nreq_sparse_for_proc[p] > 0)
1459  {
1460  MPI_Request req;
1461  sparse_rows_for_proc[p] = new unsigned[nreq_sparse_for_proc[p]];
1462  MPI_Irecv(sparse_rows_for_proc[p],
1463  nreq_sparse_for_proc[p],
1464  MPI_UNSIGNED,
1465  p,
1466  32,
1467  comm_pt()->mpi_comm(),
1468  &req);
1469  sparse_rows_for_proc_requests.push_back(req);
1470  }
1471  }
1472  }
1473 #endif
1474 
1475 
1476  // for every global degree of freedom required by this processor we now
1477  // have the corresponding dof number
1478 
1479  // clear the Ndof_in_dof_block storage
1480  Dof_dimension.assign(Internal_ndof_types, 0);
1481 
1482  // first consider a non distributed matrix
1483  if (!matrix_distributed)
1484  {
1485  // set the Index_in_dof_block
1486  unsigned nrow = this->distribution_pt()->nrow();
1487  Index_in_dof_block_dense.resize(nrow);
1488  Index_in_dof_block_dense.initialise(0);
1489  for (unsigned i = 0; i < nrow; i++)
1490  {
1491  Index_in_dof_block_dense[i] = Dof_dimension[Dof_number_dense[i]];
1492  Dof_dimension[Dof_number_dense[i]]++;
1493  }
1494  }
1495 
1496  // next a distributed matrix
1497  else
1498  {
1499 #ifdef OOMPH_HAS_MPI
1500 
1501 
1502  // first compute how many instances of each dof are on this
1503  // processor
1504  unsigned* my_nrows_in_dof_block = new unsigned[Internal_ndof_types];
1505  for (unsigned i = 0; i < Internal_ndof_types; i++)
1506  {
1507  my_nrows_in_dof_block[i] = 0;
1508  }
1509  for (unsigned i = 0; i < nrow_local; i++)
1510  {
1511  my_nrows_in_dof_block[Dof_number_dense[i]]++;
1512  }
1513 
1514  // next share the data
1515  unsigned* nrow_in_dof_block_recv =
1516  new unsigned[Internal_ndof_types * nproc];
1517  MPI_Allgather(my_nrows_in_dof_block,
1518  Internal_ndof_types,
1519  MPI_UNSIGNED,
1520  nrow_in_dof_block_recv,
1521  Internal_ndof_types,
1522  MPI_UNSIGNED,
1523  comm_pt()->mpi_comm());
1524  delete[] my_nrows_in_dof_block;
1525 
1526  // compute my first dof index and Nrows_in_dof_block
1527  Vector<unsigned> my_first_dof_index(Internal_ndof_types, 0);
1528  for (unsigned i = 0; i < Internal_ndof_types; i++)
1529  {
1530  for (unsigned p = 0; p < my_rank; p++)
1531  {
1532  my_first_dof_index[i] +=
1533  nrow_in_dof_block_recv[p * Internal_ndof_types + i];
1534  }
1535  Dof_dimension[i] = my_first_dof_index[i];
1536  for (unsigned p = my_rank; p < nproc; p++)
1537  {
1538  Dof_dimension[i] +=
1539  nrow_in_dof_block_recv[p * Internal_ndof_types + i];
1540  }
1541  }
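 // (Worked example: with nproc = 2 and Internal_ndof_types = 2, suppose
 // processor 0 holds 3 rows of dof type 0 and 2 rows of dof type 1, while
 // processor 1 holds 1 and 4 respectively, so that
 // nrow_in_dof_block_recv = [3,2,1,4]. On processor 1 this gives
 // my_first_dof_index = [3,2] and Dof_dimension = [4,6].)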
1542  delete[] nrow_in_dof_block_recv;
1543 
1544  // next compute Index in dof block
1545  Index_in_dof_block_dense.resize(nrow_local);
1546  Index_in_dof_block_dense.initialise(0);
1547  Vector<unsigned> dof_counter(Internal_ndof_types, 0);
1548  for (unsigned i = 0; i < nrow_local; i++)
1549  {
1550  Index_in_dof_block_dense[i] =
1551  my_first_dof_index[Dof_number_dense[i]] +
1552  dof_counter[Dof_number_dense[i]];
1553  dof_counter[Dof_number_dense[i]]++;
1554  }
1555 
1556  // the base displacements for the sends
1557  if (sparse_rows_for_proc_requests.size() > 0)
1558  {
1559  MPI_Waitall(sparse_rows_for_proc_requests.size(),
1560  &sparse_rows_for_proc_requests[0],
1561  MPI_STATUS_IGNORE);
1562  }
1563  MPI_Aint base_displacement;
1564  MPI_Get_address(dof_number_sparse_send, &base_displacement);
1565  unsigned first_row = this->distribution_pt()->first_row();
1566  for (unsigned p = 0; p < nproc; ++p)
1567  {
1568  if (nreq_sparse_for_proc[p] > 0)
1569  {
1570  // construct the data
1571  index_in_dof_block_sparse_send[p] =
1572  new unsigned[nreq_sparse_for_proc[p]];
1573  dof_number_sparse_send[p] = new unsigned[nreq_sparse_for_proc[p]];
1574  for (unsigned i = 0; i < nreq_sparse_for_proc[p]; ++i)
1575  {
1576  unsigned r = sparse_rows_for_proc[p][i];
1577  r -= first_row;
1578  index_in_dof_block_sparse_send[p][i] =
1579  Index_in_dof_block_dense[r];
1580  dof_number_sparse_send[p][i] = Dof_number_dense[r];
1581  }
1582  delete[] sparse_rows_for_proc[p];
1583 
1584  // send the data
1585  // the datatypes, displacements, lengths for the two datatypes
1586  MPI_Datatype types[2];
1587  MPI_Aint displacements[2];
1588  int lengths[2];
1589 
1590  // index in dof block
1591  MPI_Type_contiguous(
1592  nreq_sparse_for_proc[p], MPI_UNSIGNED, &types[0]);
1593  MPI_Type_commit(&types[0]);
1594  MPI_Get_address(index_in_dof_block_sparse_send[p],
1595  &displacements[0]);
1596  displacements[0] -= base_displacement;
1597  lengths[0] = 1;
1598 
1599  // dof number
1600  MPI_Type_contiguous(
1601  nreq_sparse_for_proc[p], MPI_UNSIGNED, &types[1]);
1602  MPI_Type_commit(&types[1]);
1603  MPI_Get_address(dof_number_sparse_send[p], &displacements[1]);
1604  displacements[1] -= base_displacement;
1605  lengths[1] = 1;
1607  // build the final type
1608  MPI_Datatype send_type;
1609  MPI_Type_create_struct(
1610  2, lengths, displacements, types, &send_type);
1611  MPI_Type_commit(&send_type);
1612  MPI_Type_free(&types[0]);
1613  MPI_Type_free(&types[1]);
1614 
1615  // and send
1616  MPI_Request req;
1617  MPI_Isend(dof_number_sparse_send,
1618  1,
1619  send_type,
1620  p,
1621  33,
1622  comm_pt()->mpi_comm(),
1623  &req);
1624  send_requests_sparse.push_back(req);
1625  MPI_Type_free(&send_type);
1626  }
1627  else
1628  {
1629  index_in_dof_block_sparse_send[p] = 0;
1630  dof_number_sparse_send[p] = 0;
1631  }
1632  }
1633 #endif
1634  }
1635  }
1636 
1637  /// //////////////////////////////////////////////////////////////////////////
1638  // end of master block preconditioner only operations
1639  /// //////////////////////////////////////////////////////////////////////////
1640 
1641  // compute the number of rows in each block
1642 
1643 #ifdef PARANOID
1644  // check the vector is the correct length
1645  if (dof_to_block_map.size() != Internal_ndof_types)
1646  {
1647  std::ostringstream error_message;
1648  error_message << "The dof_to_block_map vector (size="
1649  << dof_to_block_map.size()
1650  << ") must be of size Internal_ndof_types="
1651  << Internal_ndof_types;
1652  throw OomphLibError(
1653  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1654  }
1655 #endif
1656 
1657  // find the maximum block number (note: could use std::max_element)
1658  unsigned max_block_number = 0;
1659  for (unsigned i = 0; i < Internal_ndof_types; i++)
1660  {
1661  if (dof_to_block_map[i] > max_block_number)
1662  {
1663  max_block_number = dof_to_block_map[i];
1664  }
1665  }
1666 
1667  // resize the storage for the block to dof map
1668  Block_number_to_dof_number_lookup.clear();
1669  Block_number_to_dof_number_lookup.resize(max_block_number + 1);
1670  Ndof_in_block.clear();
1671  Ndof_in_block.resize(max_block_number + 1);
1672 
1673  // resize storage
1674  Dof_number_to_block_number_lookup.resize(Internal_ndof_types);
1675 
1676  // build the storage for the two maps (block to dof) and (dof to block)
1677  for (unsigned i = 0; i < Internal_ndof_types; i++)
1678  {
1679  Dof_number_to_block_number_lookup[i] = dof_to_block_map[i];
1680  Block_number_to_dof_number_lookup[dof_to_block_map[i]].push_back(i);
1681  Ndof_in_block[dof_to_block_map[i]]++;
1682  }
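  // Worked example (hypothetical numbers, for illustration only): with
  // Internal_ndof_types = 3 and dof_to_block_map = [0, 1, 1] the loop above
  // yields
  //   Dof_number_to_block_number_lookup = [0, 1, 1]
  //   Block_number_to_dof_number_lookup = [[0], [1, 2]]
  //   Ndof_in_block                     = [1, 2]
  // i.e. dof types 1 and 2 are grouped into block 1.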
1683 
1684 #ifdef PARANOID
1685  // paranoid check that every block number has at least one DOF associated
1686  // with it
1687  for (unsigned i = 0; i < max_block_number + 1; i++)
1688  {
1689  if (Block_number_to_dof_number_lookup[i].size() == 0)
1690  {
1691  std::ostringstream error_message;
1692  error_message << "block number " << i
1693  << " does not have any DOFs associated with it";
1694  throw OomphLibWarning(error_message.str(),
1695  OOMPH_CURRENT_FUNCTION,
1696  OOMPH_EXCEPTION_LOCATION);
1697  }
1698  }
1699 #endif
1700 
1701  // Update the number of block types.
1702  Internal_nblock_types = max_block_number + 1;
1703 
1704  // Distributed or not: inherit the setting from the master distribution.
1705  bool distributed = this->master_distribution_pt()->distributed();
1706 
1707  // Create the new block distributions.
1708  Internal_block_distribution_pt.resize(Internal_nblock_types);
1709  for (unsigned i = 0; i < Internal_nblock_types; i++)
1710  {
1711  unsigned block_dim = 0;
1712  for (unsigned j = 0; j < Ndof_in_block[i]; j++)
1713  {
1714  block_dim +=
1715  internal_dof_block_dimension(Block_number_to_dof_number_lookup[i][j]);
1716  }
1717  Internal_block_distribution_pt[i] =
1718  new LinearAlgebraDistribution(comm_pt(), block_dim, distributed);
1719  }
1720 
1721  // Work out the distribution of the dof-level blocks. Since several
1722  // dof types may be coarsened into a single dof type, we get the
1723  // dof-level block distributions from the parent preconditioner.
1724 
1725  // How many dof types are there?
1726  if (is_subsidiary_block_preconditioner())
1727  {
1728  // Delete any pre-existing distributions.
1729  const unsigned dof_block_distribution_size =
1730  Dof_block_distribution_pt.size();
1731  for (unsigned dof_i = 0; dof_i < dof_block_distribution_size; dof_i++)
1732  {
1733  delete Dof_block_distribution_pt[dof_i];
1734  }
1735  const unsigned ndofs = this->ndof_types();
1736  Dof_block_distribution_pt.resize(ndofs, 0);
1737 
1738  // For each dof type, work out how many parent preconditioner dof types
1739  // are in it.
1740  for (unsigned dof_i = 0; dof_i < ndofs; dof_i++)
1741  {
1742  // For each external dof, we get the dofs coarsened into it (from the
1743  // parent preconditioner level, not the most fine grain level).
1744  const unsigned ncoarsened_dofs_in_dof_i =
1745  Doftype_coarsen_map_coarse[dof_i].size();
1746  Vector<LinearAlgebraDistribution*> tmp_dist_pt(ncoarsened_dofs_in_dof_i,
1747  0);
1748  for (unsigned parent_dof_i = 0; parent_dof_i < ncoarsened_dofs_in_dof_i;
1749  parent_dof_i++)
1750  {
1751  tmp_dist_pt[parent_dof_i] =
1752  master_block_preconditioner_pt()->dof_block_distribution_pt(
1753  Doftype_in_master_preconditioner_coarse
1754  [Doftype_coarsen_map_coarse[dof_i][parent_dof_i]]);
1755  }
1756 
1757  Dof_block_distribution_pt[dof_i] = new LinearAlgebraDistribution;
1758 
1759 
1760  LinearAlgebraDistributionHelpers::concatenate(
1761  tmp_dist_pt, *Dof_block_distribution_pt[dof_i]);
1762  }
1763  }
1764 
1765  // Create Block_distribution_pt
1766  {
1767  // Delete any existing distributions in Block_distribution_pt.
1768  // (This should already be deleted in clear_block_preconditioner_base(...)
1769  // but we are just being extra safe!).
1770  unsigned n_existing_block_dist = Block_distribution_pt.size();
1771  for (unsigned dist_i = 0; dist_i < n_existing_block_dist; dist_i++)
1772  {
1773  delete Block_distribution_pt[dist_i];
1774  }
1775 
1776  Block_distribution_pt.clear();
1777 
1778  // Work out the distributions of the concatenated blocks.
1779  unsigned super_block_size = Block_to_dof_map_coarse.size();
1780  Block_distribution_pt.resize(super_block_size, 0);
1781  for (unsigned super_block_i = 0; super_block_i < super_block_size;
1782  super_block_i++)
1783  {
1784  unsigned sub_block_size = Block_to_dof_map_coarse[super_block_i].size();
1785  Vector<LinearAlgebraDistribution*> tmp_dist_pt(sub_block_size, 0);
1786 
1787  for (unsigned sub_block_i = 0; sub_block_i < sub_block_size;
1788  sub_block_i++)
1789  {
1790  tmp_dist_pt[sub_block_i] = dof_block_distribution_pt(
1791  Block_to_dof_map_coarse[super_block_i][sub_block_i]);
1792  }
1793 
1794  Block_distribution_pt[super_block_i] = new LinearAlgebraDistribution;
1795 
1796  LinearAlgebraDistributionHelpers::concatenate(
1797  tmp_dist_pt, *Block_distribution_pt[super_block_i]);
1798  }
1799 
1800  } // Creating Block_distribution_pt.
1801 
1802 
1803  // Create the distribution of the preconditioner matrix:
1804  // if this preconditioner is a subsidiary preconditioner then it is stored
1805  // at Distribution_pt;
1806  // if this preconditioner is a master preconditioner then it is stored
1807  // at Internal_preconditioner_matrix_distribution_pt.
1808  LinearAlgebraDistribution dist;
1809  LinearAlgebraDistributionHelpers::concatenate(
1810  Internal_block_distribution_pt, dist);
1811 
1812  // Build the distribution.
1813  if (is_subsidiary_block_preconditioner())
1814  {
1815  this->build_distribution(dist);
1816  }
1817  else
1818  {
1819  Internal_preconditioner_matrix_distribution_pt =
1820  new LinearAlgebraDistribution(dist);
1821  }
1822 
1823  Preconditioner_matrix_distribution_pt = new LinearAlgebraDistribution;
1824  LinearAlgebraDistributionHelpers::concatenate(
1825  Block_distribution_pt, *Preconditioner_matrix_distribution_pt);
1826 
1827  // Clear all distributions in Auxiliary_block_distribution_pt, except for
1828  // the one which corresponds to the preconditioner matrix distribution. This
1829  // is already deleted by clear_block_preconditioner_base(...)
1830 
1831  // Create the key which corresponds to
1832  // preconditioner_matrix_distribution_pt.
1833  {
1834  const unsigned nblocks = Block_distribution_pt.size();
1835  Vector<unsigned> preconditioner_matrix_key(nblocks, 0);
1836  for (unsigned i = 0; i < nblocks; i++)
1837  {
1838  preconditioner_matrix_key[i] = i;
1839  }
1840 
1841  // Now iterate through Auxiliary_block_distribution_pt and delete
1842  // everything except for the value which corresponds to
1843  // preconditioner_matrix_key.
1844  std::map<Vector<unsigned>, LinearAlgebraDistribution*>::iterator iter =
1845  Auxiliary_block_distribution_pt.begin();
1846  while (iter != Auxiliary_block_distribution_pt.end())
1847  {
1848  if (iter->first != preconditioner_matrix_key)
1849  {
1850  delete iter->second;
1851  iter++;
1852  }
1853  else
1854  {
1855  ++iter;
1856  }
1857  }
1858 
1859  // Clear it just to be safe!
1860  Auxiliary_block_distribution_pt.clear();
1861 
1862  // Insert the preconditioner matrix distribution.
1863  insert_auxiliary_block_distribution(
1864  preconditioner_matrix_key, Preconditioner_matrix_distribution_pt);
1865  } // End of Auxiliary_block_distribution_pt encapsulation.
1866 
1867  // Clearing up after comm to assemble sparse lookup schemes.
1868 #ifdef OOMPH_HAS_MPI
1869  if (send_requests_sparse.size() > 0)
1870  {
1871  MPI_Waitall(send_requests_sparse.size(),
1872  &send_requests_sparse[0],
1873  MPI_STATUS_IGNORE);
1874  }
1875  if (recv_requests_sparse.size() > 0)
1876  {
1877  MPI_Waitall(recv_requests_sparse.size(),
1878  &recv_requests_sparse[0],
1879  MPI_STATUS_IGNORE);
1880  }
1881  for (unsigned p = 0; p < nproc; p++)
1882  {
1883  delete[] index_in_dof_block_sparse_send[p];
1884  delete[] dof_number_sparse_send[p];
1885  }
1886  delete[] index_in_dof_block_sparse_send;
1887  delete[] dof_number_sparse_send;
1888  delete[] nreq_sparse;
1889  delete[] nreq_sparse_for_proc;
1890 #endif
1891 
1892  // Next we assemble the lookup schemes for the rows
1893  // if the matrix is not distributed then we assemble Global_index
1894  // if the matrix is distributed then Rows_to_send_..., Rows_to_recv_... etc.
1895  if (!distributed)
1896  {
1897  // Resize the storage.
1898  Global_index.resize(Internal_nblock_types);
1899  for (unsigned b = 0; b < Internal_nblock_types; b++)
1900  {
1901  Global_index[b].resize(Internal_block_distribution_pt[b]->nrow());
1902  }
1903 
1904  // Compute:
1905  unsigned nrow = this->master_nrow();
1906  for (unsigned i = 0; i < nrow; i++)
1907  {
1908  // the dof type number;
1909  int dof_number = this->internal_dof_number(i);
1910  if (dof_number >= 0)
1911  {
1912  // the block number;
1913  unsigned block_number = Dof_number_to_block_number_lookup[dof_number];
1914 
1915  // the index in the block.
1916  unsigned index_in_block = 0;
1917  unsigned ptr = 0;
1918  while (int(Block_number_to_dof_number_lookup[block_number][ptr]) !=
1919  dof_number)
1920  {
1921  index_in_block += internal_dof_block_dimension(
1922  Block_number_to_dof_number_lookup[block_number][ptr]);
1923  ptr++;
1924  }
1925  index_in_block += internal_index_in_dof(i);
1926  Global_index[block_number][index_in_block] = i;
1927  }
1928  }
1929  }
1930  // otherwise the matrix is distributed
1931  else
1932  {
1933 #ifdef OOMPH_HAS_MPI
1934 
1935  // the pointer to the master distribution
1936  const LinearAlgebraDistribution* master_distribution_pt =
1937  this->master_distribution_pt();
1938 
1939  // resize the nrows... storage
1940  Nrows_to_send_for_get_block.resize(Internal_nblock_types, nproc);
1941  Nrows_to_send_for_get_block.initialise(0);
1942  Nrows_to_send_for_get_ordered.resize(nproc);
1943  Nrows_to_send_for_get_ordered.initialise(0);
1944 
1945  // loop over my rows
1946  unsigned nrow_local = master_distribution_pt->nrow_local();
1947  unsigned first_row = master_distribution_pt->first_row();
1948  for (unsigned i = 0; i < nrow_local; i++)
1949  {
1950  // the block number
1951  int b = this->internal_block_number(first_row + i);
1952 
1953  // check that the DOF i is associated with this preconditioner
1954  if (b >= 0)
1955  {
1956  // the block index
1957  unsigned j = this->internal_index_in_block(first_row + i);
1958 
1959  // the processor this row will be sent to
1960  unsigned block_p = 0;
1961  while (!(Internal_block_distribution_pt[b]->first_row(block_p) <= j &&
1962  (Internal_block_distribution_pt[b]->first_row(block_p) +
1963  Internal_block_distribution_pt[b]->nrow_local(block_p) >
1964  j)))
1965  {
1966  block_p++;
1967  }
1968 
1969  // and increment the counter
1970  Nrows_to_send_for_get_block(b, block_p)++;
1971  Nrows_to_send_for_get_ordered[block_p]++;
1972  }
1973  }
1974 
1975  // resize the storage for Nrows_to_recv
1976  Nrows_to_recv_for_get_block.resize(Internal_nblock_types, nproc);
1977  Nrows_to_recv_for_get_block.initialise(0);
1978  Nrows_to_recv_for_get_ordered.resize(nproc);
1979  Nrows_to_recv_for_get_ordered.initialise(0);
1980 
1981  // next we send the number of rows that will be sent by this processor
1982  Vector<unsigned*> nrows_to_send(nproc, 0);
1983  Vector<unsigned*> nrows_to_recv(nproc, 0);
1984  Vector<MPI_Request> send_requests_nrow;
1985  Vector<MPI_Request> recv_requests_nrow;
1986  Vector<unsigned> proc;
1987  for (unsigned p = 0; p < nproc; p++)
1988  {
1989  if (p != my_rank)
1990  {
1991  // send
1992  proc.push_back(p);
1993  nrows_to_send[p] = new unsigned[Internal_nblock_types];
1994  for (unsigned b = 0; b < Internal_nblock_types; b++)
1995  {
1996  nrows_to_send[p][b] = Nrows_to_send_for_get_block(b, p);
1997  }
1998  MPI_Request s_req;
1999  MPI_Isend(nrows_to_send[p],
2000  Internal_nblock_types,
2001  MPI_UNSIGNED,
2002  p,
2003  3,
2004  comm_pt()->mpi_comm(),
2005  &s_req);
2006  send_requests_nrow.push_back(s_req);
2007 
2008  // recv
2009  nrows_to_recv[p] = new unsigned[Internal_nblock_types];
2010  MPI_Request r_req;
2011  MPI_Irecv(nrows_to_recv[p],
2012  Internal_nblock_types,
2013  MPI_UNSIGNED,
2014  p,
2015  3,
2016  comm_pt()->mpi_comm(),
2017  &r_req);
2018  recv_requests_nrow.push_back(r_req);
2019  }
2020  // send to self
2021  else
2022  {
2023  for (unsigned b = 0; b < Internal_nblock_types; b++)
2024  {
2025  Nrows_to_recv_for_get_block(b, p) =
2026  Nrows_to_send_for_get_block(b, p);
2027  }
2028  Nrows_to_recv_for_get_ordered[p] = Nrows_to_send_for_get_ordered[p];
2029  }
2030  }
2031 
2032  // create some temporary storage for the global row indices that will
2033  // be received from another processor.
2034  DenseMatrix<int*> block_rows_to_send(Internal_nblock_types, nproc, 0);
2035  Vector<int*> ordered_rows_to_send(nproc, 0);
2036 
2037  // resize the rows... storage
2038  Rows_to_send_for_get_block.resize(Internal_nblock_types, nproc);
2039  Rows_to_send_for_get_block.initialise(0);
2040  Rows_to_send_for_get_ordered.resize(nproc);
2041  Rows_to_send_for_get_ordered.initialise(0);
2042  Rows_to_recv_for_get_block.resize(Internal_nblock_types, nproc);
2043  Rows_to_recv_for_get_block.initialise(0);
2044 
2045  // resize the storage
2046  for (unsigned p = 0; p < nproc; p++)
2047  {
2048  for (unsigned b = 0; b < Internal_nblock_types; b++)
2049  {
2050  Rows_to_send_for_get_block(b, p) =
2051  new int[Nrows_to_send_for_get_block(b, p)];
2052  if (p != my_rank)
2053  {
2054  block_rows_to_send(b, p) =
2055  new int[Nrows_to_send_for_get_block(b, p)];
2056  }
2057  else
2058  {
2059  Rows_to_recv_for_get_block(b, p) =
2060  new int[Nrows_to_send_for_get_block(b, p)];
2061  }
2062  }
2063  Rows_to_send_for_get_ordered[p] =
2064  new int[Nrows_to_send_for_get_ordered[p]];
2065  }
2066 
2067 
2068  // loop over my rows again, this time storing the rows to send/recv
2069  DenseMatrix<unsigned> ptr_block(Internal_nblock_types, nproc, 0);
2070  for (unsigned i = 0; i < nrow_local; i++)
2071  {
2072  // the block number
2073  int b = this->internal_block_number(first_row + i);
2074 
2075  // check that the DOF i is associated with this preconditioner
2076  if (b >= 0)
2077  {
2078  // the block index
2079  unsigned j = this->internal_index_in_block(first_row + i);
2080 
2081  // the processor this row will be sent to
2082  unsigned block_p = 0;
2083  while (!(Internal_block_distribution_pt[b]->first_row(block_p) <= j &&
2084  (Internal_block_distribution_pt[b]->first_row(block_p) +
2085  Internal_block_distribution_pt[b]->nrow_local(block_p) >
2086  j)))
2087  {
2088  block_p++;
2089  }
2090 
2091  // and store the row
2092  Rows_to_send_for_get_block(b, block_p)[ptr_block(b, block_p)] = i;
2093  if (block_p != my_rank)
2094  {
2095  block_rows_to_send(b, block_p)[ptr_block(b, block_p)] =
2096  j - Internal_block_distribution_pt[b]->first_row(block_p);
2097  }
2098  else
2099  {
2100  Rows_to_recv_for_get_block(b, block_p)[ptr_block(b, block_p)] =
2101  j - Internal_block_distribution_pt[b]->first_row(block_p);
2102  }
2103  ptr_block(b, block_p)++;
2104  }
2105  }
2106 
2107  // next block ordered
2108  for (unsigned p = 0; p < nproc; ++p)
2109  {
2110  int pt = 0;
2111  for (unsigned b = 0; b < Internal_nblock_types; ++b)
2112  {
2113  for (unsigned i = 0; i < Nrows_to_send_for_get_block(b, p); ++i)
2114  {
2115  Rows_to_send_for_get_ordered[p][pt] =
2116  Rows_to_send_for_get_block(b, p)[i];
2117  pt++;
2118  }
2119  }
2120  }
2121 
2122  // next process the nrow recvs as they complete
2123 
2124  // recv and store the data
2125  unsigned c = recv_requests_nrow.size();
2126  while (c > 0)
2127  {
2128  // wait for any communication to finish
2129  int req_number;
2130  MPI_Waitany(c, &recv_requests_nrow[0], &req_number, MPI_STATUS_IGNORE);
2131  recv_requests_nrow.erase(recv_requests_nrow.begin() + req_number);
2132  c--;
2133 
2134  // determine the source processor
2135  unsigned p = proc[req_number];
2136  proc.erase(proc.begin() + req_number);
2137 
2138  // copy the data to its final storage
2139  Nrows_to_recv_for_get_ordered[p] = 0;
2140  for (unsigned b = 0; b < Internal_nblock_types; b++)
2141  {
2142  Nrows_to_recv_for_get_block(b, p) = nrows_to_recv[p][b];
2143  Nrows_to_recv_for_get_ordered[p] += nrows_to_recv[p][b];
2144  }
2145 
2146  // and clear
2147  delete[] nrows_to_recv[p];
2148  }
2149 
2150  // resize the storage for the incoming rows data
2151  Rows_to_recv_for_get_ordered.resize(nproc, 0);
2152  for (unsigned p = 0; p < nproc; p++)
2153  {
2154  if (p != my_rank)
2155  {
2156  for (unsigned b = 0; b < Internal_nblock_types; b++)
2157  {
2158  Rows_to_recv_for_get_block(b, p) =
2159  new int[Nrows_to_recv_for_get_block(b, p)];
2160  }
2161  }
2162  }
2163 
2164  // compute the number of sends and recv from this processor
2165  // to each other processor
2166  Vector<unsigned> nsend_for_rows(nproc, 0);
2167  Vector<unsigned> nrecv_for_rows(nproc, 0);
2168  for (unsigned p = 0; p < nproc; p++)
2169  {
2170  if (p != my_rank)
2171  {
2172  for (unsigned b = 0; b < Internal_nblock_types; b++)
2173  {
2174  if (Nrows_to_send_for_get_block(b, p) > 0)
2175  {
2176  nsend_for_rows[p]++;
2177  }
2178  if (Nrows_to_recv_for_get_block(b, p) > 0)
2179  {
2180  nrecv_for_rows[p]++;
2181  }
2182  }
2183  }
2184  }
2185 
2186  // finally post the sends and recvs
2187  MPI_Aint base_displacement;
2188  MPI_Get_address(matrix_pt(), &base_displacement);
2189  Vector<MPI_Request> req_rows;
2190  for (unsigned p = 0; p < nproc; p++)
2191  {
2192  if (p != my_rank)
2193  {
2194  // send
2195  if (nsend_for_rows[p] > 0)
2196  {
2197  MPI_Datatype send_types[nsend_for_rows[p]];
2198  MPI_Aint send_displacements[nsend_for_rows[p]];
2199  int send_sz[nsend_for_rows[p]];
2200  unsigned send_ptr = 0;
2201  for (unsigned b = 0; b < Internal_nblock_types; b++)
2202  {
2203  if (Nrows_to_send_for_get_block(b, p) > 0)
2204  {
2205  MPI_Type_contiguous(Nrows_to_send_for_get_block(b, p),
2206  MPI_INT,
2207  &send_types[send_ptr]);
2208  MPI_Type_commit(&send_types[send_ptr]);
2209  MPI_Get_address(block_rows_to_send(b, p),
2210  &send_displacements[send_ptr]);
2211  send_displacements[send_ptr] -= base_displacement;
2212  send_sz[send_ptr] = 1;
2213  send_ptr++;
2214  }
2215  }
2216  MPI_Datatype final_send_type;
2217  MPI_Type_create_struct(nsend_for_rows[p],
2218  send_sz,
2219  send_displacements,
2220  send_types,
2221  &final_send_type);
2222  MPI_Type_commit(&final_send_type);
2223  for (unsigned i = 0; i < nsend_for_rows[p]; i++)
2224  {
2225  MPI_Type_free(&send_types[i]);
2226  }
2227  MPI_Request send_req;
2228  MPI_Isend(matrix_pt(),
2229  1,
2230  final_send_type,
2231  p,
2232  4,
2233  comm_pt()->mpi_comm(),
2234  &send_req);
2235  req_rows.push_back(send_req);
2236  MPI_Type_free(&final_send_type);
2237  }
2238 
2239  // recv
2240  if (nrecv_for_rows[p] > 0)
2241  {
2242  MPI_Datatype recv_types[nrecv_for_rows[p]];
2243  MPI_Aint recv_displacements[nrecv_for_rows[p]];
2244  int recv_sz[nrecv_for_rows[p]];
2245  unsigned recv_ptr = 0;
2246  for (unsigned b = 0; b < Internal_nblock_types; b++)
2247  {
2248  if (Nrows_to_recv_for_get_block(b, p) > 0)
2249  {
2250  MPI_Type_contiguous(Nrows_to_recv_for_get_block(b, p),
2251  MPI_INT,
2252  &recv_types[recv_ptr]);
2253  MPI_Type_commit(&recv_types[recv_ptr]);
2254  MPI_Get_address(Rows_to_recv_for_get_block(b, p),
2255  &recv_displacements[recv_ptr]);
2256  recv_displacements[recv_ptr] -= base_displacement;
2257  recv_sz[recv_ptr] = 1;
2258  recv_ptr++;
2259  }
2260  }
2261  MPI_Datatype final_recv_type;
2262  MPI_Type_create_struct(nrecv_for_rows[p],
2263  recv_sz,
2264  recv_displacements,
2265  recv_types,
2266  &final_recv_type);
2267  MPI_Type_commit(&final_recv_type);
2268  for (unsigned i = 0; i < nrecv_for_rows[p]; i++)
2269  {
2270  MPI_Type_free(&recv_types[i]);
2271  }
2272  MPI_Request recv_req;
2273  MPI_Irecv(matrix_pt(),
2274  1,
2275  final_recv_type,
2276  p,
2277  4,
2278  comm_pt()->mpi_comm(),
2279  &recv_req);
2280  req_rows.push_back(recv_req);
2281  MPI_Type_free(&final_recv_type);
2282  }
2283  }
2284  }
2285 
2286  // cleaning up Waitalls
2287 
2288 
2289  // wait for the recv requests so we can compute
2290  // Nrows_to_recv_for_get_ordered
2291  unsigned n_req_rows = req_rows.size();
2292  if (n_req_rows)
2293  {
2294  MPI_Waitall(n_req_rows, &req_rows[0], MPI_STATUS_IGNORE);
2295  }
2296 
2297  // resize the storage
2298  Rows_to_recv_for_get_ordered.resize(nproc);
2299  Rows_to_recv_for_get_ordered.initialise(0);
2300 
2301  // construct block offset
2302  Vector<int> vec_offset(Internal_nblock_types, 0);
2303  for (unsigned b = 1; b < Internal_nblock_types; ++b)
2304  {
2305  vec_offset[b] = vec_offset[b - 1] +
2306  Internal_block_distribution_pt[b - 1]->nrow_local();
2307  }
2308 
2309  //
2310  for (unsigned p = 0; p < nproc; p++)
2311  {
2312  int pt = 0;
2313  Rows_to_recv_for_get_ordered[p] =
2314  new int[Nrows_to_recv_for_get_ordered[p]];
2315  for (unsigned b = 0; b < Internal_nblock_types; b++)
2316  {
2317  for (unsigned i = 0; i < Nrows_to_recv_for_get_block(b, p); i++)
2318  {
2319  Rows_to_recv_for_get_ordered[p][pt] =
2320  Rows_to_recv_for_get_block(b, p)[i] + vec_offset[b];
2321  pt++;
2322  }
2323  }
2324  }
2325 
2326  // clean up
2327  for (unsigned p = 0; p < nproc; p++)
2328  {
2329  if (p != my_rank)
2330  {
2331  for (unsigned b = 0; b < Internal_nblock_types; b++)
2332  {
2333  delete[] block_rows_to_send(b, p);
2334  }
2335  if (Nrows_to_send_for_get_ordered[p] > 0)
2336  {
2337  delete[] ordered_rows_to_send[p];
2338  }
2339  }
2340  }
2341 
2342  // and the send reqs
2343  unsigned n_req_send_nrow = send_requests_nrow.size();
2344  if (n_req_send_nrow)
2345  {
2346  MPI_Waitall(n_req_send_nrow, &send_requests_nrow[0], MPI_STATUS_IGNORE);
2347  }
2348  for (unsigned p = 0; p < nproc; p++)
2349  {
2350  delete[] nrows_to_send[p];
2351  }
2352 #endif
2353  }
2354 
2355  // If we asked for output of blocks to a file then do it.
2356  if (block_output_on()) output_blocks_to_files(Output_base_filename);
2357  }
2358 
2359  //============================================================================
2360  //??ds
2361  /// Function to turn this preconditioner into a
2362  /// subsidiary preconditioner that operates within a bigger
2363  /// master block preconditioner (e.g. a Navier-Stokes 2x2 block
2364  /// preconditioner dealing with the fluid sub-blocks within a
2365  /// 3x3 FSI preconditioner). Once this is done the master block
2366  /// preconditioner deals with the block setup etc.
2367  /// The vector doftype_in_master_preconditioner_coarse must specify the
2368  /// dof number in the master preconditioner that corresponds to a dof
2369  /// number in this (subsidiary) preconditioner.
2370  /// The length of the vector is used to determine the number of
2371  /// dof types in this preconditioner, therefore it must be correctly sized.
2372  /// This calls the other turn_into_subsidiary_block_preconditioner(...)
2373  /// function, providing the identity doftype_coarsen_map_coarse vector.
2374  //============================================================================
2375  template<typename MATRIX>
2376  void BlockPreconditioner<MATRIX>::turn_into_subsidiary_block_preconditioner(
2377  BlockPreconditioner<MATRIX>* master_block_prec_pt,
2378  const Vector<unsigned>& doftype_in_master_preconditioner_coarse)
2379  {
2380  // Create the identity dof_coarsen_map
2381  Vector<Vector<unsigned>> doftype_coarsen_map_coarse;
2382  unsigned doftype_in_master_preconditioner_coarse_size =
2383  doftype_in_master_preconditioner_coarse.size();
2384 
2385  for (unsigned dof_i = 0;
2386  dof_i < doftype_in_master_preconditioner_coarse_size;
2387  dof_i++)
2388  {
2389  // Create a vector of size 1 and value i,
2390  // then push it into the dof_coarsen_map vector.
2391  Vector<unsigned> tmp_vec(1, dof_i);
2392  doftype_coarsen_map_coarse.push_back(tmp_vec);
2393  }
2394 
2395  // Call the other turn_into_subsidiary_block_preconditioner function.
2396  turn_into_subsidiary_block_preconditioner(
2397  master_block_prec_pt,
2398  doftype_in_master_preconditioner_coarse,
2399  doftype_coarsen_map_coarse);
2400  }
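  // Usage sketch (illustrative only; the pointer names are hypothetical):
  // a master preconditioner that groups its five dof types as [0,1,1,1,1]
  // and solves the (1,1) block with a subsidiary block preconditioner
  // would typically call
  //
  //   Vector<unsigned> dof_map(4);
  //   for (unsigned i = 0; i < 4; i++) { dof_map[i] = i + 1; }
  //   subsidiary_prec_pt->turn_into_subsidiary_block_preconditioner(
  //     this, dof_map);
  //
  // so that subsidiary dof types 0-3 correspond to master dof types 1-4.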
2401 
2402 
2403  //============================================================================
2404  /// Function to turn this block preconditioner into a
2405  /// subsidiary block preconditioner that operates within a bigger
2406  /// master block preconditioner (e.g. a Navier-Stokes 2x2 block
2407  /// preconditioner dealing with the fluid sub-blocks within a
2408  /// 3x3 FSI preconditioner). Once this is done the master block
2409  /// preconditioner deals with the block setup etc.
2410  ///
2411  /// The vector doftype_map must specify the dof type in the
2412  /// master preconditioner that corresponds to a dof type in this block
2413  /// preconditioner.
2414  ///
2415  /// In general, we want:
2416  /// doftype_map[doftype in subsidiary prec] = doftype in master prec.
2417  ///
2418  /// It tells this block preconditioner which dof types of the master
2419  /// block preconditioner it is working with.
2420  ///
2421  /// The length of the vector is used to determine the number of
2422  /// dof types in THIS block preconditioner therefore it must be correctly
2423  /// sized.
2424  ///
2425  /// For example, let the master block preconditioner have 5 dof types in total
2426  /// and a 1-4 dof type splitting where the block (0,0) corresponds to
2427  /// dof type 0 and the block (1,1) corresponds to dof types 1, 2, 3 and 4
2428  /// (i.e. it would have given to block_setup the vector [0,1,1,1,1]).
2429  /// Furthermore, it solves the (1,1) block with a subsidiary block preconditioner.
2430  /// Then the doftype_map passed to this function of the subsidiary block
2431  /// preconditioner would be [1, 2, 3, 4].
2432  ///
2433  /// Dof type coarsening (following on from the example above):
2434  /// Let the subsidiary block preconditioner (THIS block preconditioner)
2435  /// only work with two DOF types; then the master block preconditioner must
2436  /// "coarsen" the dof types by providing the optional argument
2437  /// doftype_coarsen_map vector.
2438  ///
2439  /// The doftype_coarsen_map vector (in this case) might be [[0,1], [2,3]]
2440  /// telling the subsidiary block preconditioner that the SUBSIDIARY dof types
2441  /// 0 and 1 should be treated as dof type 0 and the subsidiary dof types 2
2442  /// and 3 should be treated as subsidiary dof type 1.
2443  ///
2444  /// If no doftype_coarsen_map vector is provided, then the identity is
2445  /// used automatically (see the turn_into_subsidiary_block_preconditioner(...)
2446  /// function with only two arguments). In the above case, the identity
2447  /// doftype_coarsen_map vector for the subsidiary block preconditioner
2448  /// would be the 2D vector [[0], [1], [2], [3]] which means
2449  /// dof type 0 is treated as dof type 0,
2450  /// dof type 1 is treated as dof type 1,
2451  /// dof type 2 is treated as dof type 2, and
2452  /// dof type 3 is treated as dof type 3.
2453  //============================================================================
2454  template<typename MATRIX>
2455  void BlockPreconditioner<MATRIX>::turn_into_subsidiary_block_preconditioner(
2456  BlockPreconditioner<MATRIX>* master_block_prec_pt,
2457  const Vector<unsigned>& doftype_in_master_preconditioner_coarse,
2458  const Vector<Vector<unsigned>>& doftype_coarsen_map_coarse)
2459  {
2460  // Set the master block preconditioner pointer
2461  Master_block_preconditioner_pt = master_block_prec_pt;
2462 
2463  // Set the Doftype_coarsen_map_coarse.
2464  Doftype_coarsen_map_coarse = doftype_coarsen_map_coarse;
2465 
2466  Doftype_in_master_preconditioner_coarse =
2467  doftype_in_master_preconditioner_coarse;
2468  } // end of turn_into_subsidiary_block_preconditioner(...)
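  // Usage sketch with dof type coarsening (illustrative only, following the
  // example in the comment above): if the subsidiary preconditioner only
  // distinguishes two dof types, the master can coarsen its dof types 1-4
  // pairwise, i.e. doftype_coarsen_map_coarse = [[0,1],[2,3]]:
  //
  //   Vector<unsigned> dof_map(4);
  //   for (unsigned i = 0; i < 4; i++) { dof_map[i] = i + 1; }
  //   Vector<Vector<unsigned>> coarsen_map(2, Vector<unsigned>(2));
  //   coarsen_map[0][0] = 0; coarsen_map[0][1] = 1;
  //   coarsen_map[1][0] = 2; coarsen_map[1][1] = 3;
  //   subsidiary_prec_pt->turn_into_subsidiary_block_preconditioner(
  //     this, dof_map, coarsen_map);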
2469 
2470 
2471  //============================================================================
2472  /// Determine the size of the matrix blocks and setup the
2473  /// lookup schemes relating the global degrees of freedom with
2474  /// their "blocks" and their indices (row/column numbers) in those
2475  /// blocks.
2476  /// The distributions of the preconditioner and the blocks are
2477  /// automatically specified (and assumed to be uniform) at this
2478  /// stage.
2479  /// This method should be used if each DOF type corresponds to a
2480  /// unique block type.
2481  //============================================================================
2482  template<typename MATRIX>
2483  void BlockPreconditioner<MATRIX>::block_setup()
2484  {
2485 #ifdef PARANOID
2486 
2487  // Subsidiary preconditioners don't really need the meshes
2488  if (this->is_master_block_preconditioner())
2489  {
2490  std::ostringstream err_msg;
2491  unsigned n = nmesh();
2492  if (n == 0)
2493  {
2494  err_msg << "No meshes have been set for this block preconditioner!\n"
2495  << "Set one with set_nmesh(...), set_mesh(...)" << std::endl;
2496  throw OomphLibError(
2497  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2498  for (unsigned m = 0; m < n; m++)
2499  {
2500  if (Mesh_pt[m] == 0)
2501  {
2502  err_msg << "The mesh pointer to mesh " << m << " is null!\n"
2503  << "Set a non-null one with set_mesh(...)" << std::endl;
2504  throw OomphLibError(
2505  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2506  }
2507  }
2508  }
2509  }
2510 #endif
2511 
2512  // Get the number of dof types.
2513  unsigned internal_n_dof_types = ndof_types();
2514 
2515  // Build the dof to block map - assume that each type of dof corresponds
2516  // to a different type of block.
2517  Vector<unsigned> dof_to_block_lookup(internal_n_dof_types);
2518  for (unsigned i = 0; i < internal_n_dof_types; i++)
2519  {
2520  dof_to_block_lookup[i] = i;
2521  }
2522 
2523  // call the block setup method
2524  this->block_setup(dof_to_block_lookup);
2525  }
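  // Usage sketch (illustrative only): a concrete preconditioner's setup()
  // typically just calls
  //
  //   this->block_setup();
  //
  // when every dof type is to form its own block; as constructed above,
  // this is equivalent to block_setup(dof_to_block_map) with the identity
  // map [0, 1, ..., ndof_types()-1].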
2526 
2527 
2528  //============================================================================
2529  /// Get the block matrices required for the block preconditioner. Takes a
2530  /// matrix of bools that indicates whether a specified sub-block is
2531  /// required for the preconditioning operation. Computes the required block
2532  /// matrices, and stores pointers to them in the matrix block_matrix_pt. If an
2533  /// entry in block_matrix_pt is equal to NULL that sub-block has not been
2534  /// requested and is therefore not available.
2535  //============================================================================
2536  template<typename MATRIX>
2537  void BlockPreconditioner<MATRIX>::get_blocks(
2538  DenseMatrix<bool>& required_blocks,
2539  DenseMatrix<MATRIX*>& block_matrix_pt) const
2540  {
2541  // Cache number of block types
2542  const unsigned n_block_types = nblock_types();
2543 
2544 #ifdef PARANOID
2545  // If required blocks matrix pointer is not the correct size then abort.
2546  if ((required_blocks.nrow() != n_block_types) ||
2547  (required_blocks.ncol() != n_block_types))
2548  {
2549  std::ostringstream error_message;
2550  error_message << "The size of the matrix of bools required_blocks "
2551  << "(which indicates which blocks are required) is not the "
2552  << "right size, required_blocks is "
2553  << required_blocks.ncol() << " x " << required_blocks.nrow()
2554  << ", whereas it should "
2555  << "be " << n_block_types << " x " << n_block_types;
2556  throw OomphLibError(
2557  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2558  }
2559 
2560  // If block matrix pointer is not the correct size then abort.
2561  if ((block_matrix_pt.nrow() != n_block_types) ||
2562  (block_matrix_pt.ncol() != n_block_types))
2563  {
2564  std::ostringstream error_message;
2565  error_message << "The size of the block matrix pt is not the "
2566  << "right size, block_matrix_pt is "
2567  << block_matrix_pt.ncol() << " x " << block_matrix_pt.nrow()
2568  << ", whereas it should "
2569  << "be " << n_block_types << " x " << n_block_types;
2570  throw OomphLibError(
2571  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2572  }
2573 
2574 #endif
2575 
2576  // Loop over the blocks
2577  for (unsigned i = 0; i < n_block_types; i++)
2578  {
2579  for (unsigned j = 0; j < n_block_types; j++)
2580  {
2581  // If block(i,j) is required then create a matrix and fill it in.
2582  if (required_blocks(i, j))
2583  {
2584  //??ds might want to remove this use of new as well?
2585  block_matrix_pt(i, j) = new MATRIX;
2586  get_block(i, j, *block_matrix_pt(i, j));
2587  }
2588 
2589  // Otherwise set pointer to null.
2590  else
2591  {
2592  block_matrix_pt(i, j) = 0;
2593  }
2594  }
2595  }
2596  }
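  // Usage sketch (illustrative only; assumes MATRIX is CRDoubleMatrix):
  // to extract only the diagonal blocks of a 2x2 block system one might
  // write
  //
  //   DenseMatrix<bool> required_blocks(2, 2, false);
  //   required_blocks(0, 0) = true;
  //   required_blocks(1, 1) = true;
  //   DenseMatrix<CRDoubleMatrix*> block_pt(2, 2, 0);
  //   this->get_blocks(required_blocks, block_pt);
  //
  // The off-diagonal entries of block_pt remain null; the blocks that were
  // created with new are owned by the caller and must be deleted by it.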
2597 
2598  //============================================================================
2599  /// Takes the naturally ordered vector and extracts the blocks
2600  /// indicated by the block number (the values) in the Vector
2601  /// block_vec_number all at once, then concatenates them without
2602  /// communication. Here, the values in block_vec_number are the block numbers
2603  /// in the current preconditioner.
2604  /// This is a non-const function because distributions may be created
2605  /// and stored in Auxiliary_block_distribution_pt for future use.
2606  //============================================================================
2607  template<typename MATRIX>
2608  void BlockPreconditioner<MATRIX>::get_concatenated_block_vector(
2609  const Vector<unsigned>& block_vec_number,
2610  const DoubleVector& v,
2611  DoubleVector& w)
2612  {
2613 #ifdef PARANOID
2614 
2615  // Check if v is built.
2616  if (!v.built())
2617  {
2618  std::ostringstream err_msg;
2619  err_msg << "The distribution of the global vector v must be setup.";
2620  throw OomphLibError(
2621  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2622  }
2623 
2624  // v must have the same distribution as the upper-most master block
2625  // preconditioner, since the upper-most master block preconditioner
2626  // should have the same distribution as the matrix pointed to
2627  // by matrix_pt().
2628  if (*(v.distribution_pt()) != *(this->master_distribution_pt()))
2629  {
2630  std::ostringstream err_msg;
2631  err_msg << "The distribution of the global vector v must match the "
2632  << " specified master_distribution_pt(). \n"
2633  << "i.e. Distribution_pt in the master preconditioner";
2634  throw OomphLibError(
2635  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2636  }
2637 
2638  // Check to see if there are more blocks defined in the block_vec_number
2639  // vector than the number of block types. This is not allowed.
2640  const unsigned para_nblock_types = nblock_types();
2641  const unsigned para_block_vec_number_size = block_vec_number.size();
2642  if (para_block_vec_number_size > para_nblock_types)
2643  {
2644  std::ostringstream err_msg;
2645  err_msg << "You have requested " << para_block_vec_number_size
2646  << " number of blocks, (block_vec_number.size() is "
2647  << para_block_vec_number_size << ").\n"
2648  << "But there are only " << para_nblock_types
2649  << " nblock_types.\n"
2650  << "Please make sure that block_vec_number is correctly sized.\n";
2651  throw OomphLibError(
2652  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2653  }
2654 
2655  // Check if any block number defined in block_vec_number is equal to or
2656  // greater than the number of block types.
2657  // E.g. if there are 5 block types, we can only have block numbers:
2658  // 0, 1, 2, 3 and 4.
2659  for (unsigned i = 0; i < para_block_vec_number_size; i++)
2660  {
2661  const unsigned para_required_block = block_vec_number[i];
2662  if (para_required_block >= para_nblock_types)
2663  {
2664  std::ostringstream err_msg;
2665  err_msg << "block_vec_number[" << i << "] is " << para_required_block
2666  << ".\n"
2667  << "But there are only " << para_nblock_types
2668  << " nblock_types.\n";
2669  throw OomphLibError(
2670  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2671  }
2672  }
2673 
2674  // Check that no block number is inserted twice.
2675  std::set<unsigned> para_set;
2676  for (unsigned b = 0; b < para_block_vec_number_size; b++)
2677  {
2678  std::pair<std::set<unsigned>::iterator, bool> para_set_ret;
2679  para_set_ret = para_set.insert(block_vec_number[b]);
2680 
2681  if (!para_set_ret.second)
2682  {
2683  std::ostringstream err_msg;
2684  err_msg << "Error: the block number " << block_vec_number[b]
2685  << " appears twice.\n";
2686  throw OomphLibError(
2687  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2688  }
2689  }
2690 #endif
2691 
2692  // Number of blocks to get.
2693  const unsigned n_block = block_vec_number.size();
2694 
2695  // Each block is made of dof types. We get the most fine grain dof types.
2696  // Most fine grain in the sense that these are the dof types that belong
2697  // in this block before any coarsening of dof types has taken place.
2698  // The ordering of the dof types matters, this is handled properly when
2699  // creating the Block_to_dof_map_fine vector and must be respected here.
2700  // I.e. we cannot arbitrarily insert dof types (even if they are correct)
2701  // in the vector most_fine_grain_dof.
2702  Vector<unsigned> most_fine_grain_dof;
2703  for (unsigned b = 0; b < n_block; b++)
2704  {
2705  const unsigned mapped_b = block_vec_number[b];
2706  most_fine_grain_dof.insert(most_fine_grain_dof.end(),
2707  Block_to_dof_map_fine[mapped_b].begin(),
2708  Block_to_dof_map_fine[mapped_b].end());
2709  }
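  // Worked example (hypothetical numbers, for illustration only): if
  // Block_to_dof_map_fine = [[0,1],[2]] and block_vec_number = [1,0],
  // then most_fine_grain_dof = [2,0,1], i.e. the most fine grain dof types
  // are listed block by block, in the requested block order.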
2710 
2711  // Get all the dof level vectors in one go.
2712  Vector<DoubleVector> dof_block_vector;
2713  internal_get_block_vectors(most_fine_grain_dof, v, dof_block_vector);
2714 
2715  // Next we need to build the output DoubleVector w with the correct
2716  // distribution: the concatenation of the distributions of all the
2717  // dof-level vectors. This is the same as the concatenation of the
2718  // distributions of the blocks within this preconditioner.
2719  //
2720  // So we first check if it exists already; if not, we create it and
2721  // store it for future use. We store it because concatenation of
2722  // distributions requires communication, so concatenation of
2723  // distributions on-the-fly should be avoided.
2724  std::map<Vector<unsigned>, LinearAlgebraDistribution*>::const_iterator iter;
2725 
2726  // Attempt to get an iterator pointing to the pair with the value
2727  // block_vec_number.
2728  iter = Auxiliary_block_distribution_pt.find(block_vec_number);
2729 
2730  if (iter != Auxiliary_block_distribution_pt.end())
2731  // If it exists, build w with the distribution pointed to
2732  // by pair::second.
2733  {
2734  w.build(iter->second);
2735  }
2736  else
2737  // Else, we need to create the distribution and store it in
2738  // Auxiliary_block_distribution_pt.
2739  {
2740  Vector<LinearAlgebraDistribution*> tmp_vec_dist_pt(n_block, 0);
2741  for (unsigned b = 0; b < n_block; b++)
2742  {
2743  tmp_vec_dist_pt[b] = Block_distribution_pt[block_vec_number[b]];
2744  }
2745 
2746  // Note that the distribution is created with new but not deleted here.
2747  // This is handled in the clean up functions.
2748  LinearAlgebraDistribution* tmp_dist_pt = new LinearAlgebraDistribution;
2749  LinearAlgebraDistributionHelpers::concatenate(tmp_vec_dist_pt,
2750  *tmp_dist_pt);
2751 
2752  // Store the pair of Vector<unsigned> and LinearAlgebraDistribution*
2753  insert_auxiliary_block_distribution(block_vec_number, tmp_dist_pt);
2754 
2755  // Build w.
2756  w.build(tmp_dist_pt);
2757  }
2758 
2759  // Now concatenate all the dof level vectors into the vector w.
2760  DoubleVectorHelpers::concatenate_without_communication(dof_block_vector, w);
2761 
2762  } // get_concatenated_block_vector(...)
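  // Usage sketch (illustrative only; r denotes a naturally ordered vector
  // with the master distribution): to extract blocks 0 and 2 as a single
  // concatenated block vector one might write
  //
  //   Vector<unsigned> blocks(2);
  //   blocks[0] = 0;
  //   blocks[1] = 2;
  //   DoubleVector r_blocks;
  //   this->get_concatenated_block_vector(blocks, r, r_blocks);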
2763 
2764  //============================================================================
2765  /// Takes the concatenated block ordered vector, w, and copies its
2766  /// entries to the appropriate entries in the naturally ordered vector, v.
2767  /// Here the values in block_vec_number indicate which blocks the vector
2768  /// w is a concatenation of. The block numbers are those in the current
2769  /// preconditioner. If the preconditioner is a subsidiary block
2770  /// preconditioner the other entries in v that are not associated with it
2771  /// are left alone.
2772  //============================================================================
2773  template<typename MATRIX>
2774  void BlockPreconditioner<MATRIX>::return_concatenated_block_vector(
2775  const Vector<unsigned>& block_vec_number,
2776  const DoubleVector& w,
2777  DoubleVector& v) const
2778  {
2779 #ifdef PARANOID
2780 
2781  // Check if v is built.
2782  if (!v.built())
2783  {
2784  std::ostringstream err_msg;
2785  err_msg << "The distribution of the global vector v must be setup.";
2786  throw OomphLibError(
2787  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2788  }
2789 
2790  // v must have the same distribution as the upper-most master block
2791  // preconditioner, since the upper-most master block preconditioner
2792  // should have the same distribution as the matrix pointed to
2793  // by matrix_pt().
2794  if (*(v.distribution_pt()) != *(this->master_distribution_pt()))
2795  {
2796  std::ostringstream err_msg;
2797  err_msg << "The distribution of the global vector v must match the "
2798  << " specified master_distribution_pt(). \n"
2799  << "i.e. Distribution_pt in the master preconditioner";
2800  throw OomphLibError(
2801  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2802  }
2803 
2804  // Check to see if there are more blocks defined in the block_vec_number
2805  // vector than the number of block types. This is not allowed.
2806  const unsigned para_block_vec_number_size = block_vec_number.size();
2807  const unsigned para_n_block = nblock_types();
2808  if (para_block_vec_number_size > para_n_block)
2809  {
2810  std::ostringstream err_msg;
2811  err_msg << "Trying to return " << para_block_vec_number_size
2812  << " block vectors.\n"
2813  << "But there are only " << para_n_block << " block types.\n";
2814  throw OomphLibError(
2815  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2816  }
2817 
2818  // Check if any block number defined in block_vec_number is equal to or
2819  // greater than the number of block types.
2820  // E.g. if there are 5 block types, we can only have block numbers:
2821  // 0, 1, 2, 3 and 4.
2822  for (unsigned b = 0; b < para_block_vec_number_size; b++)
2823  {
2824  const unsigned para_required_block = block_vec_number[b];
2825  if (para_required_block >= para_n_block)
2826  {
2827  std::ostringstream err_msg;
2828  err_msg << "block_vec_number[" << b << "] is " << para_required_block
2829  << ".\n"
2830  << "But there are only " << para_n_block << " block types.\n";
2831  throw OomphLibError(
2832  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2833  }
2834  }
2835 
2836  // Check that no block number is inserted twice.
2837  std::set<unsigned> para_set;
2838  for (unsigned b = 0; b < para_block_vec_number_size; b++)
2839  {
2840  std::pair<std::set<unsigned>::iterator, bool> para_set_ret;
2841  para_set_ret = para_set.insert(block_vec_number[b]);
2842 
2843  if (!para_set_ret.second)
2844  {
2845  std::ostringstream err_msg;
2846  err_msg << "Error: the block number " << block_vec_number[b]
2847  << " appears twice.\n";
2848  throw OomphLibError(
2849  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2850  }
2851  }
2852 
2853  // Check that w is built.
2854  if (!w.built())
2855  {
2856  std::ostringstream err_msg;
2857  err_msg << "The distribution of the block vector w must be setup.";
2858  throw OomphLibError(
2859  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2860  }
2861 
2862  // Check that the distributions defined by block_vec_number are consistent
2863  // with the distribution of w.
2864  // Recall that w is the concatenation of the block vectors defined by
2865  // the values in block_vec_number. We check that this is the case.
2866  Vector<LinearAlgebraDistribution*> para_vec_dist_pt(
2867  para_block_vec_number_size, 0);
2868 
2869  for (unsigned b = 0; b < para_block_vec_number_size; b++)
2870  {
2871  para_vec_dist_pt[b] = Block_distribution_pt[block_vec_number[b]];
2872  }
2873 
2874  LinearAlgebraDistribution para_tmp_dist;
2875 
2876  LinearAlgebraDistributionHelpers::concatenate(para_vec_dist_pt,
2877  para_tmp_dist);
2878 
2879  if (*w.distribution_pt() != para_tmp_dist)
2880  {
2881  std::ostringstream err_msg;
2882  err_msg << "The distribution of the block vector w does not match \n"
2883  << "the concatenation of the block distributions defined in \n"
2884  << "block_vec_number.\n";
2885  throw OomphLibError(
2886  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2887  }
2888 #endif
2889 
2890  // Number of blocks to return.
2891  const unsigned n_block = block_vec_number.size();
2892 
2893  // Each block is made of dof types. We get the most fine grain dof types.
2894  // Most fine grain in the sense that these are the dof types that belong
2895  // in this block before any coarsening of dof types has taken place.
2896  // The ordering of the dof types matters, this is handled properly when
2897  // creating the Block_to_dof_map_fine vector and must be respected here.
2898  // I.e. we cannot arbitrarily insert dof types (even if they are correct)
2899  // in the vector most_fine_grain_dof.
2900  Vector<unsigned> most_fine_grain_dof;
2901  for (unsigned b = 0; b < n_block; b++)
2902  {
2903  const unsigned mapped_b = block_vec_number[b];
2904  most_fine_grain_dof.insert(most_fine_grain_dof.end(),
2905  Block_to_dof_map_fine[mapped_b].begin(),
2906  Block_to_dof_map_fine[mapped_b].end());
2907  }
2908 
2909  // The number of most fine grain dof types associated with the blocks
2910  // defined by block_vec_number.
2911  const unsigned ndof = most_fine_grain_dof.size();
2912 
2913  // Build each dof level vector with the correct distribution.
2914  Vector<DoubleVector> dof_vector(ndof);
2915  for (unsigned d = 0; d < ndof; d++)
2916  {
2917  dof_vector[d].build(
2918  internal_block_distribution_pt(most_fine_grain_dof[d]));
2919  }
2920 
2921  // Perform the splitting of w into the most fine grain dof level vectors.
2922  DoubleVectorHelpers::split_without_communication(w, dof_vector);
2923 
2924  // Return all the dof level vectors in one go.
2925  internal_return_block_vectors(most_fine_grain_dof, dof_vector, v);
2926  } // return_concatenated_block_vector(...)
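  // Usage sketch (illustrative only; continues the sketch above): the
  // typical round trip extracts the concatenated block vector, operates on
  // it, and copies the result back using the same block_vec_number:
  //
  //   this->get_concatenated_block_vector(blocks, r, r_blocks);
  //   // ... apply a (sub-)solver or preconditioner to r_blocks ...
  //   this->return_concatenated_block_vector(blocks, r_blocks, z);
  //
  // where z must already be built with the master distribution.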
2927 
2928  //============================================================================
2929  /// Takes the naturally ordered vector and rearranges it into a
2930  /// vector of sub vectors corresponding to the blocks, so s[b][i] contains
2931  /// the i-th entry in the vector associated with block b.
2932  /// Note: If the preconditioner is a subsidiary preconditioner then only the
2933  /// sub-vectors associated with the blocks of the subsidiary preconditioner
2934  /// will be included. Hence the length of v is master_nrow() whereas the
2935  /// total length of the s vectors is the sum of the lengths of the
2936  /// individual block vectors defined in block_vec_number.
2937  //============================================================================
2938  template<typename MATRIX>
2939  void BlockPreconditioner<MATRIX>::get_block_vectors(
2940  const Vector<unsigned>& block_vec_number,
2941  const DoubleVector& v,
2942  Vector<DoubleVector>& s) const
2943  {
2944 #ifdef PARANOID
2945 
2946  // Check if v is built.
2947  if (!v.built())
2948  {
2949  std::ostringstream err_msg;
2950  err_msg << "The distribution of the global vector v must be setup.";
2951  throw OomphLibError(
2952  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2953  }
2954 
2955  // v must have the same distribution as the upper-most master block
2956  // preconditioner, since the upper-most master block preconditioner
2957  // should have the same distribution as the matrix pointed to
2958  // by matrix_pt().
2959  if (*(v.distribution_pt()) != *(this->master_distribution_pt()))
2960  {
2961  std::ostringstream err_msg;
2962  err_msg << "The distribution of the global vector v must match the "
2963  << " specified master_distribution_pt(). \n"
2964  << "i.e. Distribution_pt in the master preconditioner";
2965  throw OomphLibError(
2966  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2967  }
2968 
2969  // Check to see if there are more blocks defined in the block_vec_number
2970  // vector than the number of block types. This is not allowed.
2971  const unsigned para_nblock_types = nblock_types();
2972  const unsigned para_block_vec_number_size = block_vec_number.size();
2973  if (para_block_vec_number_size > para_nblock_types)
2974  {
2975  std::ostringstream err_msg;
2976  err_msg << "You have requested " << para_block_vec_number_size
2977  << " number of blocks, (block_vec_number.size() is "
2978  << para_block_vec_number_size << ").\n"
2979  << "But there are only " << para_nblock_types
2980  << " nblock_types.\n"
2981  << "Please make sure that block_vec_number is correctly sized.\n";
2982  throw OomphLibError(
2983  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2984  }
2985 
2986  // Check if any block number defined in block_vec_number is equal to or
2987  // greater than the number of block types.
2988  // E.g. if there are 5 block types, we can only have block numbers:
2989  // 0, 1, 2, 3 and 4.
2990  for (unsigned i = 0; i < para_block_vec_number_size; i++)
2991  {
2992  const unsigned para_required_block = block_vec_number[i];
2993  if (para_required_block >= para_nblock_types)
2994  {
2995  std::ostringstream err_msg;
2996  err_msg << "block_vec_number[" << i << "] is " << para_required_block
2997  << ".\n"
2998  << "But there are only " << para_nblock_types
2999  << " nblock_types.\n";
3000  throw OomphLibError(
3001  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3002  }
3003  }
3004  // Check that no block number is inserted twice.
3005  std::set<unsigned> para_set;
3006  for (unsigned b = 0; b < para_block_vec_number_size; b++)
3007  {
3008  std::pair<std::set<unsigned>::iterator, bool> para_set_ret;
3009  para_set_ret = para_set.insert(block_vec_number[b]);
3010 
3011  if (!para_set_ret.second)
3012  {
3013  std::ostringstream err_msg;
3014  err_msg << "Error: the block number " << block_vec_number[b]
3015  << " appears twice.\n";
3016  throw OomphLibError(
3017  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3018  }
3019  }
3020 #endif
3021 
3022  // Number of blocks to get.
3023  const unsigned n_block = block_vec_number.size();
3024  s.resize(n_block);
3025 
3026  // Each block is made of dof types. We get the most fine grain dof types.
3027  // Most fine grain in the sense that these are the dof types that belong
3028  // in this block before any coarsening of dof types has taken place.
3029  // The ordering of the dof types matters, this is handled properly when
3030  // creating the Block_to_dof_map_fine vector and must be respected here.
3031  // I.e. we cannot arbitrarily insert dof types (even if they are correct)
3032  // in the vector most_fine_grain_dof.
3033  Vector<unsigned> most_fine_grain_dof;
3034  for (unsigned b = 0; b < n_block; b++)
3035  {
3036  const unsigned mapped_b = block_vec_number[b];
3037 
3038  most_fine_grain_dof.insert(most_fine_grain_dof.end(),
3039  Block_to_dof_map_fine[mapped_b].begin(),
3040  Block_to_dof_map_fine[mapped_b].end());
3041  }
3042 
3043  // Get all the dof level vectors in one go.
3044  Vector<DoubleVector> dof_vector;
3045  internal_get_block_vectors(most_fine_grain_dof, v, dof_vector);
3046 
3047  // For each block vector requested, build the block s[b] by
3048  // concatenating the corresponding dof level vectors.
3050 
3051  // Since all the dof level vectors are stored contiguously in dof_vector,
3052  // we use an offset to locate the sub-range of dof vectors that belongs
3053  // to each block.
3054  unsigned offset = 0;
3055 
3056  for (unsigned b = 0; b < n_block; b++)
3057  {
3058  // The actual block number required.
3059  const unsigned mapped_b = block_vec_number[b];
3060 
3061  // How many most fine grain dofs are in this block?
3062  const unsigned n_dof = Block_to_dof_map_fine[mapped_b].size();
3063 
3064  if (n_dof == 1)
3065  // No need to concatenate, just copy the DoubleVector.
3066  {
3067  s[b] = dof_vector[offset];
3068  }
3069  else
3070  // Concatenate the relevant dof vectors into s[b].
3071  {
3072  s[b].build(Block_distribution_pt[mapped_b], 0);
3073  Vector<DoubleVector*> tmp_vec_pt(n_dof, 0);
3074  for (unsigned vec_i = 0; vec_i < n_dof; vec_i++)
3075  {
3076  tmp_vec_pt[vec_i] = &dof_vector[offset + vec_i];
3077  }
3078 
3079  DoubleVectorHelpers::concatenate_without_communication(tmp_vec_pt,
3080  s[b]);
3081  }
3082 
3083  // Update the offset.
3084  offset += n_dof;
3085  }
3086  } // get_block_vectors(...)
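  // Usage sketch (illustrative only; r denotes a naturally ordered vector
  // with the master distribution): to split r into the sub-vectors for
  // blocks 1 and 0, in that order, one might write
  //
  //   Vector<unsigned> blocks(2);
  //   blocks[0] = 1;
  //   blocks[1] = 0;
  //   Vector<DoubleVector> s;
  //   this->get_block_vectors(blocks, r, s);
  //
  // after which s[0] holds the entries of block 1 and s[1] those of block 0.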
3087 
3088 
3089  //============================================================================
3090  /// Takes the naturally ordered vector and rearranges it into a
3091  /// vector of sub vectors corresponding to the blocks, so s[b][i] contains
3092  /// the i-th entry in the vector associated with block b.
3093  /// Note: If the preconditioner is a subsidiary preconditioner then only the
3094  /// sub-vectors associated with the blocks of the subsidiary preconditioner
3095  /// will be included. Hence the length of v is master_nrow() whereas the
3096  /// total length of the s vectors is Nrow.
3097  /// This is simply a wrapper around the other get_block_vectors(...) function
3098  /// where the block_vec_number Vector is the identity, i.e.
3099  /// block_vec_number is [0, 1, ..., nblock_types - 1].
3100  //============================================================================
3101  template<typename MATRIX>
3102  void BlockPreconditioner<MATRIX>::get_block_vectors(
3103  const DoubleVector& v, Vector<DoubleVector>& s) const
3104  {
3105  // Get the number of blocks in this block preconditioner.
3106  const unsigned n_block = nblock_types();
3107 
3108  // Create the identity vector.
3109  Vector<unsigned> required_block(n_block, 0);
3110  for (unsigned i = 0; i < n_block; i++)
3111  {
3112  required_block[i] = i;
3113  }
3114 
3115  // Call the other function which does the work.
3116  get_block_vectors(required_block, v, s);
3117  }
3118 
3119  //============================================================================
3120  /// Takes the naturally ordered vector and
3121  /// rearranges it into a vector of sub vectors corresponding to the blocks,
3122  /// so s[b][i] contains the i-th entry in the vector associated with block b.
3123  /// The block_vec_number indicates which blocks we want.
3124  /// These blocks and vectors are those corresponding to the internal blocks.
3125  /// Note: If the preconditioner is a subsidiary preconditioner then only the
3126  /// sub-vectors associated with the blocks of the subsidiary preconditioner
3127  /// will be included. Hence the length of v is master_nrow() whereas the
3128  /// total length of the s vectors is the sum of the Nrow of the sub vectors.
3129  //============================================================================
3130  template<typename MATRIX>
3131  void BlockPreconditioner<MATRIX>::internal_get_block_vectors(
3132  const Vector<unsigned>& block_vec_number,
3133  const DoubleVector& v,
3134  Vector<DoubleVector>& s) const
3135  {
3136 #ifdef PARANOID
3137  if (!v.built())
3138  {
3139  std::ostringstream error_message;
3140  error_message << "The distribution of the global vector v must be setup.";
3141  throw OomphLibError(
3142  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3143  }
3144  if (*(v.distribution_pt()) != *(this->master_distribution_pt()))
3145  {
3146  std::ostringstream error_message;
3147  error_message << "The distribution of the global vector v must match the "
3148  << " specified master_distribution_pt(). \n"
3149  << "i.e. Distribution_pt in the master preconditioner";
3150  throw OomphLibError(
3151  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3152  }
3153 #endif
3154 
3155  // Number of block types
3156  // const unsigned nblock = this->internal_nblock_types();
3157  const unsigned nblock = block_vec_number.size();
3158 
3159  // if + only one processor
3160  // + more than one processor but matrix_pt is not distributed
3161  // then use the serial get_block method
3162  if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
3163  !this->distribution_pt()->distributed())
3164  {
3165  // Vector of vectors for each section of residual vector
3166  s.resize(nblock);
3167 
3168  // pointer to the data in v
3169  const double* v_pt = v.values_pt();
3170 
3171  // setup the block vector and then insert the data
3172  for (unsigned b = 0; b < nblock; b++)
3173  {
3174  const unsigned required_block = block_vec_number[b];
3175  s[b].build(Internal_block_distribution_pt[required_block], 0.0);
3176  double* s_pt = s[b].values_pt();
3177  unsigned nrow = s[b].nrow();
3178  for (unsigned i = 0; i < nrow; i++)
3179  {
3180  s_pt[i] = v_pt[this->Global_index[required_block][i]];
3181  }
3182  }
3183  }
3184  // otherwise use mpi
3185  else
3186  {
3187 #ifdef OOMPH_HAS_MPI
3188  // my rank
3189  unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
3190 
3191  // the number of processors
3192  unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
3193 
3194  // build the vectors
3195  s.resize(nblock);
3196  for (unsigned b = 0; b < nblock; b++)
3197  {
3198  const unsigned required_block = block_vec_number[b];
3199  s[b].build(Internal_block_distribution_pt[required_block], 0.0);
3200  }
3201 
3202  // determine the maximum number of rows to be sent or received,
3203  // and the number of blocks for which each processor requires
3204  // send/recv communication
3205  Vector<int> nblock_send(nproc, 0);
3206  Vector<int> nblock_recv(nproc, 0);
3207  unsigned max_n_send_or_recv = 0;
3208  for (unsigned p = 0; p < nproc; p++)
3209  {
3210  for (unsigned b = 0; b < nblock; b++)
3211  {
3212  const unsigned required_block = block_vec_number[b];
3213  max_n_send_or_recv = std::max(
3214  max_n_send_or_recv, Nrows_to_send_for_get_block(required_block, p));
3215  max_n_send_or_recv = std::max(
3216  max_n_send_or_recv, Nrows_to_recv_for_get_block(required_block, p));
3217  if (Nrows_to_send_for_get_block(required_block, p) > 0)
3218  {
3219  nblock_send[p]++;
3220  }
3221  if (Nrows_to_recv_for_get_block(required_block, p) > 0)
3222  {
3223  nblock_recv[p]++;
3224  }
3225  }
3226  }
3227 
3228  // create a vector of 1s (of length max_n_send_or_recv) for the mpi
3229  // indexed data types
3230  int* block_lengths = new int[max_n_send_or_recv];
3231  for (unsigned i = 0; i < max_n_send_or_recv; i++)
3232  {
3233  block_lengths[i] = 1;
3234  }
3235 
3236  // perform the sends and receives
3237  Vector<MPI_Request> requests;
3238  for (unsigned p = 0; p < nproc; p++)
3239  {
3240  // send and recv with other processors
3241  if (p != my_rank)
3242  {
3243  // send
3244  if (nblock_send[p] > 0)
3245  {
3246  // create the datatypes vector
3247  MPI_Datatype block_send_types[nblock_send[p]];
3248 
3249  // create the datatypes
3250  unsigned ptr = 0;
3251  for (unsigned b = 0; b < nblock; b++)
3252  {
3253  const unsigned required_block = block_vec_number[b];
3254 
3255  if (Nrows_to_send_for_get_block(required_block, p) > 0)
3256  {
3257  MPI_Type_indexed(Nrows_to_send_for_get_block(required_block, p),
3258  block_lengths,
3259  Rows_to_send_for_get_block(required_block, p),
3260  MPI_DOUBLE,
3261  &block_send_types[ptr]);
3262  MPI_Type_commit(&block_send_types[ptr]);
3263  ptr++;
3264  }
3265  }
3266 
3267  // compute the displacements and lengths
3268  MPI_Aint displacements[nblock_send[p]];
3269  int lengths[nblock_send[p]];
3270  for (int i = 0; i < nblock_send[p]; i++)
3271  {
3272  lengths[i] = 1;
3273  displacements[i] = 0;
3274  }
3275 
3276  // build the final datatype
3277  MPI_Datatype type_send;
3278  MPI_Type_create_struct(nblock_send[p],
3279  lengths,
3280  displacements,
3281  block_send_types,
3282  &type_send);
3283  MPI_Type_commit(&type_send);
3284 
3285  // send
3286  MPI_Request send_req;
3287  MPI_Isend(const_cast<double*>(v.values_pt()),
3288  1,
3289  type_send,
3290  p,
3291  0,
3292  this->distribution_pt()->communicator_pt()->mpi_comm(),
3293  &send_req);
3294  MPI_Type_free(&type_send);
3295  for (int i = 0; i < nblock_send[p]; i++)
3296  {
3297  MPI_Type_free(&block_send_types[i]);
3298  }
3299  requests.push_back(send_req);
3300  }
3301 
3302  // recv
3303  if (nblock_recv[p] > 0)
3304  {
3305  // create the datatypes vector
3306  MPI_Datatype block_recv_types[nblock_recv[p]];
3307 
3308  // and the displacements
3309  MPI_Aint displacements[nblock_recv[p]];
3310 
3311  // and the lengths
3312  int lengths[nblock_recv[p]];
3313 
3314  // all displacements are computed relative to s[0] values
3315  MPI_Aint displacements_base;
3316  MPI_Get_address(s[0].values_pt(), &displacements_base);
3317 
3318  // now build
3319  unsigned ptr = 0;
3320  for (unsigned b = 0; b < nblock; b++)
3321  {
3322  const unsigned required_block = block_vec_number[b];
3323 
3324  if (Nrows_to_recv_for_get_block(required_block, p) > 0)
3325  {
3326  MPI_Type_indexed(Nrows_to_recv_for_get_block(required_block, p),
3327  block_lengths,
3328  Rows_to_recv_for_get_block(required_block, p),
3329  MPI_DOUBLE,
3330  &block_recv_types[ptr]);
3331  MPI_Type_commit(&block_recv_types[ptr]);
3332  MPI_Get_address(s[b].values_pt(), &displacements[ptr]);
3333  displacements[ptr] -= displacements_base;
3334  lengths[ptr] = 1;
3335  ptr++;
3336  }
3337  }
3338 
3339  // build the final data type
3340  MPI_Datatype type_recv;
3341  MPI_Type_create_struct(nblock_recv[p],
3342  lengths,
3343  displacements,
3344  block_recv_types,
3345  &type_recv);
3346  MPI_Type_commit(&type_recv);
3347 
3348  // recv
3349  MPI_Request recv_req;
3350  MPI_Irecv(s[0].values_pt(),
3351  1,
3352  type_recv,
3353  p,
3354  0,
3355  this->distribution_pt()->communicator_pt()->mpi_comm(),
3356  &recv_req);
3357  MPI_Type_free(&type_recv);
3358  for (int i = 0; i < nblock_recv[p]; i++)
3359  {
3360  MPI_Type_free(&block_recv_types[i]);
3361  }
3362  requests.push_back(recv_req);
3363  }
3364  }
3365 
3366  // communicate with self
3367  else
3368  {
3369  const double* v_values_pt = v.values_pt();
3370  for (unsigned b = 0; b < nblock; b++)
3371  {
3372  const unsigned required_block = block_vec_number[b];
3373 
3374  double* w_values_pt = s[b].values_pt();
3375  for (unsigned i = 0;
3376  i < Nrows_to_send_for_get_block(required_block, p);
3377  i++)
3378  {
3379  w_values_pt[Rows_to_recv_for_get_block(required_block, p)[i]] =
3380  v_values_pt[Rows_to_send_for_get_block(required_block, p)[i]];
3381  }
3382  }
3383  }
3384  }
3385 
3386  // and then just wait
3387  unsigned c = requests.size();
3388  Vector<MPI_Status> stat(c);
3389  if (c)
3390  {
3391  MPI_Waitall(c, &requests[0], &stat[0]);
3392  }
3393  delete[] block_lengths;
3394 
3395 #else
3396  // throw error
3397  std::ostringstream error_message;
3398  error_message << "The preconditioner is distributed and on more than one "
3399  << "processor. MPI is required.";
3400  throw OomphLibError(
3401  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3402 #endif
3403  }
3404  }
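
  // Illustrative sketch (not part of the library source): the MPI code
  // above sends all the required rows of all the requested blocks to a
  // given processor in a single message by wrapping one MPI_Type_indexed
  // datatype per block inside an MPI_Type_create_struct datatype. A
  // minimal standalone version of that pattern for two blocks, where
  // "n0"/"n1", "rows0"/"rows1", "buf0"/"buf1", "p", "comm" and "request"
  // are hypothetical row counts, row-index arrays, value buffers and MPI
  // handles:
  //
  //   std::vector<int> ones(std::max(n0, n1), 1);
  //   MPI_Datatype block_type[2];
  //   MPI_Type_indexed(n0, &ones[0], rows0, MPI_DOUBLE, &block_type[0]);
  //   MPI_Type_indexed(n1, &ones[0], rows1, MPI_DOUBLE, &block_type[1]);
  //   MPI_Type_commit(&block_type[0]);
  //   MPI_Type_commit(&block_type[1]);
  //
  //   // Displacement of the second buffer relative to the first one.
  //   MPI_Aint base, displ[2] = {0, 0};
  //   MPI_Get_address(buf0, &base);
  //   MPI_Get_address(buf1, &displ[1]);
  //   displ[1] -= base;
  //
  //   int lengths[2] = {1, 1};
  //   MPI_Datatype combined;
  //   MPI_Type_create_struct(2, lengths, displ, block_type, &combined);
  //   MPI_Type_commit(&combined);
  //   MPI_Isend(buf0, 1, combined, p, 0, comm, &request);
  //   MPI_Type_free(&combined);
  //   MPI_Type_free(&block_type[0]);
  //   MPI_Type_free(&block_type[1]);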
3405 
3406  //============================================================================
3407  /// A helper function, takes the naturally ordered vector and
3408  /// rearranges it into a vector of sub vectors corresponding to the blocks,
3409  /// so s[b][i] contains the i-th entry in the vector associated with block b.
3410  /// The block_vec_number indicates which blocks we want.
3411  /// These blocks and vectors are those corresponding to the internal blocks.
3412  /// Note: If the preconditioner is a subsidiary preconditioner then only the
3413  /// sub-vectors associated with the blocks of the subsidiary preconditioner
3414  /// will be included. Hence the length of v is master_nrow() whereas the
3415  /// total length of the s vectors is the sum of the Nrow of the sub vectors.
3416  /// This is simply a wrapper around the other internal_get_block_vectors(...)
3417  /// function with the identity block_vec_number vector.
3418  //============================================================================
3419  template<typename MATRIX>
3420  void BlockPreconditioner<MATRIX>::internal_get_block_vectors(
3421  const DoubleVector& v, Vector<DoubleVector>& s) const
3422  {
3423  // Number of block types
3424  const unsigned nblock = this->internal_nblock_types();
3425  Vector<unsigned> block_vec_number(nblock, 0);
3426  for (unsigned b = 0; b < nblock; b++)
3427  {
3428  block_vec_number[b] = b;
3429  }
3430 
3431  internal_get_block_vectors(block_vec_number, v, s);
3432  }
3433 
3434  //============================================================================
3435  /// Takes the vector of block vectors, s, and copies its entries into
3436  /// the naturally ordered vector, v. If this is a subsidiary block
3437  /// preconditioner only those entries in v that are associated with its
3438  /// blocks are affected. The block_vec_number indicates which block the
3439  /// vectors in s came from. The block number corresponds to the block
3440  /// numbers in this preconditioner.
3441  //============================================================================
3442  template<typename MATRIX>
3443  void BlockPreconditioner<MATRIX>::return_block_vectors(
3444  const Vector<unsigned>& block_vec_number,
3445  const Vector<DoubleVector>& s,
3446  DoubleVector& v) const
3447  {
3448 #ifdef PARANOID
3449 
3450  // Check if v is built.
3451  if (!v.built())
3452  {
3453  std::ostringstream err_msg;
3454  err_msg << "The distribution of the global vector v must be setup.";
3455  throw OomphLibError(
3456  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3457  }
3458 
3459  // v must have the same distribution as the upper-most master block
3460  // preconditioner, since the upper-most master block preconditioner
3461  // should have the same distribution as the matrix pointed to
3462  // by matrix_pt().
3463  if (*(v.distribution_pt()) != *(this->master_distribution_pt()))
3464  {
3465  std::ostringstream err_msg;
3466  err_msg << "The distribution of the global vector v must match the "
3467  << " specified master_distribution_pt(). \n"
3468  << "i.e. Distribution_pt in the master preconditioner";
3469  throw OomphLibError(
3470  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3471  }
3472 
3473  // Check if the number of vectors in s is the same as the number of block
3474  // numbers described in block_vec_number.
3475  const unsigned para_block_vec_number_size = block_vec_number.size();
3476  const unsigned para_s_size = s.size();
3477  if (para_block_vec_number_size != para_s_size)
3478  {
3479  std::ostringstream err_msg;
3480  err_msg << "block_vec_number.size() is " << para_block_vec_number_size
3481  << "\n."
3482  << "s.size() is " << para_s_size << ".\n"
3483  << "But they must be the same size!\n";
3484  throw OomphLibError(
3485  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3486  }
3487 
3488  // Check to see if there are more blocks defined in the block_vec_number
3489  // vector than the number of block types. This is not allowed.
3490  const unsigned para_n_block = nblock_types();
3491  if (para_block_vec_number_size > para_n_block)
3492  {
3493  std::ostringstream err_msg;
3494  err_msg << "Trying to return " << para_block_vec_number_size
3495  << " block vectors.\n"
3496  << "But there are only " << para_n_block << " block types.\n";
3497  throw OomphLibError(
3498  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3499  }
3500 
3501  // Check if any block number defined in block_vec_number is equal to or
3502  // greater than the number of block types.
3503  // E.g. if there are 5 block types, we can only have block numbers:
3504  // 0, 1, 2, 3 and 4.
3505  for (unsigned b = 0; b < para_block_vec_number_size; b++)
3506  {
3507  const unsigned para_required_block = block_vec_number[b];
3508  if (para_required_block >= para_n_block)
3509  {
3510  std::ostringstream err_msg;
3511  err_msg << "block_vec_number[" << b << "] is " << para_required_block
3512  << ".\n"
3513  << "But there are only " << para_n_block << " block types.\n";
3514  throw OomphLibError(
3515  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3516  }
3517  }
3518 
3519  // Check that no block number is inserted twice.
3520  std::set<unsigned> para_set;
3521  for (unsigned b = 0; b < para_block_vec_number_size; b++)
3522  {
3523  std::pair<std::set<unsigned>::iterator, bool> para_set_ret;
3524  para_set_ret = para_set.insert(block_vec_number[b]);
3525 
3526  if (!para_set_ret.second)
3527  {
3528  std::ostringstream err_msg;
3529  err_msg << "Error: the block number " << block_vec_number[b]
3530  << " appears twice.\n";
3531  throw OomphLibError(
3532  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3533  }
3534  }
3535 
3536  // Check to see that all the vectors in s are built
3537  // (since we are trying to return them).
3538  for (unsigned b = 0; b < para_block_vec_number_size; b++)
3539  {
3540  if (!s[b].built())
3541  {
3542  std::ostringstream err_msg;
3543  err_msg << "The distribution of the block vector s[" << b
3544  << "] must be setup.\n";
3545  throw OomphLibError(
3546  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3547  }
3548  }
3549 
3550  // Since these are built, we check that the distributions are correct.
3551  // They are incorrect if the block numbers in block_vec_number and
3552  // the vectors in s do not match.
3553  for (unsigned b = 0; b < para_block_vec_number_size; b++)
3554  {
3555  if (*(s[b].distribution_pt()) !=
3556  *(Block_distribution_pt[block_vec_number[b]]))
3557  {
3558  std::ostringstream error_message;
3559  error_message
3560  << "The distribution of the block vector " << b << " must match the"
3561  << " specified distribution at "
3562  << "Block_distribution_pt[" << block_vec_number[b] << "].\n"
3563  << "The distribution of the Block_distribution_pt is determined by\n"
3564  << "the vector block_vec_number. Perhaps it is incorrect?\n";
3565  throw OomphLibError(error_message.str(),
3566  OOMPH_CURRENT_FUNCTION,
3567  OOMPH_EXCEPTION_LOCATION);
3568  }
3569  }
3570 #endif
3571 
3572  // Number of blocks to get.
3573  const unsigned n_block = block_vec_number.size();
3574 
3575  // Each block is made of dof types. We get the most fine grain dof types.
3576  // Most fine grain in the sense that these are the dof types that belong
3577  // to this block before any coarsening of dof types has taken place.
3578  // The ordering of the dof types matters; this is handled properly when
3579  // creating the Block_to_dof_map_fine vector and must be respected here.
3580  // I.e. we cannot arbitrarily insert dof types (even if they are correct)
3581  // into the vector most_fine_grain_dof.
3582  Vector<unsigned> most_fine_grain_dof;
3583  for (unsigned b = 0; b < n_block; b++)
3584  {
3585  const unsigned mapped_b = block_vec_number[b];
3586 
3587  most_fine_grain_dof.insert(most_fine_grain_dof.end(),
3588  Block_to_dof_map_fine[mapped_b].begin(),
3589  Block_to_dof_map_fine[mapped_b].end());
3590  }
3591 
3592  // Split each block into its most fine grain dof-level vectors.
3593  Vector<DoubleVector> dof_vector(most_fine_grain_dof.size());
3594 
3595  unsigned offset = 0;
3596 
3597  // Perform the splitting for each block.
3598  for (unsigned b = 0; b < n_block; b++)
3599  {
3600  // The actual block number.
3601  const unsigned mapped_b = block_vec_number[b];
3602 
3603  // How many most fine grain dof types are associated with this block?
3604  const unsigned ndof = Block_to_dof_map_fine[mapped_b].size();
3605 
3606  if (ndof == 1)
3607  // No need to split, just copy.
3608  {
3609  dof_vector[offset] = s[b];
3610  }
3611  else
3612  // Need to split s[b] into its most fine grain dof vectors
3613  {
3614  // To store pointers to the dof vectors associated with this block.
3615  Vector<DoubleVector*> tmp_dof_vector_pt(ndof, 0);
3616 
3617  for (unsigned d = 0; d < ndof; d++)
3618  {
3619  const unsigned offset_plus_d = offset + d;
3620 
3621  // build the dof vector.
3622  dof_vector[offset_plus_d].build(
3623  Internal_block_distribution_pt[most_fine_grain_dof[offset_plus_d]]);
3624 
3625  // Store the pointer.
3626  tmp_dof_vector_pt[d] = &dof_vector[offset_plus_d];
3627  }
3628 
3629  // Split without communication.
3630  DoubleVectorHelpers::split_without_communication(s[b],
3631  tmp_dof_vector_pt);
3632  }
3633 
3634  // Update the offset!
3635  offset += ndof;
3636  }
3637 
3638  // Return the block vectors all in one go.
3639  internal_return_block_vectors(most_fine_grain_dof, dof_vector, v);
3640  } // return_block_vectors(...)
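
  // Illustrative usage sketch (not part of the library source): a
  // subsidiary block preconditioner that only needs to update, say,
  // block 1 of the naturally ordered vector "z" (hypothetical) can
  // extract it, operate on it and write it back; entries of z belonging
  // to other blocks are left untouched.
  //
  //   Vector<unsigned> block_vec_number(1, 1); // just block 1
  //   Vector<DoubleVector> z_block;
  //   this->get_block_vectors(block_vec_number, z, z_block);
  //   // ... operate on z_block[0] ...
  //   this->return_block_vectors(block_vec_number, z_block, z);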
3641 
3642 
3643  //============================================================================
3644  /// Takes the vector of block vectors, s, and copies its entries into
3645  /// the naturally ordered vector, v. If this is a subsidiary block
3646  /// preconditioner only those entries in v that are associated with its
3647  /// blocks are affected. The block_vec_number indicates which block the
3648  /// vectors in s came from. The block number corresponds to the block
3649  /// numbers in this preconditioner.
3650  /// This is simply a wrapper around the other return_block_vectors(...)
3651  /// function where the block_vec_number Vector is the identity, i.e.
3652  /// block_vec_number is [0, 1, ..., nblock_types - 1].
3653  //============================================================================
3654  template<typename MATRIX>
3655  void BlockPreconditioner<MATRIX>::return_block_vectors(
3656  const Vector<DoubleVector>& s, DoubleVector& v) const
3657  {
3658  // The number of block types in this preconditioner.
3659  const unsigned n_block = nblock_types();
3660 
3661  // Create the identity vector.
3662  Vector<unsigned> required_block(n_block, 0);
3663  for (unsigned i = 0; i < n_block; i++)
3664  {
3665  required_block[i] = i;
3666  }
3667 
3668  // Call the other return_block_vectors function which does the work.
3669  return_block_vectors(required_block, s, v);
3670  } // return_block_vectors(...)
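
  // Illustrative sketch (not part of the library source): the typical
  // round trip inside a block preconditioner's preconditioner_solve(...),
  // using the identity-mapping get_block_vectors(...) and
  // return_block_vectors(...) above. The class name and the member
  // "Subsidiary_solver_pt" (a vector of per-block preconditioners set up
  // elsewhere) are hypothetical.
  //
  //   void MyBlockDiagonalPreconditioner::preconditioner_solve(
  //     const DoubleVector& r, DoubleVector& z)
  //   {
  //     // Split the naturally ordered residual into one vector per block.
  //     Vector<DoubleVector> block_r;
  //     this->get_block_vectors(r, block_r);
  //
  //     // Solve each block independently.
  //     const unsigned n_block = this->nblock_types();
  //     Vector<DoubleVector> block_z(n_block);
  //     for (unsigned b = 0; b < n_block; b++)
  //     {
  //       block_z[b].build(block_r[b].distribution_pt(), 0.0);
  //       Subsidiary_solver_pt[b]->preconditioner_solve(block_r[b], block_z[b]);
  //     }
  //
  //     // Copy the block solutions back into the naturally ordered z.
  //     this->return_block_vectors(block_z, z);
  //   }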
3671 
3672  //============================================================================
3673  /// Takes the naturally ordered vector and
3674  /// rearranges it into a vector of sub vectors corresponding to the blocks,
3675  /// so s[b][i] contains the i-th entry in the vector associated with block b.
3676  /// The block_vec_number indicates which blocks we want.
3677  /// These blocks and vectors are those corresponding to the internal blocks.
3678  /// Note: If the preconditioner is a subsidiary preconditioner then only the
3679  /// sub-vectors associated with the blocks of the subsidiary preconditioner
3680  /// will be included. Hence the length of v is master_nrow() whereas the
3681  /// total length of the s vectors is the sum of the Nrow of the sub vectors.
3682  //============================================================================
3683  template<typename MATRIX>
3684  void BlockPreconditioner<MATRIX>::internal_return_block_vectors(
3685  const Vector<unsigned>& block_vec_number,
3686  const Vector<DoubleVector>& s,
3687  DoubleVector& v) const
3688  {
3689  // the number of blocks
3690  const unsigned nblock = block_vec_number.size();
3691 
3692 #ifdef PARANOID
3693  if (!v.built())
3694  {
3695  std::ostringstream error_message;
3696  error_message << "The distribution of the global vector v must be setup.";
3697  throw OomphLibError(
3698  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3699  }
3700  if (*(v.distribution_pt()) != *(this->master_distribution_pt()))
3701  {
3702  std::ostringstream error_message;
3703  error_message << "The distribution of the global vector v must match the "
3704  << " specified master_distribution_pt(). \n"
3705  << "i.e. Distribution_pt in the master preconditioner";
3706  throw OomphLibError(
3707  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3708  }
3709  for (unsigned b = 0; b < nblock; b++)
3710  {
3711  if (!s[b].built())
3712  {
3713  std::ostringstream error_message;
3714  error_message << "The distribution of the block vector " << b
3715  << " must be setup.";
3716  throw OomphLibError(error_message.str(),
3717  OOMPH_CURRENT_FUNCTION,
3718  OOMPH_EXCEPTION_LOCATION);
3719  }
3720  const unsigned required_block = block_vec_number[b];
3721  if (*(s[b].distribution_pt()) !=
3722  *(Internal_block_distribution_pt[required_block]))
3723  {
3724  std::ostringstream error_message;
3725  error_message
3726  << "The distribution of the block vector " << b << " must match the"
3727  << " specified distribution at Internal_block_distribution_pt[" << b
3728  << "]";
3729  throw OomphLibError(error_message.str(),
3730  OOMPH_CURRENT_FUNCTION,
3731  OOMPH_EXCEPTION_LOCATION);
3732  }
3733  }
3734 #endif
3735 
3736  // if + only one processor
3737  // + more than one processor but matrix_pt is not distributed
3738  // then use the serial get_block method
3739  if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
3740  !this->distribution_pt()->distributed())
3741  {
3742  double* v_pt = v.values_pt();
3743  for (unsigned b = 0; b < nblock; b++)
3744  {
3745  const unsigned required_block = block_vec_number[b];
3746 
3747  const double* s_pt = s[b].values_pt();
3748  unsigned nrow = this->internal_block_dimension(required_block);
3749  for (unsigned i = 0; i < nrow; i++)
3750  {
3751  v_pt[this->Global_index[required_block][i]] = s_pt[i];
3752  }
3753  }
3754  }
3755  // otherwise use mpi
3756  else
3757  {
3758 #ifdef OOMPH_HAS_MPI
3759 
3760  // my rank
3761  unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
3762 
3763  // the number of processors
3764  unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
3765 
3766  // determine the maximum number of rows to be sent or received,
3767  // and the number of blocks for which each processor requires
3768  // send/recv communication
3769  Vector<int> nblock_send(nproc, 0);
3770  Vector<int> nblock_recv(nproc, 0);
3771  unsigned max_n_send_or_recv = 0;
3772  for (unsigned p = 0; p < nproc; p++)
3773  {
3774  for (unsigned b = 0; b < nblock; b++)
3775  {
3776  const unsigned required_block = block_vec_number[b];
3777 
3778  max_n_send_or_recv = std::max(
3779  max_n_send_or_recv, Nrows_to_send_for_get_block(required_block, p));
3780  max_n_send_or_recv = std::max(
3781  max_n_send_or_recv, Nrows_to_recv_for_get_block(required_block, p));
3782  if (Nrows_to_send_for_get_block(required_block, p) > 0)
3783  {
3784  nblock_recv[p]++;
3785  }
3786  if (Nrows_to_recv_for_get_block(required_block, p) > 0)
3787  {
3788  nblock_send[p]++;
3789  }
3790  }
3791  }
3792 
3793  // create a vector of 1s (of length max_n_send_or_recv) for the mpi
3794  // indexed data types
3795  int* block_lengths = new int[max_n_send_or_recv];
3796  for (unsigned i = 0; i < max_n_send_or_recv; i++)
3797  {
3798  block_lengths[i] = 1;
3799  }
3800 
3801  // perform the sends and receives
3802  Vector<MPI_Request> requests;
3803  for (unsigned p = 0; p < nproc; p++)
3804  {
3805  // send and recv with other processors
3806  if (p != my_rank)
3807  {
3808  // recv
3809  if (nblock_recv[p] > 0)
3810  {
3811  // create the datatypes vector
3812  MPI_Datatype block_recv_types[nblock_recv[p]];
3813 
3814  // create the datatypes
3815  unsigned ptr = 0;
3816  for (unsigned b = 0; b < nblock; b++)
3817  {
3818  const unsigned required_block = block_vec_number[b];
3819 
3820  if (Nrows_to_send_for_get_block(required_block, p) > 0)
3821  {
3822  MPI_Type_indexed(Nrows_to_send_for_get_block(required_block, p),
3823  block_lengths,
3824  Rows_to_send_for_get_block(required_block, p),
3825  MPI_DOUBLE,
3826  &block_recv_types[ptr]);
3827  MPI_Type_commit(&block_recv_types[ptr]);
3828  ptr++;
3829  }
3830  }
3831 
3832  // compute the displacements and lengths
3833  MPI_Aint displacements[nblock_recv[p]];
3834  int lengths[nblock_recv[p]];
3835  for (int i = 0; i < nblock_recv[p]; i++)
3836  {
3837  lengths[i] = 1;
3838  displacements[i] = 0;
3839  }
3840 
3841  // build the final datatype
3842  MPI_Datatype type_recv;
3843  MPI_Type_create_struct(nblock_recv[p],
3844  lengths,
3845  displacements,
3846  block_recv_types,
3847  &type_recv);
3848  MPI_Type_commit(&type_recv);
3849 
3850  // recv
3851  MPI_Request recv_req;
3852  MPI_Irecv(v.values_pt(),
3853  1,
3854  type_recv,
3855  p,
3856  0,
3857  this->distribution_pt()->communicator_pt()->mpi_comm(),
3858  &recv_req);
3859  MPI_Type_free(&type_recv);
3860  for (int i = 0; i < nblock_recv[p]; i++)
3861  {
3862  MPI_Type_free(&block_recv_types[i]);
3863  }
3864  requests.push_back(recv_req);
3865  }
3866 
3867  // send
3868  if (nblock_send[p] > 0)
3869  {
3870  // create the datatypes vector
3871  MPI_Datatype block_send_types[nblock_send[p]];
3872 
3873  // and the displacements
3874  MPI_Aint displacements[nblock_send[p]];
3875 
3876  // and the lengths
3877  int lengths[nblock_send[p]];
3878 
3879  // all displacements are computed relative to s[0] values
3880  MPI_Aint displacements_base;
3881  MPI_Get_address(const_cast<double*>(s[0].values_pt()),
3882  &displacements_base);
3883 
3884  // now build
3885  unsigned ptr = 0;
3886  for (unsigned b = 0; b < nblock; b++)
3887  {
3888  const unsigned required_block = block_vec_number[b];
3889 
3890  if (Nrows_to_recv_for_get_block(required_block, p) > 0)
3891  {
3892  MPI_Type_indexed(Nrows_to_recv_for_get_block(required_block, p),
3893  block_lengths,
3894  Rows_to_recv_for_get_block(required_block, p),
3895  MPI_DOUBLE,
3896  &block_send_types[ptr]);
3897  MPI_Type_commit(&block_send_types[ptr]);
3898  MPI_Get_address(const_cast<double*>(s[b].values_pt()),
3899  &displacements[ptr]);
3900  displacements[ptr] -= displacements_base;
3901  lengths[ptr] = 1;
3902  ptr++;
3903  }
3904  }
3905 
3906  // build the final data type
3907  MPI_Datatype type_send;
3908  MPI_Type_create_struct(nblock_send[p],
3909  lengths,
3910  displacements,
3911  block_send_types,
3912  &type_send);
3913  MPI_Type_commit(&type_send);
3914 
3915  // send
3916  MPI_Request send_req;
3917  MPI_Isend(const_cast<double*>(s[0].values_pt()),
3918  1,
3919  type_send,
3920  p,
3921  0,
3922  this->distribution_pt()->communicator_pt()->mpi_comm(),
3923  &send_req);
3924  MPI_Type_free(&type_send);
3925  for (int i = 0; i < nblock_send[p]; i++)
3926  {
3927  MPI_Type_free(&block_send_types[i]);
3928  }
3929  requests.push_back(send_req);
3930  }
3931  }
3932 
3933  // communicate with self
3934  else
3935  {
3936  double* v_values_pt = v.values_pt();
3937  for (unsigned b = 0; b < nblock; b++)
3938  {
3939  const unsigned required_block = block_vec_number[b];
3940 
3941  const double* w_values_pt = s[b].values_pt();
3942  for (unsigned i = 0;
3943  i < Nrows_to_send_for_get_block(required_block, p);
3944  i++)
3945  {
3946  v_values_pt[Rows_to_send_for_get_block(required_block, p)[i]] =
3947  w_values_pt[Rows_to_recv_for_get_block(required_block, p)[i]];
3948  }
3949  }
3950  }
3951  }
3952 
3953  // and then just wait
3954  unsigned c = requests.size();
3955  Vector<MPI_Status> stat(c);
3956  if (c)
3957  {
3958  MPI_Waitall(c, &requests[0], &stat[0]);
3959  }
3960  delete[] block_lengths;
3961 
3962 #else
3963  // throw error
3964  std::ostringstream error_message;
3965  error_message << "The preconditioner is distributed and on more than one "
3966  << "processor. MPI is required.";
3967  throw OomphLibError(
3968  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3969 #endif
3970  }
3971  }
3972 
3973  //============================================================================
3974  /// A helper function, takes the naturally ordered vector and
3975  /// rearranges it into a vector of sub vectors corresponding to the blocks,
3976  /// so s[b][i] contains the i-th entry in the vector associated with block b.
3977  /// The block_vec_number indicates which blocks we want.
3978  /// These blocks and vectors are those corresponding to the internal blocks.
3979  /// Note: If the preconditioner is a subsidiary preconditioner then only the
3980  /// sub-vectors associated with the blocks of the subsidiary preconditioner
3981  /// will be included. Hence the length of v is master_nrow() whereas the
3982  /// total length of the s vectors is the sum of the Nrow of the sub vectors.
3983  /// This is simply a wrapper around the other internal_get_block_vectors(...)
3984  /// function with the identity block_vec_number vector.
3985  //============================================================================
3986  template<typename MATRIX>
3987  void BlockPreconditioner<MATRIX>::internal_return_block_vectors(
3988  const Vector<DoubleVector>& s, DoubleVector& v) const
3989  {
3990  // the number of blocks
3991  const unsigned nblock = this->internal_nblock_types();
3992  Vector<unsigned> block_vec_number(nblock, 0);
3993  for (unsigned b = 0; b < nblock; b++)
3994  {
3995  block_vec_number[b] = b;
3996  }
3997 
3998  internal_return_block_vectors(block_vec_number, s, v);
3999  }
4000 
4001  //============================================================================
4002  /// A helper function, takes the naturally ordered vector, v,
4003  /// and extracts the n-th block vector, b.
4004  /// Here n is the block number in the current preconditioner.
4005  /// NOTE: The ordering of the vector b is the same as the
4006  /// ordering of the block matrix from internal_get_block(...).
4007  //============================================================================
4008  template<typename MATRIX>
4009  void BlockPreconditioner<MATRIX>::internal_get_block_vector(
4010  const unsigned& b, const DoubleVector& v, DoubleVector& w) const
4011  {
4012 #ifdef PARANOID
4013  // the number of blocks
4014  const unsigned n_blocks = this->internal_nblock_types();
4015 
4016  // paranoid check that block i is in this block preconditioner
4017  if (b >= n_blocks)
4018  {
4019  std::ostringstream error_message;
4020  error_message
4021  << "Requested block vector " << b
4022  << ", however this preconditioner has internal_nblock_types() "
4023  << "= " << internal_nblock_types() << std::endl;
4024  throw OomphLibError(
4025  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4026  }
4027  if (!v.built())
4028  {
4029  std::ostringstream error_message;
4030  error_message << "The distribution of the global vector v must be setup.";
4031  throw OomphLibError(
4032  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4033  }
4034  if (*(v.distribution_pt()) != *(this->master_distribution_pt()))
4035  {
4036  std::ostringstream error_message;
4037  error_message << "The distribution of the global vector v must match the "
4038  << " specified master_distribution_pt(). \n"
4039  << "i.e. Distribution_pt in the master preconditioner";
4040  throw OomphLibError(
4041  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4042  }
4043 #endif
4044 
4045  // rebuild the block vector
4046  w.build(Internal_block_distribution_pt[b], 0.0);
4047 
4048  // if + only one processor
4049  // + more than one processor but matrix_pt is not distributed
4050  // then use the serial get_block method
4051  if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4052  !this->distribution_pt()->distributed())
4053  {
4054  double* w_pt = w.values_pt();
4055  const double* v_pt = v.values_pt();
4056  unsigned n_row = w.nrow();
4057  for (unsigned i = 0; i < n_row; i++)
4058  {
4059  w_pt[i] = v_pt[this->Global_index[b][i]];
4060  }
4061  }
4062  // otherwise use mpi
4063  else
4064  {
4065 #ifdef OOMPH_HAS_MPI
4066 
4067  // my rank
4068  unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4069 
4070  // the number of processors
4071  unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4072 
4073  // determine the maximum number of rows to be sent or recv
4074  unsigned max_n_send_or_recv = 0;
4075  for (unsigned p = 0; p < nproc; p++)
4076  {
4077  max_n_send_or_recv =
4078  std::max(max_n_send_or_recv, Nrows_to_send_for_get_block(b, p));
4079  max_n_send_or_recv =
4080  std::max(max_n_send_or_recv, Nrows_to_recv_for_get_block(b, p));
4081  }
4082 
4083  // create a vector of 1s (of length max_n_send_or_recv) for the mpi
4084  // indexed data types
4085  int* block_lengths = new int[max_n_send_or_recv];
4086  for (unsigned i = 0; i < max_n_send_or_recv; i++)
4087  {
4088  block_lengths[i] = 1;
4089  }
4090 
4091  // perform the sends and receives
4092  Vector<MPI_Request> requests;
4093  for (unsigned p = 0; p < nproc; p++)
4094  {
4095  // send and recv with other processors
4096  if (p != my_rank)
4097  {
4098  if (Nrows_to_send_for_get_block(b, p) > 0)
4099  {
4100  // create the send datatype
4101  MPI_Datatype type_send;
4102  MPI_Type_indexed(Nrows_to_send_for_get_block(b, p),
4103  block_lengths,
4104  Rows_to_send_for_get_block(b, p),
4105  MPI_DOUBLE,
4106  &type_send);
4107  MPI_Type_commit(&type_send);
4108 
4109  // send
4110  MPI_Request send_req;
4111  MPI_Isend(const_cast<double*>(v.values_pt()),
4112  1,
4113  type_send,
4114  p,
4115  0,
4116  this->distribution_pt()->communicator_pt()->mpi_comm(),
4117  &send_req);
4118  MPI_Type_free(&type_send);
4119  requests.push_back(send_req);
4120  }
4121 
4122  if (Nrows_to_recv_for_get_block(b, p) > 0)
4123  {
4124  // create the recv datatype
4125  MPI_Datatype type_recv;
4126  MPI_Type_indexed(Nrows_to_recv_for_get_block(b, p),
4127  block_lengths,
4128  Rows_to_recv_for_get_block(b, p),
4129  MPI_DOUBLE,
4130  &type_recv);
4131  MPI_Type_commit(&type_recv);
4132 
4133  // recv
4134  MPI_Request recv_req;
4135  MPI_Irecv(w.values_pt(),
4136  1,
4137  type_recv,
4138  p,
4139  0,
4140  this->distribution_pt()->communicator_pt()->mpi_comm(),
4141  &recv_req);
4142  MPI_Type_free(&type_recv);
4143  requests.push_back(recv_req);
4144  }
4145  }
4146 
4147  // communicate with self
4148  else
4149  {
4150  double* w_values_pt = w.values_pt();
4151  const double* v_values_pt = v.values_pt();
4152  for (unsigned i = 0; i < Nrows_to_send_for_get_block(b, p); i++)
4153  {
4154  w_values_pt[Rows_to_recv_for_get_block(b, p)[i]] =
4155  v_values_pt[Rows_to_send_for_get_block(b, p)[i]];
4156  }
4157  }
4158  }
4159 
4160  // and then just wait
4161  unsigned c = requests.size();
4162  Vector<MPI_Status> stat(c);
4163  if (c)
4164  {
4165  MPI_Waitall(c, &requests[0], &stat[0]);
4166  }
4167  delete[] block_lengths;
4168 
4169 #else
4170  // throw error
4171  std::ostringstream error_message;
4172  error_message << "The preconditioner is distributed and on more than one "
4173  << "processor. MPI is required.";
4174  throw OomphLibError(
4175  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4176 #endif
4177  }
4178  }
4179 
4180  //============================================================================
4181  /// Takes the naturally ordered vector, v and returns the n-th
4182  /// block vector, b. Here n is the block number in the current
4183  /// preconditioner.
4184  //============================================================================
4185  template<typename MATRIX>
4186  void BlockPreconditioner<MATRIX>::get_block_vector(const unsigned& b,
4187  const DoubleVector& v,
4188  DoubleVector& w) const
4189  {
4190 #ifdef PARANOID
4191  // the number of blocks
4192  const unsigned para_n_blocks = nblock_types();
4193 
4194  // paranoid check that block i is in this block preconditioner
4195  if (b >= para_n_blocks)
4196  {
4197  std::ostringstream err_msg;
4198  err_msg << "Requested block vector " << b
4199  << ", however this preconditioner has only " << para_n_blocks
4200  << " block types"
4201  << ".\n";
4202  throw OomphLibError(
4203  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4204  }
4205 
4206  if (!v.built())
4207  {
4208  std::ostringstream err_msg;
4209  err_msg << "The distribution of the global vector v must be setup.";
4210  throw OomphLibError(
4211  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4212  }
4213  if (*(v.distribution_pt()) != *(this->master_distribution_pt()))
4214  {
4215  std::ostringstream err_msg;
4216  err_msg << "The distribution of the global vector v must match the "
4217  << " specified master_distribution_pt(). \n"
4218  << "i.e. Distribution_pt in the master preconditioner";
4219  throw OomphLibError(
4220  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4221  }
4222 #endif
4223 
4224  // Recall that the relationship between the external blocks and the
4225  // external dof types, as seen by the preconditioner writer, is stored in
4226  // the mapping Block_to_dof_map_coarse.
4227  //
4228  // However, each dof type could have been coarsened! The relationship
4229  // between the dof types of this preconditioner and the parent
4230  // preconditioner is stored in the mapping Doftype_coarsen_map_coarse. The
4231  // dof numbers in this map are relative to this preconditioner.
4232  //
4233  // Finally, the relationship between the dof types of this preconditioner
4234  // and the most fine grain dof types is stored in the mapping
4235  // Doftype_coarsen_map_fine. Again, the dof numbers in this map are
4236  // relative to this preconditioner.
4237  //
4238  // Furthermore, we note that concatenation of vectors without communication
4239  // is associative, but not commutative. I.e.
4240  // (V1+V2)+V3 = V1 + (V2 + V3), where + is concatenation without
4241  // communication.
4242  //
4243  // So all we need is the vectors listed in the correct order.
4244  //
4245  // We need only Block_to_dof_map_coarse to tell us which external dof types
4246  // are in this block, then Doftype_coarsen_map_fine to tell us which most
4247  // fine grain dofs to concatenate!
4248  //
4249  // All the mapping vectors are constructed to respect the ordering of
4250  // the dof types.
4251 
4252  // Get the most fine grain block to dof mapping.
4253  Vector<unsigned> most_fine_grain_dof = Block_to_dof_map_fine[b];
4254 
4255  // How many vectors do we need to concatenate?
4256  const unsigned n_dof_vec = most_fine_grain_dof.size();
4257 
4258  if (n_dof_vec == 1)
4259  // No need to concatenate, just extract the vector.
4260  {
4261  internal_get_block_vector(most_fine_grain_dof[0], v, w);
4262  }
4263  else
4264  // Need to concatenate dof-level vectors.
4265  {
4266  Vector<DoubleVector> dof_vector(n_dof_vec);
4267 
4268  // Get all the dof-level vectors in one go
4269  internal_get_block_vectors(most_fine_grain_dof, v, dof_vector);
4270  // Build w with the correct distribution.
4271  w.build(Block_distribution_pt[b], 0);
4272 
4273  // Concatenate the vectors.
4274  DoubleVectorHelpers::concatenate_without_communication(dof_vector, w);
4275 
4276  dof_vector.clear();
4277  }
4278  } // get_block_vector(...)
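
  // Illustrative usage sketch (not part of the library source): extracting
  // a single block vector, e.g. to monitor the size of the block-b part of
  // a residual. "r" and "b" are hypothetical and DoubleVector::norm() is
  // assumed to return the 2-norm.
  //
  //   DoubleVector r_b;
  //   this->get_block_vector(b, r, r_b);
  //   oomph_info << "||r_" << b << "|| = " << r_b.norm() << std::endl;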
4279 
4280  //============================================================================
4281  /// Takes the n-th block ordered vector, b, and copies its entries
4282  /// to the appropriate entries in the naturally ordered vector, v.
4283  /// Here n is the block number in the current block preconditioner.
4284  /// If the preconditioner is a subsidiary block preconditioner
4285  /// the other entries in v that are not associated with it
4286  /// are left alone.
4287  ///
4288  /// This version works with the internal block types. This is legacy code
4289  /// but is kept alive, hence moved to private. Please use the
4290  /// function "return_block_vector(...)".
4291  //============================================================================
4292  template<typename MATRIX>
4293  void BlockPreconditioner<MATRIX>::internal_return_block_vector(
4294  const unsigned& b, const DoubleVector& w, DoubleVector& v) const
4295  {
4296 #ifdef PARANOID
4297  // the number of blocks
4298  const unsigned n_blocks = this->internal_nblock_types();
4299 
4300  // paranoid check that block i is in this block preconditioner
4301  if (b >= n_blocks)
4302  {
4303  std::ostringstream error_message;
4304  error_message
4305  << "Requested block vector " << b
4306  << ", however this preconditioner has internal_nblock_types() "
4307  << "= " << internal_nblock_types() << std::endl;
4308  throw OomphLibError(
4309  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4310  }
4311  if (!v.built())
4312  {
4313  std::ostringstream error_message;
4314  error_message << "The distribution of the global vector v must be setup.";
4315  throw OomphLibError(
4316  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4317  }
4318  if (*v.distribution_pt() != *this->master_distribution_pt())
4319  {
4320  std::ostringstream error_message;
4321  error_message << "The distribution of the global vector v must match the "
4322  << " specified master_distribution_pt(). \n"
4323  << "i.e. Distribution_pt in the master preconditioner";
4324  throw OomphLibError(
4325  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4326  }
4327  if (!w.built())
4328  {
4329  std::ostringstream error_message;
4330  error_message << "The distribution of the block vector w must be setup.";
4331  throw OomphLibError(
4332  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4333  }
4334  if (*w.distribution_pt() != *Internal_block_distribution_pt[b])
4335  {
4336  std::ostringstream error_message;
4337  error_message
4338  << "The distribution of the block vector w must match the "
4339  << " specified distribution at Internal_block_distribution_pt[b]";
4340  throw OomphLibError(
4341  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4342  }
4343 #endif
4344 
4345  // if + only one processor
4346  // + more than one processor but matrix_pt is not distributed
4347  // then use the serial get_block method
4348  if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4349  !this->distribution_pt()->distributed())
4350  {
4351  // length of vector
4352  unsigned n_row = this->internal_block_dimension(b);
4353 
4354  // copy back from the block vector to the naturally ordered vector
4355  double* v_pt = v.values_pt();
4356  const double* w_pt = w.values_pt();
4357  for (unsigned i = 0; i < n_row; i++)
4358  {
4359  v_pt[this->Global_index[b][i]] = w_pt[i];
4360  }
4361  }
4362  // otherwise use mpi
4363  else
4364  {
4365 #ifdef OOMPH_HAS_MPI
4366 
4367  // my rank
4368  unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4369 
4370  // the number of processors
4371  unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4372 
4373  // determine the maximum number of rows to be sent or recv
4374  unsigned max_n_send_or_recv = 0;
4375  for (unsigned p = 0; p < nproc; p++)
4376  {
4377  max_n_send_or_recv =
4378  std::max(max_n_send_or_recv, Nrows_to_send_for_get_block(b, p));
4379  max_n_send_or_recv =
4380  std::max(max_n_send_or_recv, Nrows_to_recv_for_get_block(b, p));
4381  }
4382 
4383  // create a vector of 1s (of length max_n_send_or_recv) for the mpi
4384  // indexed data types
4385  int* block_lengths = new int[max_n_send_or_recv];
4386  for (unsigned i = 0; i < max_n_send_or_recv; i++)
4387  {
4388  block_lengths[i] = 1;
4389  }
4390 
4391  // perform the sends and receives
4392  Vector<MPI_Request> requests;
4393  for (unsigned p = 0; p < nproc; p++)
4394  {
4395  // send and recv with other processors
4396  if (p != my_rank)
4397  {
4398  if (Nrows_to_recv_for_get_block(b, p) > 0)
4399  {
4400  // create the send datatype
4401  MPI_Datatype type_send;
4402  MPI_Type_indexed(Nrows_to_recv_for_get_block(b, p),
4403  block_lengths,
4404  Rows_to_recv_for_get_block(b, p),
4405  MPI_DOUBLE,
4406  &type_send);
4407  MPI_Type_commit(&type_send);
4408 
4409  // send
4410  MPI_Request send_req;
4411  MPI_Isend(const_cast<double*>(w.values_pt()),
4412  1,
4413  type_send,
4414  p,
4415  0,
4416  this->distribution_pt()->communicator_pt()->mpi_comm(),
4417  &send_req);
4418  MPI_Type_free(&type_send);
4419  requests.push_back(send_req);
4420  }
4421 
4422  if (Nrows_to_send_for_get_block(b, p) > 0)
4423  {
4424  // create the recv datatype
4425  MPI_Datatype type_recv;
4426  MPI_Type_indexed(Nrows_to_send_for_get_block(b, p),
4427  block_lengths,
4428  Rows_to_send_for_get_block(b, p),
4429  MPI_DOUBLE,
4430  &type_recv);
4431  MPI_Type_commit(&type_recv);
4432 
4433  // recv
4434  MPI_Request recv_req;
4435  MPI_Irecv(v.values_pt(),
4436  1,
4437  type_recv,
4438  p,
4439  0,
4440  this->distribution_pt()->communicator_pt()->mpi_comm(),
4441  &recv_req);
4442  MPI_Type_free(&type_recv);
4443  requests.push_back(recv_req);
4444  }
4445  }
4446 
4447  // communicate with self
4448  else
4449  {
4450  const double* w_values_pt = w.values_pt();
4451  double* v_values_pt = v.values_pt();
4452  for (unsigned i = 0; i < Nrows_to_send_for_get_block(b, p); i++)
4453  {
4454  v_values_pt[Rows_to_send_for_get_block(b, p)[i]] =
4455  w_values_pt[Rows_to_recv_for_get_block(b, p)[i]];
4456  }
4457  }
4458  }
4459 
4460  // and then just wait
4461  unsigned c = requests.size();
4462  Vector<MPI_Status> stat(c);
4463  if (c)
4464  {
4465  MPI_Waitall(c, &requests[0], &stat[0]);
4466  }
4467  delete[] block_lengths;
4468 
4469 #else
4470  // throw error
4471  std::ostringstream error_message;
4472  error_message << "The preconditioner is distributed and on more than one "
4473  << "processor. MPI is required.";
4474  throw OomphLibError(
4475  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4476 #endif
4477  }
4478  }
4479 
4480  //============================================================================
4481  /// Takes the n-th block ordered vector, b, and copies its entries
4482  /// to the appropriate entries in the naturally ordered vector, v.
4483  /// Here n is the block number in the current block preconditioner.
4484  /// If the preconditioner is a subsidiary block preconditioner
4485  /// the other entries in v that are not associated with it
4486  /// are left alone.
4487  //============================================================================
4488  template<typename MATRIX>
4489  void BlockPreconditioner<MATRIX>::return_block_vector(const unsigned& n,
4490  const DoubleVector& b,
4491  DoubleVector& v) const
4492  {
4493 #ifdef PARANOID
4494  // the number of blocks
4495  const unsigned para_n_blocks = nblock_types();
4496 
4497  // paranoid check that block i is in this block preconditioner
4498  if (n >= para_n_blocks)
4499  {
4500  std::ostringstream err_msg;
4501  err_msg << "Requested block vector " << b
4502  << ", however this preconditioner has " << para_n_blocks
4503  << " block types.\n";
4504  throw OomphLibError(
4505  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4506  }
4507  if (!v.built())
4508  {
4509  std::ostringstream err_msg;
4510  err_msg << "The distribution of the global vector v must be setup.";
4511  throw OomphLibError(
4512  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4513  }
4514  if (*v.distribution_pt() != *this->master_distribution_pt())
4515  {
4516  std::ostringstream err_msg;
4517  err_msg << "The distribution of the global vector v must match the "
4518  << " specified master_distribution_pt(). \n"
4519  << "i.e. Distribution_pt in the master preconditioner";
4520  throw OomphLibError(
4521  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4522  }
4523  if (!b.built())
4524  {
4525  std::ostringstream err_msg;
4526  err_msg << "The distribution of the block vector b must be setup.";
4527  throw OomphLibError(
4528  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4529  }
4530 
4531 #endif
4532 
4533  // Get the most fine grain dof
4534  Vector<unsigned> most_fine_grain_dof = Block_to_dof_map_fine[n];
4535 
4536  // How many dofs are in this block?
4537  const unsigned n_dof_vec = Block_to_dof_map_fine[n].size();
4538 
4539  if (n_dof_vec == 1)
4540  // There is only one dof, no need to split.
4541  {
4542  internal_return_block_vector(most_fine_grain_dof[0], b, v);
4543  }
4544  else
4545  // Need to split the vector up before we insert them all in one go.
4546  {
4547  Vector<DoubleVector> dof_vector(n_dof_vec);
4548  for (unsigned d = 0; d < n_dof_vec; d++)
4549  {
4550  dof_vector[d].build(
4551  internal_block_distribution_pt(most_fine_grain_dof[d]));
4552  }
4553 
4554  DoubleVectorHelpers::split_without_communication(b, dof_vector);
4555 
4556  // return to v
4557  internal_return_block_vectors(most_fine_grain_dof, dof_vector, v);
4558  }
4559  } // return_block_vector(...)
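
  // Illustrative usage sketch (not part of the library source): the
  // extract/modify/return pattern for a single block of a naturally
  // ordered vector "z" (the block number "n" is hypothetical too).
  //
  //   DoubleVector z_n;
  //   this->get_block_vector(n, z, z_n);
  //   // ... operate on z_n, e.g. apply a subsidiary preconditioner ...
  //   this->return_block_vector(n, z_n, z);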
4560 
4561  //============================================================================
4562  /// Given the naturally ordered vector, v, return
4563  /// the vector rearranged in block order in w. This is a legacy function
4564  /// from the old block preconditioning framework. Kept alive in case it may
4565  /// be needed again.
4566  ///
4567  /// This uses the variables ending in "get_ordered". We no longer use this
4568  /// type of method. This function copies values from v, re-orders them
4569  /// in "block order" and places them in w. Block order means that the
4570  /// values in w are the same as the concatenated block vectors.
4571  ///
4572  /// I.e. - v is naturally ordered.
4573  /// v -> s_b, v is ordered into block vectors
4574  /// (requires communication)
4575  /// concatenate_without_communication(s_{0,...,nblocks},w) gives w.
4576  ///
4577  /// But this function skips out the concatenation part and builds w directly
4578  /// from v.
4579  ///
4580  /// This is nice but the function is implemented in such a way that it
4581  /// always uses all the (internal) blocks, concatenated with the
4582  /// identity ordering. I.e. if this preconditioner has 3 block types, then
4583  /// w will always be:
4584  /// concatenate_without_communication([s_0, s_1, s_2], w). There is no
4585  /// easy way to change this.
4586  ///
4587  /// Furthermore, it does not take into account the new dof type coarsening
4588  /// feature. So this function will most likely produce a vector w that
4589  /// differs from what the user intended. It still works, but w will be the
4590  /// concatenation of the most fine grain dof block vectors with the
4591  /// "natural" dof type ordering.
4592  ///
4593  /// This has been superseded by the function
4594  /// get_block_ordered_preconditioner_vector(...) which does the correct
4595  /// thing.
4596  //============================================================================
4597  template<typename MATRIX>
4598  void BlockPreconditioner<MATRIX>::
4599  internal_get_block_ordered_preconditioner_vector(const DoubleVector& v,
4600  DoubleVector& w) const
4601  {
4602 #ifdef PARANOID
4603  if (!v.built())
4604  {
4605  std::ostringstream error_message;
4606  error_message << "The distribution of the global vector v must be setup.";
4607  throw OomphLibError(
4608  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4609  }
4610  if (*v.distribution_pt() != *this->master_distribution_pt())
4611  {
4612  std::ostringstream error_message;
4613  error_message << "The distribution of the global vector v must match the "
4614  << " specified master_distribution_pt(). \n"
4615  << "i.e. Distribution_pt in the master preconditioner";
4616  throw OomphLibError(
4617  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4618  }
4619 #endif
4620 
4621  // Clear and resize w for the reordered vector
4622  w.build(this->internal_preconditioner_matrix_distribution_pt(), 0.0);
4623 
4624  // if + only one processor
4625  // + more than one processor but matrix_pt is not distributed
4626  // then use the serial get_block method
4627  if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4628  !this->distribution_pt()->distributed())
4629  {
4630  // number of blocks
4631  unsigned nblock = this->Internal_nblock_types;
4632 
4633  // copy to w
4634  unsigned block_offset = 0;
4635  double* w_pt = w.values_pt();
4636  const double* v_pt = v.values_pt();
4637  for (unsigned b = 0; b < nblock; b++)
4638  {
4639  unsigned block_nrow = this->internal_block_dimension(b);
4640  for (unsigned i = 0; i < block_nrow; i++)
4641  {
4642  w_pt[block_offset + i] = v_pt[this->Global_index[b][i]];
4643  }
4644  block_offset += block_nrow;
4645  }
4646  }
4647  // otherwise use mpi
4648  else
4649  {
4650 #ifdef OOMPH_HAS_MPI
4651 
4652  // my rank
4653  unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4654 
4655  // the number of processors
4656  unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4657 
4658  // determine the maximum number of rows to be sent or recv
4659  unsigned max_n_send_or_recv = 0;
4660  for (unsigned p = 0; p < nproc; p++)
4661  {
4662  max_n_send_or_recv =
4663  std::max(max_n_send_or_recv, Nrows_to_send_for_get_ordered[p]);
4664  max_n_send_or_recv =
4665  std::max(max_n_send_or_recv, Nrows_to_recv_for_get_ordered[p]);
4666  }
4667 
4668  // create a vector of 1s (of length max_n_send_or_recv) for the mpi
4669  // indexed data types
4670  int* block_lengths = new int[max_n_send_or_recv];
4671  for (unsigned i = 0; i < max_n_send_or_recv; i++)
4672  {
4673  block_lengths[i] = 1;
4674  }
4675 
4676  // perform the sends and receives
4677  Vector<MPI_Request> requests;
4678  for (unsigned p = 0; p < nproc; p++)
4679  {
4680  // send and recv with other processors
4681  if (p != my_rank)
4682  {
4683  if (Nrows_to_send_for_get_ordered[p] > 0)
4684  {
4685  // create the send datatype
4686  MPI_Datatype type_send;
4687  MPI_Type_indexed(Nrows_to_send_for_get_ordered[p],
4688  block_lengths,
4689  Rows_to_send_for_get_ordered[p],
4690  MPI_DOUBLE,
4691  &type_send);
4692  MPI_Type_commit(&type_send);
4693 
4694  // send
4695  MPI_Request send_req;
4696  MPI_Isend(const_cast<double*>(v.values_pt()),
4697  1,
4698  type_send,
4699  p,
4700  0,
4701  this->distribution_pt()->communicator_pt()->mpi_comm(),
4702  &send_req);
4703  MPI_Type_free(&type_send);
4704  requests.push_back(send_req);
4705  }
4706 
4707  if (Nrows_to_recv_for_get_ordered[p] > 0)
4708  {
4709  // create the recv datatype
4710  MPI_Datatype type_recv;
4711  MPI_Type_indexed(Nrows_to_recv_for_get_ordered[p],
4712  block_lengths,
4713  Rows_to_recv_for_get_ordered[p],
4714  MPI_DOUBLE,
4715  &type_recv);
4716  MPI_Type_commit(&type_recv);
4717 
4718  // recv
4719  MPI_Request recv_req;
4720  MPI_Irecv(w.values_pt(),
4721  1,
4722  type_recv,
4723  p,
4724  0,
4725  this->distribution_pt()->communicator_pt()->mpi_comm(),
4726  &recv_req);
4727  MPI_Type_free(&type_recv);
4728  requests.push_back(recv_req);
4729  }
4730  }
4731 
4732  // communicate with self
4733  else
4734  {
4735  double* w_values_pt = w.values_pt();
4736  const double* v_values_pt = v.values_pt();
4737  for (unsigned i = 0; i < Nrows_to_send_for_get_ordered[p]; i++)
4738  {
4739  w_values_pt[Rows_to_recv_for_get_ordered[p][i]] =
4740  v_values_pt[Rows_to_send_for_get_ordered[p][i]];
4741  }
4742  }
4743  }
4744 
4745  // and then just wait
4746  unsigned c = requests.size();
4747  Vector<MPI_Status> stat(c);
4748  if (c)
4749  {
4750  MPI_Waitall(c, &requests[0], &stat[0]);
4751  }
4752  delete[] block_lengths;
4753 
4754 #else
4755  // throw error
4756  std::ostringstream error_message;
4757  error_message << "The preconditioner is distributed and on more than one "
4758  << "processor. MPI is required.";
4759  throw OomphLibError(
4760  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4761 #endif
4762  }
4763  }
4764 
4765  //============================================================================
4766  /// Given the naturally ordered vector, v, return
4767  /// the vector rearranged in block order in w. This function calls
4768  /// get_concatenated_block_vector(...) with the identity block mapping.
4769  ///
4770  /// This function has been re-written to work with the new dof type
4771  /// coarsening feature. The old function is kept alive in
4772  /// internal_get_block_ordered_preconditioner_vector(...) and is moved to
4773  /// the private section of the code. The differences between the two are:
4774  ///
4775  /// 1) This function extracts all the block vectors (in one go) via the
4776  /// function internal_get_block_vectors(...), and concatenates them.
4777  ///
 4778  /// 2) The old function makes use of the variables ending in "get_ordered",
 4779  /// and is thus slightly more efficient since it does not have to
 4780  /// concatenate any block vectors.
4781  ///
 4782  /// 3) The old function no longer respects the new indirections if dof types
4783  /// have been coarsened.
4784  ///
 4785  /// 4) This function extracts the most fine-grained dof-level vectors and
4786  /// concatenates them. These dof-level vectors respect the re-ordering
4787  /// caused by the coarsening of dof types. The overhead associated with
4788  /// concatenating DoubleVectors without communication is very small.
4789  ///
4790  /// This function should be used.
4791  //============================================================================
4792  template<typename MATRIX>
 4793  void BlockPreconditioner<MATRIX>::get_block_ordered_preconditioner_vector(
 4794  const DoubleVector& v, DoubleVector& w)
4795  {
4796 #ifdef PARANOID
4797  if (!v.built())
4798  {
4799  std::ostringstream error_message;
4800  error_message << "The distribution of the global vector v must be setup.";
4801  throw OomphLibError(
4802  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4803  }
4804  if (*v.distribution_pt() != *this->master_distribution_pt())
4805  {
4806  std::ostringstream error_message;
4807  error_message << "The distribution of the global vector v must match the "
4808  << " specified master_distribution_pt(). \n"
4809  << "i.e. Distribution_pt in the master preconditioner";
4810  throw OomphLibError(
4811  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4812  }
4813 #endif
4814 
4815  // Get the number of blocks.
4816  unsigned nblocks = this->nblock_types();
4817 
4818  // Fill in the identity mapping.
4819  Vector<unsigned> block_vec_number(nblocks, 0);
4820  for (unsigned b = 0; b < nblocks; b++)
4821  {
4822  block_vec_number[b] = b;
4823  }
4824 
4825  // Do the work.
4826  get_concatenated_block_vector(block_vec_number, v, w);
4827  } // get_block_ordered_preconditioner_vector(...)
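 // ---------------------------------------------------------------------
 // Editorial usage sketch (not part of oomph-lib): a minimal example of how a
 // derived block preconditioner might use the reordering pair above inside its
 // preconditioner_solve(...). The class name ExampleBlockPreconditioner and
 // the member Block_solver_pt are hypothetical; only the two reordering calls
 // are taken from this file.
 //
 //   void ExampleBlockPreconditioner::preconditioner_solve(
 //     const DoubleVector& r, DoubleVector& z)
 //   {
 //     // Rearrange the naturally ordered residual into block order
 //     DoubleVector r_block;
 //     this->get_block_ordered_preconditioner_vector(r, r_block);
 //
 //     // ... apply the (block-ordered) approximate solve ...
 //     DoubleVector z_block(r_block.distribution_pt(), 0.0);
 //     Block_solver_pt->solve(r_block, z_block); // hypothetical solver
 //
 //     // Copy the result back into natural ordering
 //     z.build(r.distribution_pt(), 0.0);
 //     this->return_block_ordered_preconditioner_vector(z_block, z);
 //   }
 // ---------------------------------------------------------------------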
4828 
4829  //============================================================================
4830  /// Takes the block ordered vector, w, and reorders it in the natural
4831  /// order. Reordered vector is returned in v. Note: If the preconditioner is
4832  /// a subsidiary preconditioner then only the components of the vector
4833  /// associated with the blocks of the subsidiary preconditioner will be
 4834  /// included. Hence the length of v is master_nrow() whereas the length
 4835  /// of the vector w is this->nrow().
4836  ///
4837  /// This is the return function for the function
4838  /// internal_get_block_ordered_preconditioner_vector(...).
4839  /// Both internal_get_block_ordered_preconditioner_vector(...) and
 4840  /// internal_return_block_ordered_preconditioner_vector(...) have been
 4841  /// superseded by the functions
 4842  ///
 4843  /// get_block_ordered_preconditioner_vector(...) and
 4844  /// return_block_ordered_preconditioner_vector(...).
 4845  ///
 4846  /// Thus this function has been moved to the private section of the code.
4847  //============================================================================
 4848  template<typename MATRIX>
 4849  void BlockPreconditioner<MATRIX>::
 4850  internal_return_block_ordered_preconditioner_vector(const DoubleVector& w,
 4851  DoubleVector& v) const
4852  {
4853 #ifdef PARANOID
4854  if (!v.built())
4855  {
4856  std::ostringstream error_message;
4857  error_message << "The distribution of the global vector v must be setup.";
4858  throw OomphLibError(
4859  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4860  }
4861  if (*v.distribution_pt() != *this->master_distribution_pt())
4862  {
4863  std::ostringstream error_message;
4864  error_message << "The distribution of the global vector v must match the "
4865  << " specified master_distribution_pt(). \n"
4866  << "i.e. Distribution_pt in the master preconditioner";
4867  throw OomphLibError(
4868  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4869  }
4870  if (!w.built())
4871  {
4872  std::ostringstream error_message;
4873  error_message << "The distribution of the block vector w must be setup.";
4874  throw OomphLibError(
4875  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4876  }
4877  if (*w.distribution_pt() !=
4878  *this->internal_preconditioner_matrix_distribution_pt())
4879  {
4880  std::ostringstream error_message;
4881  error_message << "The distribution of the block vector w must match the "
4882  << " specified distribution at Distribution_pt[b]";
4883  throw OomphLibError(
4884  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4885  }
4886 #endif
4887 
4888 
4889  // if + only one processor
4890  // + more than one processor but matrix_pt is not distributed
 4891  // then copy the entries back serially
4892  if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4893  !this->distribution_pt()->distributed())
4894  {
4895  // number of blocks
4896  unsigned nblock = this->Internal_nblock_types;
4897 
4898  // copy to w
4899  unsigned block_offset = 0;
4900  const double* w_pt = w.values_pt();
4901  double* v_pt = v.values_pt();
4902  for (unsigned b = 0; b < nblock; b++)
4903  {
4904  unsigned block_nrow = this->internal_block_dimension(b);
4905  for (unsigned i = 0; i < block_nrow; i++)
4906  {
4907  v_pt[this->Global_index[b][i]] = w_pt[block_offset + i];
4908  }
4909  block_offset += block_nrow;
4910  }
4911  }
4912  // otherwise use mpi
4913  else
4914  {
4915 #ifdef OOMPH_HAS_MPI
4916 
4917  // my rank
4918  unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4919 
4920  // the number of processors
4921  unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4922 
4923  // determine the maximum number of rows to be sent or recv
4924  unsigned max_n_send_or_recv = 0;
4925  for (unsigned p = 0; p < nproc; p++)
4926  {
4927  max_n_send_or_recv =
4928  std::max(max_n_send_or_recv, Nrows_to_send_for_get_ordered[p]);
4929  max_n_send_or_recv =
4930  std::max(max_n_send_or_recv, Nrows_to_recv_for_get_ordered[p]);
4931  }
4932 
 4933  // create a vector of 1s (the block lengths for the mpi indexed
 4934  // data types)
4935  int* block_lengths = new int[max_n_send_or_recv];
4936  for (unsigned i = 0; i < max_n_send_or_recv; i++)
4937  {
4938  block_lengths[i] = 1;
4939  }
4940 
4941  // perform the sends and receives
4942  Vector<MPI_Request> requests;
4943  for (unsigned p = 0; p < nproc; p++)
4944  {
4945  // send and recv with other processors
4946  if (p != my_rank)
4947  {
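 // (Editorial note: in this "return" direction the lookup tables built for
 //  the "get" direction swap roles -- the rows that were previously received
 //  are now the rows that are sent back, and vice versa.)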
4948  if (Nrows_to_recv_for_get_ordered[p] > 0)
4949  {
4950  // create the send datatype
4951  MPI_Datatype type_send;
4952  MPI_Type_indexed(Nrows_to_recv_for_get_ordered[p],
4953  block_lengths,
4954  Rows_to_recv_for_get_ordered[p],
4955  MPI_DOUBLE,
4956  &type_send);
4957  MPI_Type_commit(&type_send);
4958 
4959  // send
4960  MPI_Request send_req;
4961  MPI_Isend(const_cast<double*>(w.values_pt()),
4962  1,
4963  type_send,
4964  p,
4965  0,
4966  this->distribution_pt()->communicator_pt()->mpi_comm(),
4967  &send_req);
4968  MPI_Type_free(&type_send);
4969  requests.push_back(send_req);
4970  }
4971 
4972  if (Nrows_to_send_for_get_ordered[p] > 0)
4973  {
4974  // create the recv datatype
4975  MPI_Datatype type_recv;
4976  MPI_Type_indexed(Nrows_to_send_for_get_ordered[p],
4977  block_lengths,
4978  Rows_to_send_for_get_ordered[p],
4979  MPI_DOUBLE,
4980  &type_recv);
4981  MPI_Type_commit(&type_recv);
4982 
4983  // recv
4984  MPI_Request recv_req;
4985  MPI_Irecv(v.values_pt(),
4986  1,
4987  type_recv,
4988  p,
4989  0,
4990  this->distribution_pt()->communicator_pt()->mpi_comm(),
4991  &recv_req);
4992  MPI_Type_free(&type_recv);
4993  requests.push_back(recv_req);
4994  }
4995  }
4996 
 4997  // communicate with self
4998  else
4999  {
5000  const double* w_values_pt = w.values_pt();
5001  double* v_values_pt = v.values_pt();
5002  for (unsigned i = 0; i < Nrows_to_send_for_get_ordered[p]; i++)
5003  {
5004  v_values_pt[Rows_to_send_for_get_ordered[p][i]] =
5005  w_values_pt[Rows_to_recv_for_get_ordered[p][i]];
5006  }
5007  }
5008  }
5009 
5010  // and then just wait
5011  unsigned c = requests.size();
5012  Vector<MPI_Status> stat(c);
5013  if (c)
5014  {
5015  MPI_Waitall(c, &requests[0], &stat[0]);
5016  }
5017  delete[] block_lengths;
5018 
5019 #else
5020  // throw error
5021  std::ostringstream error_message;
5022  error_message << "The preconditioner is distributed and on more than one "
5023  << "processor. MPI is required.";
5024  throw OomphLibError(
5025  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5026 #endif
5027  } // else use mpi
5028  } // function return_block_ordered_preconditioner_vector
5029 
5030 
5031  //============================================================================
5032  /// Takes the block ordered vector, w, and reorders it in natural
5033  /// order. Reordered vector is returned in v. Note: If the preconditioner is
5034  /// a subsidiary preconditioner then only the components of the vector
5035  /// associated with the blocks of the subsidiary preconditioner will be
 5036  /// included. Hence the length of v is master_nrow() whereas the length
 5037  /// of the vector w is this->nrow().
5038  ///
5039  /// This is the return function for the function
5040  /// get_block_ordered_preconditioner_vector(...).
5041  ///
5042  /// It calls the function return_concatenated_block_vector(...) with the
5043  /// identity block number ordering.
5044  //============================================================================
5045  template<typename MATRIX>
 5046  void BlockPreconditioner<MATRIX>::return_block_ordered_preconditioner_vector(
 5047  const DoubleVector& w, DoubleVector& v) const
5048  {
5049 #ifdef PARANOID
5050  if (!v.built())
5051  {
5052  std::ostringstream error_message;
5053  error_message << "The distribution of the global vector v must be setup.";
5054  throw OomphLibError(
5055  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5056  }
5057  if (*v.distribution_pt() != *this->master_distribution_pt())
5058  {
5059  std::ostringstream error_message;
5060  error_message << "The distribution of the global vector v must match the "
5061  << " specified master_distribution_pt(). \n"
5062  << "i.e. Distribution_pt in the master preconditioner";
5063  throw OomphLibError(
5064  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5065  }
5066  if (!w.built())
5067  {
5068  std::ostringstream error_message;
5069  error_message << "The distribution of the block vector w must be setup.";
5070  throw OomphLibError(
5071  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5072  }
5073  if (*w.distribution_pt() != *this->preconditioner_matrix_distribution_pt())
5074  {
5075  std::ostringstream error_message;
5076  error_message << "The distribution of the block vector w must match the "
5077  << "concatenations of distributions in "
5078  << "Block_distribution_pt.\n";
5079  throw OomphLibError(
5080  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5081  }
5082 #endif
5083 
5084  // Split into the block vectors.
5085  const unsigned nblocks = nblock_types();
5086  Vector<unsigned> block_vec_number(nblocks, 0);
5087  for (unsigned b = 0; b < nblocks; b++)
5088  {
5089  block_vec_number[b] = b;
5090  }
5091 
5092  return_concatenated_block_vector(block_vec_number, w, v);
5093  } // function return_block_ordered_preconditioner_vector
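 // (Editorial note: the identity mapping above simply means "all block types,
 //  concatenated in their natural order 0,1,...,nblock_types()-1", which is
 //  what "block ordered" means here. A permuted block_vec_number passed to
 //  return_concatenated_block_vector(...) would instead treat w as a
 //  concatenation of the blocks in that permuted order.)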
5094 
5095  //=============================================================================
5096  /// Gets block (i,j) from the matrix pointed to by
5097  /// Matrix_pt and returns it in output_block. This is associated with the
5098  /// internal blocks. Please use the other get_block(...) function.
5099  //=============================================================================
5100  template<>
 5101  void BlockPreconditioner<CRDoubleMatrix>::internal_get_block(
 5102  const unsigned& block_i,
5103  const unsigned& block_j,
5104  CRDoubleMatrix& output_block) const
5105  {
5106 #ifdef PARANOID
5107  // the number of blocks
5108  const unsigned n_blocks = this->internal_nblock_types();
5109 
5110  // paranoid check that block i is in this block preconditioner
5111  if (block_i >= n_blocks || block_j >= n_blocks)
5112  {
5113  std::ostringstream error_message;
5114  error_message
5115  << "Requested block (" << block_i << "," << block_j
5116  << "), however this preconditioner has internal_nblock_types() "
5117  << "= " << internal_nblock_types() << std::endl;
5118  throw OomphLibError(
5119  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5120  }
5121 
5122  // Check that the matrix is the same as that of the master
5123  if (is_subsidiary_block_preconditioner())
5124  {
5125  if (master_block_preconditioner_pt()->matrix_pt() != matrix_pt())
5126  {
5127  std::string err = "Master and subs should have same matrix.";
5128  throw OomphLibError(
5129  err, OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5130  }
5131  }
5132 #endif
5133 
5134  // Cast the pointer
5135  CRDoubleMatrix* cr_matrix_pt = dynamic_cast<CRDoubleMatrix*>(matrix_pt());
5136 
5137  // if + only one processor
5138  // + more than one processor but matrix_pt is not distributed
5139  // then use the serial get_block method
5140  if (cr_matrix_pt->distribution_pt()->communicator_pt()->nproc() == 1 ||
5141  !cr_matrix_pt->distribution_pt()->distributed())
5142  {
 5143  // pointers for the jacobian matrix in compressed row sparse format
5144  int* j_row_start;
5145  int* j_column_index;
5146  double* j_value;
5147 
5148  // sets pointers to jacobian matrix
5149  j_row_start = cr_matrix_pt->row_start();
5150  j_column_index = cr_matrix_pt->column_index();
5151  j_value = cr_matrix_pt->value();
5152 
5153  // get the block dimensions
5154  unsigned block_nrow = this->internal_block_dimension(block_i);
5155  unsigned block_ncol = this->internal_block_dimension(block_j);
5156 
5157  // allocate temporary storage for the component vectors of block (i,j)
 5158  // temp_ptr is used to point to an element in each row - required because
 5159  // we cannot assume that the order of the block's rows in the jacobian
 5160  // and in the block matrix will be the same
5161  int* temp_row_start = new int[block_nrow + 1];
5162  for (unsigned i = 0; i <= block_nrow; i++)
5163  {
5164  temp_row_start[i] = 0;
5165  }
5166  Vector<int> temp_ptr(block_nrow + 1);
5167  int block_nnz = 0;
5168 
5169  // get number of rows in source matrix
5170  unsigned master_nrow = this->master_nrow();
5171 
5172  // determine how many non zeros there are in the block (i,j)
5173  // also determines how many non zeros are stored in each row or column -
5174  // stored in temp_ptr temporarily
5175  for (unsigned k = 0; k < master_nrow; k++)
5176  {
5177  if (internal_block_number(k) == static_cast<int>(block_i))
5178  {
5179  for (int l = j_row_start[k]; l < j_row_start[k + 1]; l++)
5180  {
5181  if (internal_block_number(j_column_index[l]) ==
5182  static_cast<int>(block_j))
5183  {
5184  block_nnz++;
5185  temp_ptr[internal_index_in_block(k) + 1]++;
5186  }
5187  }
5188  }
5189  }
5190 
5191  // if the matrix is not empty
5192  int* temp_column_index = new int[block_nnz];
5193  double* temp_value = new double[block_nnz];
5194  if (block_nnz > 0)
5195  {
 5196  // uses the number of elements in each row of the block to determine the
 5197  // values for the block row start (temp_row_start)
5198  temp_row_start[0] = 0;
5199  for (unsigned k = 1; k <= block_nrow; k++)
5200  {
5201  temp_row_start[k] = temp_row_start[k - 1] + temp_ptr[k];
5202  temp_ptr[k] = temp_row_start[k];
5203  }
5204 
5205  // copies the relevant elements of the jacobian to the correct entries
5206  // of the block matrix
5207  for (unsigned k = 0; k < master_nrow; k++)
5208  {
5209  if (internal_block_number(k) == static_cast<int>(block_i))
5210  {
5211  for (int l = j_row_start[k]; l < j_row_start[k + 1]; l++)
5212  {
5213  if (internal_block_number(j_column_index[l]) ==
5214  static_cast<int>(block_j))
5215  {
5216  int kk = temp_ptr[internal_index_in_block(k)]++;
5217  temp_value[kk] = j_value[l];
5218  temp_column_index[kk] =
5219  internal_index_in_block(j_column_index[l]);
5220  }
5221  }
5222  }
5223  }
5224  }
5225 
5226 
5227  // Fill in the compressed row matrix ??ds Note: I kept the calls to
5228  // build as close as I could to before (had to replace new(dist) with
5229  // .build(dist) ).
5230  output_block.build(Internal_block_distribution_pt[block_i]);
5231  output_block.build_without_copy(
5232  block_ncol, block_nnz, temp_value, temp_column_index, temp_row_start);
5233 
5234 #ifdef PARANOID
5235  // checks to see if block matrix has been set up correctly
5236  // block_matrix_test(matrix_pt,block_i,block_j,block_pt);
5237  if (Run_block_matrix_test)
5238  {
5239  // checks to see if block matrix has been set up correctly
5240  block_matrix_test(block_i, block_j, &output_block);
5241  }
5242 #endif
5243  }
5244 
5245 
5246  // otherwise we are dealing with a distributed matrix
5247  else
5248  {
5249 #ifdef OOMPH_HAS_MPI
5250  // number of processors
5251  unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
5252 
5253  // my rank
5254  unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
5255 
5256  // sets pointers to jacobian matrix
5257  int* j_row_start = cr_matrix_pt->row_start();
5258  int* j_column_index = cr_matrix_pt->column_index();
5259  double* j_value = cr_matrix_pt->value();
5260 
5261  // number of non zeros in each row to be sent
5262  Vector<int*> nnz_send(nproc, 0);
5263 
5264  // number of non zeros in each row to be received
5265  Vector<int*> nnz_recv(nproc, 0);
5266 
5267  // storage for data to be sent
5268  Vector<int*> column_index_for_proc(nproc, 0);
5269  Vector<double*> values_for_proc(nproc, 0);
5270 
5271  // number of non zeros to be sent to each processor
5272  Vector<unsigned> total_nnz_send(nproc, 0);
5273 
5274  // number of rows of the block matrix on this processor
5275  unsigned nrow_local =
5276  Internal_block_distribution_pt[block_i]->nrow_local();
5277 
5278  // resize the nnz storage and compute nnz_send
5279  // and send and recv the nnz
5280  Vector<MPI_Request> send_req;
5281  Vector<MPI_Request> recv1_req;
5282  for (unsigned p = 0; p < nproc; p++)
5283  {
5284  int nrow_send = Nrows_to_send_for_get_block(block_i, p);
5285  int nrow_recv = Nrows_to_recv_for_get_block(block_i, p);
5286 
5287  // assemble nnz recv
5288  nnz_recv[p] = new int[nrow_recv];
5289 
5290  // assemble the storage to send
5291  if (nrow_send > 0 && p != my_rank)
5292  {
5293  nnz_send[p] = new int[nrow_send];
5294  }
5295 
5296  // compute the number of nnzs in each row and the total number
5297  // of nnzs
5298  for (int i = 0; i < nrow_send; i++)
5299  {
5300  unsigned row = Rows_to_send_for_get_block(block_i, p)[i];
5301  int c = 0;
5302  for (int r = j_row_start[row]; r < j_row_start[row + 1]; r++)
5303  {
5304  if (internal_block_number(j_column_index[r]) == int(block_j))
5305  {
5306  c++;
5307  }
5308  }
5309  if (p != my_rank)
5310  {
5311  nnz_send[p][i] = c;
5312  }
5313  else
5314  {
5315  nnz_recv[p][i] = c;
5316  }
5317  total_nnz_send[p] += c;
5318  }
5319 
5320  // send
5321  if (p != my_rank)
5322  {
5323  if (nrow_send)
5324  {
5325  MPI_Request req;
5326  MPI_Isend(nnz_send[p],
5327  nrow_send,
5328  MPI_INT,
5329  p,
5330  0,
5331  this->distribution_pt()->communicator_pt()->mpi_comm(),
5332  &req);
5333  send_req.push_back(req);
5334  }
5335 
5336  // recv
5337  if (nrow_recv)
5338  {
5339  MPI_Request req;
5340  MPI_Irecv(nnz_recv[p],
5341  nrow_recv,
5342  MPI_INT,
5343  p,
5344  0,
5345  this->distribution_pt()->communicator_pt()->mpi_comm(),
5346  &req);
5347  recv1_req.push_back(req);
5348  }
5349  }
5350  }
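 // (Editorial note: the distributed extraction proceeds in three phases --
 //  first the per-row nonzero counts are exchanged (above), then the values
 //  and column indices are exchanged via a struct datatype (below), and
 //  finally the local CSR arrays of the block are assembled from the
 //  received data.)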
5351 
5352  // next assemble the values and row_start data to be sent for each
5353  // processor
5354  for (unsigned p = 0; p < nproc; p++)
5355  {
5356  int nrow_send = Nrows_to_send_for_get_block(block_i, p);
5357 
5358  // assemble the storage for the values and column indices to be sent
5359  if (p != my_rank)
5360  {
5361  if (total_nnz_send[p] > 0)
5362  {
5363  values_for_proc[p] = new double[total_nnz_send[p]];
5364  column_index_for_proc[p] = new int[total_nnz_send[p]];
5365 
5366  // copy the values and column indices to the storage
5367  unsigned ptr = 0;
5368  for (int i = 0; i < nrow_send; i++)
5369  {
5370  unsigned row = Rows_to_send_for_get_block(block_i, p)[i];
5371  for (int r = j_row_start[row]; r < j_row_start[row + 1]; r++)
5372  {
5373  if (internal_block_number(j_column_index[r]) == int(block_j))
5374  {
5375  values_for_proc[p][ptr] = j_value[r];
5376  column_index_for_proc[p][ptr] =
5377  internal_index_in_block(j_column_index[r]);
5378  ptr++;
5379  }
5380  }
5381  }
5382 
5383  // create the datatypes
5384  MPI_Datatype types[2];
5385  MPI_Type_contiguous(total_nnz_send[p], MPI_DOUBLE, &types[0]);
5386  MPI_Type_commit(&types[0]);
5387  MPI_Type_contiguous(total_nnz_send[p], MPI_INT, &types[1]);
5388  MPI_Type_commit(&types[1]);
5389 
5390  // get the start address of the vectors
5391  MPI_Aint displacement[2];
5392  MPI_Get_address(values_for_proc[p], &displacement[0]);
5393  MPI_Get_address(column_index_for_proc[p], &displacement[1]);
5394 
5395  // compute the displacements
5396  displacement[1] -= displacement[0];
5397  displacement[0] -= displacement[0];
5398 
5399  // compute the block lengths
5400  int length[2];
5401  length[0] = length[1] = 1;
5402 
5403  // build the struct data type
5404  MPI_Datatype final_type;
5405  MPI_Type_create_struct(2, length, displacement, types, &final_type);
5406  MPI_Type_commit(&final_type);
5407  MPI_Type_free(&types[0]);
5408  MPI_Type_free(&types[1]);
5409 
5410  // and send
5411  MPI_Request req;
5412  MPI_Isend(values_for_proc[p],
5413  1,
5414  final_type,
5415  p,
5416  1,
5417  this->distribution_pt()->communicator_pt()->mpi_comm(),
5418  &req);
5419  send_req.push_back(req);
5420  MPI_Type_free(&final_type);
5421  }
5422  }
5423  }
5424 
5425  // wait for the recv to complete (the row_start recv which actually
5426  // contains the number of nnzs in each row)
5427  int c_recv = recv1_req.size();
5428  if (c_recv != 0)
5429  {
5430  MPI_Waitall(c_recv, &recv1_req[0], MPI_STATUS_IGNORE);
5431  }
5432 
5433  // compute the total number of nnzs to be received
5434  Vector<int> total_nnz_recv_from_proc(nproc);
5435  int local_block_nnz = 0;
5436  for (unsigned p = 0; p < nproc; p++)
5437  {
5438  // compute the total nnzs
5439  for (unsigned i = 0; i < Nrows_to_recv_for_get_block(block_i, p); i++)
5440  {
5441  total_nnz_recv_from_proc[p] += nnz_recv[p][i];
5442  }
5443  local_block_nnz += total_nnz_recv_from_proc[p];
5444  }
5445 
5446  // compute the offset for each block of nnzs (a matrix row) in the
5447  // values_recv and column_index_recv vectors
5448 
 5449  // first determine how many (contiguous) blocks of rows are to be received
5450  Vector<int> n_recv_block(nproc, 0);
5451  for (unsigned p = 0; p < nproc; p++)
5452  {
5453  if (Nrows_to_recv_for_get_block(block_i, p) > 0)
5454  {
5455  n_recv_block[p] = 1;
5456  }
5457  for (unsigned i = 1; i < Nrows_to_recv_for_get_block(block_i, p); i++)
5458  {
5459  if (Rows_to_recv_for_get_block(block_i, p)[i] !=
5460  Rows_to_recv_for_get_block(block_i, p)[i - 1] + 1)
5461  {
5462  n_recv_block[p]++;
5463  }
5464  }
5465  }
5466 
5467  // next assemble row start recv
5468  int* row_start_recv = new int[nrow_local + 1];
5469  for (unsigned i = 0; i <= nrow_local; i++)
5470  {
5471  row_start_recv[i] = 0;
5472  }
5473  for (unsigned p = 0; p < nproc; p++)
5474  {
5475  for (unsigned i = 0; i < Nrows_to_recv_for_get_block(block_i, p); i++)
5476  {
5477  row_start_recv[Rows_to_recv_for_get_block(block_i, p)[i]] =
5478  nnz_recv[p][i];
5479  }
5480  }
5481  int g = row_start_recv[0];
5482  row_start_recv[0] = 0;
5483  for (unsigned i = 1; i < nrow_local; i++)
5484  {
5485  int temp_g = g;
5486  g = row_start_recv[i];
5487  row_start_recv[i] = row_start_recv[i - 1] + temp_g;
5488  }
5489  row_start_recv[nrow_local] = row_start_recv[nrow_local - 1] + g;
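 // (Editorial note, illustrative example only: the shifted prefix sum above
 //  turns per-row nonzero counts into CSR row starts, e.g. counts {2, 1, 3}
 //  become row_start_recv = {0, 2, 3, 6}.)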
5490 
5491  // next assemble the offset and the number of nzs in each recv block
5492  Vector<int*> offset_recv_block(nproc, 0);
5493  Vector<int*> nnz_recv_block(nproc, 0);
5494  for (unsigned p = 0; p < nproc; p++)
5495  {
5496  if (Nrows_to_recv_for_get_block(block_i, p) > 0)
5497  {
5498  offset_recv_block[p] = new int[n_recv_block[p]];
5499  offset_recv_block[p][0] = 0;
5500  nnz_recv_block[p] = new int[n_recv_block[p]];
5501  for (int i = 0; i < n_recv_block[p]; i++)
5502  {
5503  nnz_recv_block[p][i] = 0;
5504  }
5505  unsigned ptr = 0;
5506  nnz_recv_block[p][ptr] += nnz_recv[p][0];
5507  offset_recv_block[p][0] =
5508  row_start_recv[Rows_to_recv_for_get_block(block_i, p)[0]];
5509  for (unsigned i = 1; i < Nrows_to_recv_for_get_block(block_i, p); i++)
5510  {
5511  if (Rows_to_recv_for_get_block(block_i, p)[i] !=
5512  Rows_to_recv_for_get_block(block_i, p)[i - 1] + 1)
5513  {
5514  ptr++;
5515  offset_recv_block[p][ptr] =
5516  row_start_recv[Rows_to_recv_for_get_block(block_i, p)[i]];
5517  }
5518  nnz_recv_block[p][ptr] += nnz_recv[p][i];
5519  }
5520  }
5521  delete[] nnz_recv[p];
5522  }
5523 
5524  // post the receives
5525  int* column_index_recv = new int[local_block_nnz];
5526  double* values_recv = new double[local_block_nnz];
5527  Vector<MPI_Request> recv2_req;
5528  for (unsigned p = 0; p < nproc; p++)
5529  {
5530  if (p != my_rank)
5531  {
5532  if (total_nnz_recv_from_proc[p] != 0)
5533  {
5534  // create the datatypes
5535  MPI_Datatype types[2];
5536  MPI_Type_indexed(n_recv_block[p],
5537  nnz_recv_block[p],
5538  offset_recv_block[p],
5539  MPI_DOUBLE,
5540  &types[0]);
5541  MPI_Type_commit(&types[0]);
5542  MPI_Type_indexed(n_recv_block[p],
5543  nnz_recv_block[p],
5544  offset_recv_block[p],
5545  MPI_INT,
5546  &types[1]);
5547  MPI_Type_commit(&types[1]);
5548 
5549  // compute the displacements
5550  MPI_Aint displacements[2];
5551  MPI_Get_address(values_recv, &displacements[0]);
5552  MPI_Get_address(column_index_recv, &displacements[1]);
5553  displacements[1] -= displacements[0];
5554  displacements[0] -= displacements[0];
5555 
5556  // compute the block lengths
5557  int length[2];
5558  length[0] = length[1] = 1;
5559 
5560  // create the final datatype
5561  MPI_Datatype final_type;
5562  MPI_Type_create_struct(
5563  2, length, displacements, types, &final_type);
5564  MPI_Type_commit(&final_type);
5565  MPI_Type_free(&types[0]);
5566  MPI_Type_free(&types[1]);
5567 
5568  // and the recv
5569  MPI_Request req;
5570  MPI_Irecv(values_recv,
5571  1,
5572  final_type,
5573  p,
5574  1,
5575  this->distribution_pt()->communicator_pt()->mpi_comm(),
5576  &req);
5577  recv2_req.push_back(req);
5578  MPI_Type_free(&final_type);
5579  }
5580  }
5581  else
5582  {
5583  // next send the values and column indices to self
5584  unsigned block_ptr = 0;
5585  unsigned counter = 0;
5586  int nrow_send = Nrows_to_send_for_get_block(block_i, my_rank);
5587  if (nrow_send > 0)
5588  {
5589  unsigned offset = offset_recv_block[my_rank][0];
5590  for (int i = 0; i < nrow_send; i++)
5591  {
5592  if (i > 0)
5593  {
5594  if (Rows_to_recv_for_get_block(block_i, p)[i] !=
5595  Rows_to_recv_for_get_block(block_i, p)[i - 1] + 1)
5596  {
5597  counter = 0;
5598  block_ptr++;
5599  offset = offset_recv_block[my_rank][block_ptr];
5600  }
5601  }
5602  unsigned row = Rows_to_send_for_get_block(block_i, my_rank)[i];
5603  for (int r = j_row_start[row]; r < j_row_start[row + 1]; r++)
5604  {
5605  if (internal_block_number(j_column_index[r]) == int(block_j))
5606  {
5607  values_recv[offset + counter] = j_value[r];
5608  column_index_recv[offset + counter] =
5609  internal_index_in_block(j_column_index[r]);
5610  counter++;
5611  }
5612  }
5613  }
5614  }
5615  }
5616  }
5617 
 5618  // wait for the recv to complete (for the column_index and the values)
5619  c_recv = recv2_req.size();
5620  if (c_recv != 0)
5621  {
5622  MPI_Waitall(c_recv, &recv2_req[0], MPI_STATUS_IGNORE);
5623  }
5624 
5625  // Fill in the compressed row matrix
5626  output_block.build(Internal_block_distribution_pt[block_i]);
5627  output_block.build_without_copy(this->internal_block_dimension(block_j),
5628  local_block_nnz,
5629  values_recv,
5630  column_index_recv,
5631  row_start_recv);
5632 
5633  // wait for the send to complete (nnz / row_start)
5634  int c_send = send_req.size();
5635  if (c_send)
5636  {
5637  MPI_Waitall(c_send, &send_req[0], MPI_STATUS_IGNORE);
5638  }
5639 
5640  // delete temp storage used for assembling data for communication
5641  for (unsigned p = 0; p < nproc; p++)
5642  {
5643  delete[] nnz_send[p];
5644  delete[] column_index_for_proc[p];
5645  delete[] values_for_proc[p];
5646  delete[] offset_recv_block[p];
5647  delete[] nnz_recv_block[p];
5648  }
5649 #else
5650  // throw error
5651  std::ostringstream error_message;
5652  error_message << "The matrix is distributed and on more than one "
5653  << "processor. MPI is required.";
5654  throw OomphLibError(
5655  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5656 #endif
5657  }
5658  }
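 // ---------------------------------------------------------------------
 // Editorial usage sketch (not part of oomph-lib): as the documentation above
 // the function notes, derived preconditioners should normally use the public
 // get_block(...) interface (which respects dof-type coarsening and any
 // replaced blocks) rather than internal_get_block(...). A minimal fragment
 // from a hypothetical setup(...):
 //
 //   // Extract the (0,0) diagonal block as a stand-alone matrix
 //   CRDoubleMatrix block00;
 //   this->get_block(0, 0, block00);
 //
 //   // ... hand block00 to a subsidiary solver/preconditioner of choice ...
 // ---------------------------------------------------------------------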
5659 
5660  //=============================================================================
5661  /// Gets dof-level block (i,j).
5662  /// If Replacement_dof_block_pt(i,j) is not null, then the replacement
5663  /// block is returned via a deep copy.
5664  ///
5665  /// Otherwise if this is the uppermost block preconditioner then it calls
5666  /// internal_get_block(i,j), else if it is a subsidiary
 5667  /// block preconditioner, it will call its master block preconditioner's
5668  /// get_dof_level_block function.
5669  //=============================================================================
5670  template<>
 5671  void BlockPreconditioner<CRDoubleMatrix>::get_dof_level_block(
 5672  const unsigned& block_i,
5673  const unsigned& block_j,
5674  CRDoubleMatrix& output_block,
5675  const bool& ignore_replacement_block) const
5676  {
5677 #ifdef PARANOID
5678  // the number of dof types.
5679  unsigned para_ndofs = ndof_types();
5680 
5681  // paranoid check that block i is in this block preconditioner
5682  if (block_i >= para_ndofs || block_j >= para_ndofs)
5683  {
5684  std::ostringstream err_msg;
5685  err_msg << "Requested dof block (" << block_i << "," << block_j
5686  << "), however this preconditioner has ndof_types() "
5687  << "= " << para_ndofs << std::endl;
5688  throw OomphLibError(
5689  err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5690  }
5691 #endif
5692 
5693  CRDoubleMatrix* tmp_block_pt =
5694  Replacement_dof_block_pt.get(block_i, block_j);
5695 
5696  if ((tmp_block_pt == 0) || ignore_replacement_block)
5697  {
5698  // Getting the block from parent preconditioner
5699  const unsigned ndof_in_parent_i =
5700  Doftype_coarsen_map_coarse[block_i].size();
5701  const unsigned ndof_in_parent_j =
5702  Doftype_coarsen_map_coarse[block_j].size();
5703 
5704  if (ndof_in_parent_i == 1 && ndof_in_parent_j == 1)
5705  {
5706  unsigned parent_dof_i = Doftype_coarsen_map_coarse[block_i][0];
5707  unsigned parent_dof_j = Doftype_coarsen_map_coarse[block_j][0];
5708 
5709  if (is_master_block_preconditioner())
5710  {
5711  internal_get_block(parent_dof_i, parent_dof_j, output_block);
5712  }
5713  else
5714  {
5715  parent_dof_i = Doftype_in_master_preconditioner_coarse[parent_dof_i];
5716  parent_dof_j = Doftype_in_master_preconditioner_coarse[parent_dof_j];
5717 
5718  master_block_preconditioner_pt()->get_dof_level_block(
5719  parent_dof_i, parent_dof_j, output_block, ignore_replacement_block);
5720  }
5721  }
5722  else
5723  {
5724  DenseMatrix<CRDoubleMatrix*> tmp_blocks_pt(
5725  ndof_in_parent_i, ndof_in_parent_j, 0);
5726 
5727  Vector<Vector<unsigned>> new_block(
5728  ndof_in_parent_i, Vector<unsigned>(ndof_in_parent_j, 0));
5729 
5730  for (unsigned dof_i = 0; dof_i < ndof_in_parent_i; dof_i++)
5731  {
5732  unsigned parent_dof_i = Doftype_coarsen_map_coarse[block_i][dof_i];
5733  if (is_subsidiary_block_preconditioner())
5734  {
5735  parent_dof_i =
5736  Doftype_in_master_preconditioner_coarse[parent_dof_i];
5737  }
5738 
5739  for (unsigned dof_j = 0; dof_j < ndof_in_parent_j; dof_j++)
5740  {
5741  unsigned parent_dof_j = Doftype_coarsen_map_coarse[block_j][dof_j];
5742 
5743  tmp_blocks_pt(dof_i, dof_j) = new CRDoubleMatrix;
5744 
5745  new_block[dof_i][dof_j] = 1;
5746 
5747  if (is_master_block_preconditioner())
5748  {
5749  internal_get_block(
5750  parent_dof_i, parent_dof_j, *tmp_blocks_pt(dof_i, dof_j));
5751  }
5752  else
5753  {
5754  parent_dof_j =
5755  Doftype_in_master_preconditioner_coarse[parent_dof_j];
5756 
5757  master_block_preconditioner_pt()->get_dof_level_block(
5758  parent_dof_i,
5759  parent_dof_j,
5760  *tmp_blocks_pt(dof_i, dof_j),
5761  ignore_replacement_block);
5762  }
5763  }
5764  }
5765 
5766  Vector<LinearAlgebraDistribution*> tmp_row_dist_pt(ndof_in_parent_i, 0);
5767 
5768  for (unsigned parent_dof_i = 0; parent_dof_i < ndof_in_parent_i;
5769  parent_dof_i++)
5770  {
5771  unsigned mapped_dof_i =
5772  Doftype_coarsen_map_coarse[block_i][parent_dof_i];
5773 
5774  if (is_master_block_preconditioner())
5775  {
5776  tmp_row_dist_pt[parent_dof_i] =
5777  Internal_block_distribution_pt[mapped_dof_i];
5778  }
5779  else
5780  {
5781  mapped_dof_i =
5782  Doftype_in_master_preconditioner_coarse[mapped_dof_i];
5783 
5784  tmp_row_dist_pt[parent_dof_i] =
5785  master_block_preconditioner_pt()->dof_block_distribution_pt(
5786  mapped_dof_i);
5787  }
5788  }
5789 
5790  Vector<LinearAlgebraDistribution*> tmp_col_dist_pt(ndof_in_parent_j, 0);
5791 
5792  for (unsigned parent_dof_j = 0; parent_dof_j < ndof_in_parent_j;
5793  parent_dof_j++)
5794  {
5795  unsigned mapped_dof_j =
5796  Doftype_coarsen_map_coarse[block_j][parent_dof_j];
5797 
5798  if (is_master_block_preconditioner())
5799  {
5800  tmp_col_dist_pt[parent_dof_j] =
5801  Internal_block_distribution_pt[mapped_dof_j];
5802  }
5803  else
5804  {
5805  mapped_dof_j =
5806  Doftype_in_master_preconditioner_coarse[mapped_dof_j];
5807  tmp_col_dist_pt[parent_dof_j] =
5808  master_block_preconditioner_pt()->dof_block_distribution_pt(
5809  mapped_dof_j);
5810  }
5811  }
5812 
 5813  CRDoubleMatrixHelpers::concatenate_without_communication(
 5814  tmp_row_dist_pt, tmp_col_dist_pt, tmp_blocks_pt, output_block);
5815 
5816  for (unsigned dof_i = 0; dof_i < ndof_in_parent_i; dof_i++)
5817  {
5818  for (unsigned dof_j = 0; dof_j < ndof_in_parent_j; dof_j++)
5819  {
5820  if (new_block[dof_i][dof_j])
5821  {
5822  delete tmp_blocks_pt(dof_i, dof_j);
5823  }
5824  }
5825  }
5826  }
5827  }
5828  else
5829  {
5830  CRDoubleMatrixHelpers::deep_copy(tmp_block_pt, output_block);
5831  }
5832  }
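 // (Editorial note, illustrative example only: if dof-type coarsening has
 //  grouped parent dof types {0, 1} into this preconditioner's dof type 0,
 //  i.e. Doftype_coarsen_map_coarse[0] = {0, 1}, then get_dof_level_block(0, 0, ...)
 //  assembles a 2x2 arrangement of parent dof blocks and concatenates them,
 //  without communication, into the single returned matrix.)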
5833 
5834  //=============================================================================
 5835  /// Test function to check that every element in the block matrix
5836  /// (block_i,block_j) matches the corresponding element in the original matrix
5837  //=============================================================================
5838  template<typename MATRIX>
 5839  void BlockPreconditioner<MATRIX>::block_matrix_test(
 5840  const unsigned& block_i,
5841  const unsigned& block_j,
5842  const MATRIX* block_matrix_pt) const
5843  {
5844  // boolean flag to indicate whether test is passed
5845  bool check = true;
5846 
5847  // number of rows in matrix
5848  unsigned n_row = matrix_pt()->nrow();
5849 
5850  // number of columns in matrix
5851  unsigned n_col = matrix_pt()->ncol();
5852 
5853  // loop over rows of original matrix
5854  for (unsigned i = 0; i < n_row; i++)
5855  {
5856  // if this coefficient is associated with a block in this block
5857  // preconditioner
5858  if (static_cast<int>(block_i) == this->internal_block_number(i))
5859  {
5860  // loop over columns of original matrix
5861  for (unsigned j = 0; j < n_col; j++)
5862  {
 5863  // if the coefficient is associated with a block in this block
5864  // preconditioner
5865  if (static_cast<int>(block_j) == this->internal_block_number(j))
5866  {
5867  // check whether elements in original matrix and matrix of block
5868  // pointers match
5869  if (matrix_pt()->operator()(i, j) !=
5870  block_matrix_pt->operator()(internal_index_in_block(i),
5871  internal_index_in_block(j)))
5872  {
5873  check = false;
5874  }
5875  }
5876  }
5877  }
5878  }
5879 
5880  // throw error
5881  if (!check)
5882  {
5883  std::ostringstream error_message;
 5884  error_message << "The required elements have not been successfully copied"
5885  << " from the original matrix to the block matrices";
5886  throw OomphLibError(
5887  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5888  }
5889  }
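 // (Editorial note: this check only runs when the library is compiled with
 //  PARANOID and the static flag Run_block_matrix_test has been set to true,
 //  e.g. via
 //    BlockPreconditioner<CRDoubleMatrix>::Run_block_matrix_test = true;
 //  assuming the flag is accessible from the calling scope. It compares every
 //  block coefficient against the original matrix and is intended for
 //  debugging only.)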
5890 
5891 
5892  template class BlockPreconditioner<CRDoubleMatrix>;
5893 
5894 } // namespace oomph