32 template<
typename MATRIX>
49 template<
typename MATRIX>
55 if (this->is_master_block_preconditioner())
57 std::ostringstream err_msg;
61 err_msg <<
"No meshes have been set for this block preconditioner!\n"
62 <<
"Set one with set_nmesh(...), set_mesh(...)" << std::endl;
64 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
65 for (
unsigned m = 0; m < n; m++)
69 err_msg <<
"The mesh pointer to mesh " << m <<
" is null!\n"
70 <<
"Set a non-null one with set_mesh(...)" << std::endl;
72 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
82 if (is_subsidiary_block_preconditioner())
86 unsigned para_doftype_in_master_preconditioner_coarse_size =
87 Doftype_in_master_preconditioner_coarse.size();
93 if (para_doftype_in_master_preconditioner_coarse_size == 0)
95 std::ostringstream err_msg;
96 err_msg <<
"The mapping from the dof types of the master "
97 <<
"block preconditioner \n"
98 <<
"to the subsidiary block preconditioner is empty.\n"
99 <<
"Doftype_in_master_preconditioner_coarse.size() == 0 \n"
100 <<
"has turn_into_subsidiary_block_preconditioner(...)\n"
101 <<
"been called with the correct parameters?\n"
104 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
149 std::set<unsigned> doftype_map_set;
153 unsigned para_doftype_coarsen_map_coarse_size =
154 Doftype_coarsen_map_coarse.size();
162 for (
unsigned i = 0;
i < para_doftype_coarsen_map_coarse_size;
i++)
165 unsigned para_doftype_coarsen_map_coarse_i_size =
166 Doftype_coarsen_map_coarse[
i].size();
167 for (
unsigned j = 0; j < para_doftype_coarsen_map_coarse_i_size; j++)
170 std::pair<std::set<unsigned>::iterator,
bool> doftype_map_ret =
171 doftype_map_set.insert(Doftype_coarsen_map_coarse[
i][j]);
173 if (!doftype_map_ret.second)
175 std::ostringstream err_msg;
176 err_msg <<
"Error: the doftype number "
177 << Doftype_coarsen_map_coarse[
i][j]
178 <<
" is already inserted." << std::endl;
180 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
189 if (para_doftype_in_master_preconditioner_coarse_size !=
190 doftype_map_set.size())
192 std::ostringstream err_msg;
193 err_msg <<
"The size of doftype_in_master_preconditioner_coarse "
194 <<
"must be the same as the total\n"
195 <<
"number of values in the doftype_coarsen_map_coarse vector."
198 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
204 unsigned para_doftype_in_master_preconditioner_coarse_size_minus_one =
205 para_doftype_in_master_preconditioner_coarse_size - 1;
206 if (para_doftype_in_master_preconditioner_coarse_size_minus_one !=
207 *doftype_map_set.rbegin())
209 std::ostringstream err_msg;
210 err_msg <<
"The maximum dof type number in the "
211 <<
"doftype_coarsen_map vector must be "
212 << para_doftype_in_master_preconditioner_coarse_size_minus_one
215 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
240 Doftype_in_master_preconditioner_fine.resize(0);
241 Doftype_coarsen_map_fine.resize(0);
272 if (master_block_preconditioner_pt()->doftype_coarsen_map_fine().size() ==
275 std::ostringstream err_msg;
276 err_msg <<
"The master block preconditioner's "
277 <<
"Doftype_coarsen_map_fine is not\n"
278 <<
"set up properly.\n"
280 <<
"This vector is constructed in the function "
281 <<
"block_setup(...).\n"
284 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
288 unsigned doftype_in_master_preconditioner_coarse_size =
289 Doftype_in_master_preconditioner_coarse.size();
290 for (
unsigned i = 0;
i < doftype_in_master_preconditioner_coarse_size;
294 unsigned subvec_index = Doftype_in_master_preconditioner_coarse[
i];
299 Master_block_preconditioner_pt->get_fine_grain_dof_types_in(
302 Doftype_in_master_preconditioner_fine.insert(
303 Doftype_in_master_preconditioner_fine.end(),
304 tmp_master_dof_subvec.begin(),
305 tmp_master_dof_subvec.end());
379 unsigned dof_type_index = 0;
380 for (
unsigned i = 0;
i < doftype_in_master_preconditioner_coarse_size;
385 unsigned coarse_dof = Doftype_in_master_preconditioner_coarse[
i];
387 unsigned n_master_fine_doftypes =
388 Master_block_preconditioner_pt->nfine_grain_dof_types_in(coarse_dof);
391 for (
unsigned j = 0; j < n_master_fine_doftypes; j++)
393 tmp_sub_vec.push_back(dof_type_index);
396 master_fine_doftype_translated.push_back(tmp_sub_vec);
405 unsigned doftype_coarsen_map_coarse_size =
406 Doftype_coarsen_map_coarse.size();
407 for (
unsigned i = 0;
i < doftype_coarsen_map_coarse_size;
i++)
410 unsigned doftype_coarsen_map_coarse_i_size =
411 Doftype_coarsen_map_coarse[
i].size();
412 for (
unsigned j = 0; j < doftype_coarsen_map_coarse_i_size; j++)
414 unsigned subvec_i = Doftype_coarsen_map_coarse[
i][j];
416 tmp_vec.insert(tmp_vec.end(),
417 master_fine_doftype_translated[subvec_i].begin(),
418 master_fine_doftype_translated[subvec_i].end());
421 Doftype_coarsen_map_fine.push_back(tmp_vec);
426 Internal_ndof_types = Doftype_in_master_preconditioner_fine.size();
429 Internal_nblock_types = Internal_ndof_types;
434 for (
unsigned b = 0; b < Internal_ndof_types; b++)
436 Nrow += this->internal_dof_block_dimension(b);
442 std::ostringstream error_message;
444 <<
"Nrow=0 in subsidiary preconditioner. This seems fishy and\n"
445 <<
"suggests that block_setup() was not called for the \n"
446 <<
"master block preconditioner yet.";
448 OOMPH_CURRENT_FUNCTION,
449 OOMPH_EXCEPTION_LOCATION);
471 if (is_master_block_preconditioner())
474 unsigned n_external_dof_types = dof_to_block_map.size();
482 unsigned n_internal_dof_types = internal_ndof_types();
484 if (n_internal_dof_types != n_external_dof_types)
486 std::ostringstream err_msg;
488 <<
"The internal ndof types and the length of the dof_to_block_map\n"
489 <<
"vector is not the same. Since this is the master block "
490 <<
"preconditioner,\n"
491 <<
"you must describe which block each DOF type belongs to,\n"
492 <<
"no more, no less."
493 <<
"internal_ndof_types = " << n_internal_dof_types <<
"\n"
494 <<
"dof_to_block_map.size() = " << n_external_dof_types <<
"\n";
496 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
501 Doftype_coarsen_map_fine.clear();
502 Doftype_coarsen_map_coarse.clear();
503 Doftype_coarsen_map_fine.reserve(n_external_dof_types);
504 Doftype_coarsen_map_coarse.reserve(n_external_dof_types);
507 for (
unsigned i = 0;
i < n_external_dof_types;
i++)
511 Doftype_coarsen_map_fine.push_back(tmp_vec);
512 Doftype_coarsen_map_coarse.push_back(tmp_vec);
522 if ((Doftype_coarsen_map_fine.size() == 0) ||
523 (Doftype_coarsen_map_coarse.size() == 0))
525 std::ostringstream err_msg;
526 err_msg <<
"Either the Doftype_coarsen_map_fine or the \n"
527 <<
"Doftype_coarsen_map_coarse vectors is of size 0.\n"
528 <<
"Did you remember to call the function "
529 <<
"turn_into_subsidiary_block_preconditioner(...)?";
531 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
581 unsigned dof_to_block_map_size = dof_to_block_map.size();
584 if (dof_to_block_map_size != Doftype_coarsen_map_coarse.size())
586 std::ostringstream err_msg;
588 <<
"The size of dof_to_block_map and Doftype_coarsen_map_coarse is "
591 <<
"dof_to_block_map.size() = " << dof_to_block_map_size <<
"\n"
592 <<
"Doftype_coarsen_map_coarse.size() = "
593 << Doftype_coarsen_map_coarse.size() <<
".\n"
594 <<
"One of the two list is incorrect, please look at the comments\n"
595 <<
"in the source code for more details.";
597 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
605 unsigned max_block_number =
606 *std::max_element(dof_to_block_map.begin(), dof_to_block_map.end());
624 Block_to_dof_map_coarse.clear();
626 const unsigned tmp_nblock = max_block_number + 1;
628 Block_to_dof_map_coarse.resize(tmp_nblock);
630 for (
unsigned i = 0;
i < dof_to_block_map_size;
i++)
632 Block_to_dof_map_coarse[dof_to_block_map[
i]].push_back(
i);
635 Block_to_dof_map_fine.clear();
636 Block_to_dof_map_fine.resize(tmp_nblock);
637 for (
unsigned block_i = 0; block_i < tmp_nblock; block_i++)
640 const unsigned ndof_in_block = Block_to_dof_map_coarse[block_i].size();
641 for (
unsigned dof_i = 0; dof_i < ndof_in_block; dof_i++)
643 const unsigned coarsened_dof_i =
644 Block_to_dof_map_coarse[block_i][dof_i];
649 Doftype_coarsen_map_fine[coarsened_dof_i];
651 Block_to_dof_map_fine[block_i].insert(
652 Block_to_dof_map_fine[block_i].end(),
668 unsigned tmp_internal_ndof_types = internal_ndof_types();
670 dof_to_block_map.resize(tmp_internal_ndof_types, 0);
672 for (
unsigned i = 0;
i < tmp_internal_ndof_types;
i++)
674 dof_to_block_map[
i] =
i;
683 if (is_master_block_preconditioner())
688 unsigned local_nmesh = nmesh();
691 if (local_nmesh == 0)
693 std::ostringstream error_msg;
694 error_msg <<
"Cannot setup blocks because no meshes have been set.";
696 error_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
702 for (
unsigned mesh_i = 0; mesh_i < local_nmesh; mesh_i++)
705 unsigned n_element = mesh_pt(mesh_i)->nelement();
713 typeid(*(mesh_pt(mesh_i)->element_pt(0))).name();
717 if (
bool(Allow_multiple_element_type_in_mesh[mesh_i]))
720 unsigned first_element_ndof_type =
721 mesh_pt(mesh_i)->element_pt(0)->ndof_types();
724 for (
unsigned el_i = 1; el_i < n_element; el_i++)
727 unsigned current_element_ndof_type =
728 mesh_pt(mesh_i)->element_pt(el_i)->ndof_types();
732 typeid(*(mesh_pt(mesh_i)->element_pt(el_i))).name();
735 if (current_element_ndof_type != first_element_ndof_type)
737 std::ostringstream error_message;
739 <<
"Elements in the same mesh MUST have the same number of "
742 <<
"The element in mesh " << mesh_i <<
", at position "
744 << current_element_string <<
", \n"
745 <<
"with ndof types: " << current_element_ndof_type <<
".\n"
746 <<
"The first element in the same mesh is: \n"
747 << first_element_string <<
", \n"
748 <<
"with ndof types: " << first_element_ndof_type <<
".\n";
749 throw OomphLibError(error_message.str(),
750 OOMPH_CURRENT_FUNCTION,
751 OOMPH_EXCEPTION_LOCATION);
760 for (
unsigned el_i = 1; el_i < n_element; el_i++)
764 typeid(*(mesh_pt(mesh_i)->element_pt(el_i))).name();
767 if (current_element_string.compare(first_element_string) != 0)
769 std::ostringstream error_message;
771 <<
"By default, a mesh containing block preconditionable "
772 <<
"elements must contain only one type of element.\n"
773 <<
"The element in mesh " << mesh_i <<
", at position "
775 << current_element_string <<
"\n"
776 <<
"The first element in the same mesh is: \n"
777 << first_element_string <<
"\n"
778 <<
"If this is correct, consider calling the set_mesh(...) "
780 <<
"the optional argument set true to allow multiple "
782 <<
"types in the same mesh.\n"
783 <<
"Note: A minimal requirement is that the elements in the "
785 <<
"mesh MUST have the same number of DOF types.";
786 throw OomphLibError(error_message.str(),
787 OOMPH_CURRENT_FUNCTION,
788 OOMPH_EXCEPTION_LOCATION);
798 this->clear_block_preconditioner_base();
802 unsigned my_rank = comm_pt()->my_rank();
803 unsigned nproc = comm_pt()->nproc();
811 unsigned* nreq_sparse =
new unsigned[nproc]();
812 unsigned* nreq_sparse_for_proc =
new unsigned[nproc]();
813 unsigned** index_in_dof_block_sparse_send =
new unsigned*[nproc]();
814 unsigned** dof_number_sparse_send =
new unsigned*[nproc]();
815 Vector<MPI_Request> send_requests_sparse;
816 Vector<MPI_Request> recv_requests_sparse;
822 if (is_master_block_preconditioner())
825 Ndof_types_in_mesh.assign(nmesh(), 0);
826 for (
unsigned i = 0;
i < nmesh();
i++)
828 Ndof_types_in_mesh[
i] = mesh_pt(
i)->ndof_types();
832 if (
dynamic_cast<DistributableLinearAlgebraObject*
>(matrix_pt()))
834 this->build_distribution(
835 dynamic_cast<DistributableLinearAlgebraObject*
>(matrix_pt())
836 ->distribution_pt());
840 LinearAlgebraDistribution dist(comm_pt(), matrix_pt()->nrow(),
false);
841 this->build_distribution(dist);
843 Nrow = matrix_pt()->nrow();
847 bool matrix_distributed =
848 (this->distribution_pt()->distributed() &&
849 this->distribution_pt()->communicator_pt()->nproc() > 1);
853 CRDoubleMatrix* cr_matrix_pt =
dynamic_cast<CRDoubleMatrix*
>(matrix_pt());
855 if (cr_matrix_pt == 0)
857 std::ostringstream error_message;
858 error_message <<
"Block setup for distributed matrices only works "
859 <<
"for CRDoubleMatrices";
860 throw OomphLibError(error_message.str(),
861 OOMPH_CURRENT_FUNCTION,
862 OOMPH_EXCEPTION_LOCATION);
867 unsigned first_row = this->distribution_pt()->first_row();
868 unsigned nrow_local = this->distribution_pt()->nrow_local();
869 unsigned last_row = first_row + nrow_local - 1;
876 DenseMatrix<unsigned> dense_required_rows(nproc, 2);
877 for (
unsigned p = 0; p < nproc; p++)
879 dense_required_rows(p, 0) = this->distribution_pt()->first_row(p);
880 dense_required_rows(p, 1) = this->distribution_pt()->first_row(p) +
881 this->distribution_pt()->nrow_local(p) - 1;
888 std::set<unsigned> sparse_global_rows_for_block_lookup;
889 if (matrix_distributed)
891 unsigned nnz = cr_matrix_pt->nnz();
892 int* column_index = cr_matrix_pt->column_index();
893 for (
unsigned i = 0;
i < nnz;
i++)
895 unsigned ci = column_index[
i];
896 if (ci < first_row || ci > last_row)
898 sparse_global_rows_for_block_lookup.insert(ci);
903 int nsparse = sparse_global_rows_for_block_lookup.size();
905 Global_index_sparse.resize(0);
906 std::copy(sparse_global_rows_for_block_lookup.begin(),
907 sparse_global_rows_for_block_lookup.end(),
908 std::back_inserter(Global_index_sparse));
910 Index_in_dof_block_sparse.resize(nsparse);
911 Dof_number_sparse.resize(nsparse);
912 sparse_global_rows_for_block_lookup.clear();
914 Vector<MPI_Request> recv_requests_sparse_nreq;
915 if (matrix_distributed)
917 MPI_Aint base_displacement_sparse;
918 MPI_Get_address(nreq_sparse, &base_displacement_sparse);
921 for (
unsigned p = 0; p < nproc; p++)
926 for (
int i = 0;
i < nsparse; ++
i)
928 if (Global_index_sparse[
i] < dense_required_rows(p, 0))
934 if (Global_index_sparse[
i] <= dense_required_rows(p, 1))
946 if (nreq_sparse[p] > 0)
950 MPI_Isend(&nreq_sparse[p],
955 comm_pt()->mpi_comm(),
957 send_requests_sparse.push_back(req1);
961 MPI_Isend(&Global_index_sparse[begin],
966 comm_pt()->mpi_comm(),
968 send_requests_sparse.push_back(req2);
973 MPI_Datatype types[2];
974 MPI_Aint displacements[2];
978 MPI_Type_contiguous(nreq_sparse[p], MPI_UNSIGNED, &types[0]);
979 MPI_Type_commit(&types[0]);
980 MPI_Get_address(&Index_in_dof_block_sparse[begin],
982 displacements[0] -= base_displacement_sparse;
986 MPI_Type_contiguous(nreq_sparse[p], MPI_UNSIGNED, &types[1]);
987 MPI_Type_commit(&types[1]);
988 MPI_Get_address(&Dof_number_sparse[begin], &displacements[1]);
989 displacements[1] -= base_displacement_sparse;
993 MPI_Datatype recv_type;
994 MPI_Type_create_struct(
995 2, lengths, displacements, types, &recv_type);
996 MPI_Type_commit(&recv_type);
997 MPI_Type_free(&types[0]);
998 MPI_Type_free(&types[1]);
1003 nreq_sparse, 1, recv_type, p, 33, comm_pt()->mpi_comm(), &req);
1004 recv_requests_sparse.push_back(req);
1005 MPI_Type_free(&recv_type);
1009 if (nreq_sparse[p] == 0)
1013 &zero, 1, MPI_UNSIGNED, p, 31, comm_pt()->mpi_comm(), &req1);
1014 send_requests_sparse.push_back(req1);
1019 MPI_Irecv(&nreq_sparse_for_proc[p],
1024 comm_pt()->mpi_comm(),
1026 recv_requests_sparse_nreq.push_back(req);
1032 Dof_number_dense.resize(nrow_local);
1033 Index_in_dof_block_dense.resize(nrow_local);
1036 Internal_ndof_types = 0;
1041 Vector<int> previously_assigned_block_number(nrow_local,
1046 bool problem_distributed =
false;
1049 #ifdef OOMPH_HAS_MPI
1050 problem_distributed = any_mesh_distributed();
1054 if (!problem_distributed)
1059 unsigned dof_offset = 0;
1062 for (
unsigned m = 0; m < nmesh(); m++)
1065 unsigned n_element = mesh_pt(m)->nelement();
1069 unsigned ndof_in_element = ndof_types_in_mesh(m);
1070 Internal_ndof_types += ndof_in_element;
1072 for (
unsigned e = 0;
e < n_element;
e++)
1076 std::list<std::pair<unsigned long, unsigned>> dof_lookup_list;
1079 mesh_pt(m)->element_pt(
e)->get_dof_numbers_for_unknowns(
1084 typedef std::list<std::pair<unsigned long, unsigned>>::iterator IT;
1085 for (IT it = dof_lookup_list.begin(); it != dof_lookup_list.end();
1088 unsigned long global_dof = it->first;
1089 if (global_dof >=
unsigned(first_row) &&
1090 global_dof <=
unsigned(last_row))
1092 unsigned dof_number = (it->second) + dof_offset;
1093 Dof_number_dense[global_dof - first_row] = dof_number;
1097 if (previously_assigned_block_number[global_dof - first_row] <
1100 previously_assigned_block_number[global_dof - first_row] =
1113 dof_offset += ndof_in_element;
1118 for (
unsigned i = 0;
i < nrow_local;
i++)
1120 if (previously_assigned_block_number[
i] < 0)
1122 std::ostringstream error_message;
1123 error_message <<
"Not all degrees of freedom have had DOF type "
1124 <<
"numbers allocated. Dof number " <<
i
1125 <<
" is unallocated.";
1126 throw OomphLibError(error_message.str(),
1127 OOMPH_CURRENT_FUNCTION,
1128 OOMPH_EXCEPTION_LOCATION);
1136 #ifdef OOMPH_HAS_MPI
1140 unsigned dof_offset = 0;
1144 std::map<unsigned long, unsigned> my_dof_map;
1147 for (
unsigned m = 0; m < nmesh(); m++)
1150 unsigned n_element = this->mesh_pt(m)->nelement();
1154 unsigned ndof_in_element = ndof_types_in_mesh(m);
1155 Internal_ndof_types += ndof_in_element;
1158 for (
unsigned e = 0;
e < n_element;
e++)
1161 if (!this->mesh_pt(m)->element_pt(
e)->is_halo())
1165 std::list<std::pair<unsigned long, unsigned>> dof_lookup_list;
1169 this->mesh_pt(m)->element_pt(
e)->get_dof_numbers_for_unknowns(
1173 typedef std::list<std::pair<unsigned long, unsigned>>::iterator
1175 for (IT it = dof_lookup_list.begin(); it != dof_lookup_list.end();
1178 it->second = (it->second) + dof_offset;
1179 my_dof_map[it->first] = it->second;
1189 dof_offset += ndof_in_element;
1193 unsigned my_ndof = my_dof_map.size();
1194 unsigned long* my_global_dofs =
new unsigned long[my_ndof];
1195 unsigned* my_dof_numbers =
new unsigned[my_ndof];
1196 typedef std::map<unsigned long, unsigned>::iterator IT;
1198 for (IT it = my_dof_map.begin(); it != my_dof_map.end(); it++)
1200 my_global_dofs[pt] = it->first;
1201 my_dof_numbers[pt] = it->second;
1209 int* first_dof_to_send =
new int[nproc];
1210 int* ndof_to_send =
new int[nproc];
1212 for (
unsigned p = 0; p < nproc; p++)
1214 first_dof_to_send[p] = 0;
1215 ndof_to_send[p] = 0;
1216 while (ptr < my_ndof &&
1217 my_global_dofs[ptr] < dense_required_rows(p, 0))
1221 first_dof_to_send[p] = ptr;
1222 while (ptr < my_ndof &&
1223 my_global_dofs[ptr] <= dense_required_rows(p, 1))
1231 int* ndof_to_recv =
new int[nproc];
1232 MPI_Alltoall(ndof_to_send,
1238 comm_pt()->mpi_comm());
1241 MPI_Aint base_displacement;
1242 MPI_Get_address(my_global_dofs, &base_displacement);
1247 std::vector<bool> dof_recv(nrow_local,
false);
1251 Vector<MPI_Request> send_requests;
1252 Vector<MPI_Request> recv_requests;
1253 Vector<unsigned long*> global_dofs_recv(nproc, 0);
1254 Vector<unsigned*> dof_numbers_recv(nproc, 0);
1255 Vector<unsigned> proc;
1256 for (
unsigned p = 0; p < nproc; p++)
1261 if (ndof_to_send[p] > 0)
1264 MPI_Datatype types[2];
1265 MPI_Aint displacements[2];
1269 MPI_Type_contiguous(
1270 ndof_to_send[p], MPI_UNSIGNED_LONG, &types[0]);
1271 MPI_Type_commit(&types[0]);
1272 MPI_Get_address(my_global_dofs + first_dof_to_send[p],
1274 displacements[0] -= base_displacement;
1278 MPI_Type_contiguous(ndof_to_send[p], MPI_UNSIGNED, &types[1]);
1279 MPI_Type_commit(&types[1]);
1280 MPI_Get_address(my_dof_numbers + first_dof_to_send[p],
1282 displacements[1] -= base_displacement;
1286 MPI_Datatype send_type;
1287 MPI_Type_create_struct(
1288 2, lengths, displacements, types, &send_type);
1289 MPI_Type_commit(&send_type);
1290 MPI_Type_free(&types[0]);
1291 MPI_Type_free(&types[1]);
1295 MPI_Isend(my_global_dofs,
1300 comm_pt()->mpi_comm(),
1302 send_requests.push_back(req);
1303 MPI_Type_free(&send_type);
1307 if (ndof_to_recv[p] > 0)
1310 global_dofs_recv[p] =
new unsigned long[ndof_to_recv[p]];
1311 dof_numbers_recv[p] =
new unsigned[ndof_to_recv[p]];
1315 MPI_Datatype types[2];
1316 MPI_Aint displacements[2];
1320 MPI_Type_contiguous(
1321 ndof_to_recv[p], MPI_UNSIGNED_LONG, &types[0]);
1322 MPI_Type_commit(&types[0]);
1323 MPI_Get_address(global_dofs_recv[p], &displacements[0]);
1324 displacements[0] -= base_displacement;
1328 MPI_Type_contiguous(ndof_to_recv[p], MPI_UNSIGNED, &types[1]);
1329 MPI_Type_commit(&types[1]);
1330 MPI_Get_address(dof_numbers_recv[p], &displacements[1]);
1331 displacements[1] -= base_displacement;
1335 MPI_Datatype recv_type;
1336 MPI_Type_create_struct(
1337 2, lengths, displacements, types, &recv_type);
1338 MPI_Type_commit(&recv_type);
1339 MPI_Type_free(&types[0]);
1340 MPI_Type_free(&types[1]);
1344 MPI_Irecv(my_global_dofs,
1349 comm_pt()->mpi_comm(),
1351 recv_requests.push_back(req);
1352 MPI_Type_free(&recv_type);
1358 unsigned u = first_dof_to_send[p] + ndof_to_recv[p];
1359 for (
unsigned i = first_dof_to_send[p];
i < u;
i++)
1363 dof_recv[my_global_dofs[
i] - first_row] =
true;
1365 Dof_number_dense[my_global_dofs[
i] - first_row] =
1372 unsigned c_recv = recv_requests.size();
1378 c_recv, &recv_requests[0], &req_number, MPI_STATUS_IGNORE);
1379 recv_requests.erase(recv_requests.begin() + req_number);
1383 unsigned p = proc[req_number];
1384 proc.erase(proc.begin() + req_number);
1387 for (
int i = 0;
i < ndof_to_recv[p];
i++)
1391 dof_recv[global_dofs_recv[p][
i] - first_row] =
true;
1393 Dof_number_dense[global_dofs_recv[p][
i] - first_row] =
1394 dof_numbers_recv[p][
i];
1398 delete[] global_dofs_recv[p];
1399 delete[] dof_numbers_recv[p];
1404 unsigned csr = send_requests.size();
1407 MPI_Waitall(csr, &send_requests[0], MPI_STATUS_IGNORE);
1411 delete[] ndof_to_send;
1412 delete[] first_dof_to_send;
1413 delete[] ndof_to_recv;
1414 delete[] my_global_dofs;
1415 delete[] my_dof_numbers;
1417 unsigned all_recv =
true;
1418 for (
unsigned i = 0;
i < nrow_local;
i++)
1427 std::ostringstream error_message;
1428 error_message <<
"Not all the DOF numbers required were received";
1429 throw OomphLibError(error_message.str(),
1430 OOMPH_CURRENT_FUNCTION,
1431 OOMPH_EXCEPTION_LOCATION);
1435 std::ostringstream error_message;
1437 <<
"The problem appears to be distributed, MPI is required";
1438 throw OomphLibError(error_message.str(),
1439 OOMPH_CURRENT_FUNCTION,
1440 OOMPH_EXCEPTION_LOCATION);
1443 #ifdef OOMPH_HAS_MPI
1444 Vector<unsigned*> sparse_rows_for_proc(nproc, 0);
1445 Vector<MPI_Request> sparse_rows_for_proc_requests;
1446 if (matrix_distributed)
1450 if (recv_requests_sparse_nreq.size() > 0)
1452 MPI_Waitall(recv_requests_sparse_nreq.size(),
1453 &recv_requests_sparse_nreq[0],
1456 for (
unsigned p = 0; p < nproc; ++p)
1458 if (nreq_sparse_for_proc[p] > 0)
1461 sparse_rows_for_proc[p] =
new unsigned[nreq_sparse_for_proc[p]];
1462 MPI_Irecv(sparse_rows_for_proc[p],
1463 nreq_sparse_for_proc[p],
1467 comm_pt()->mpi_comm(),
1469 sparse_rows_for_proc_requests.push_back(req);
1480 Dof_dimension.assign(Internal_ndof_types, 0);
1483 if (!matrix_distributed)
1486 unsigned nrow = this->distribution_pt()->nrow();
1487 Index_in_dof_block_dense.resize(nrow);
1488 Index_in_dof_block_dense.initialise(0);
1489 for (
unsigned i = 0;
i < nrow;
i++)
1491 Index_in_dof_block_dense[
i] = Dof_dimension[Dof_number_dense[
i]];
1492 Dof_dimension[Dof_number_dense[
i]]++;
1499 #ifdef OOMPH_HAS_MPI
1504 unsigned* my_nrows_in_dof_block =
new unsigned[Internal_ndof_types];
1505 for (
unsigned i = 0;
i < Internal_ndof_types;
i++)
1507 my_nrows_in_dof_block[
i] = 0;
1509 for (
unsigned i = 0;
i < nrow_local;
i++)
1511 my_nrows_in_dof_block[Dof_number_dense[
i]]++;
1515 unsigned* nrow_in_dof_block_recv =
1516 new unsigned[Internal_ndof_types * nproc];
1517 MPI_Allgather(my_nrows_in_dof_block,
1518 Internal_ndof_types,
1520 nrow_in_dof_block_recv,
1521 Internal_ndof_types,
1523 comm_pt()->mpi_comm());
1524 delete[] my_nrows_in_dof_block;
1527 Vector<unsigned> my_first_dof_index(Internal_ndof_types, 0);
1528 for (
unsigned i = 0;
i < Internal_ndof_types;
i++)
1530 for (
unsigned p = 0; p < my_rank; p++)
1532 my_first_dof_index[
i] +=
1533 nrow_in_dof_block_recv[p * Internal_ndof_types +
i];
1535 Dof_dimension[
i] = my_first_dof_index[
i];
1536 for (
unsigned p = my_rank; p < nproc; p++)
1539 nrow_in_dof_block_recv[p * Internal_ndof_types +
i];
1542 delete[] nrow_in_dof_block_recv;
1545 Index_in_dof_block_dense.resize(nrow_local);
1546 Index_in_dof_block_dense.initialise(0);
1548 for (
unsigned i = 0;
i < nrow_local;
i++)
1550 Index_in_dof_block_dense[
i] =
1551 my_first_dof_index[Dof_number_dense[
i]] +
1552 dof_counter[Dof_number_dense[
i]];
1553 dof_counter[Dof_number_dense[
i]]++;
1557 if (sparse_rows_for_proc_requests.size() > 0)
1559 MPI_Waitall(sparse_rows_for_proc_requests.size(),
1560 &sparse_rows_for_proc_requests[0],
1563 MPI_Aint base_displacement;
1564 MPI_Get_address(dof_number_sparse_send, &base_displacement);
1565 unsigned first_row = this->distribution_pt()->first_row();
1566 for (
unsigned p = 0; p < nproc; ++p)
1568 if (nreq_sparse_for_proc[p] > 0)
1571 index_in_dof_block_sparse_send[p] =
1572 new unsigned[nreq_sparse_for_proc[p]];
1573 dof_number_sparse_send[p] =
new unsigned[nreq_sparse_for_proc[p]];
1574 for (
unsigned i = 0;
i < nreq_sparse_for_proc[p]; ++
i)
1576 unsigned r = sparse_rows_for_proc[p][
i];
1578 index_in_dof_block_sparse_send[p][
i] =
1579 Index_in_dof_block_dense[r];
1580 dof_number_sparse_send[p][
i] = Dof_number_dense[r];
1582 delete[] sparse_rows_for_proc[p];
1586 MPI_Datatype types[2];
1587 MPI_Aint displacements[2];
1591 MPI_Type_contiguous(
1592 nreq_sparse_for_proc[p], MPI_UNSIGNED, &types[0]);
1593 MPI_Type_commit(&types[0]);
1594 MPI_Get_address(index_in_dof_block_sparse_send[p],
1596 displacements[0] -= base_displacement;
1600 MPI_Type_contiguous(
1601 nreq_sparse_for_proc[p], MPI_UNSIGNED, &types[1]);
1602 MPI_Type_commit(&types[1]);
1603 MPI_Get_address(dof_number_sparse_send[p], &displacements[1]);
1604 displacements[1] -= base_displacement;
1608 MPI_Datatype send_type;
1609 MPI_Type_create_struct(
1610 2, lengths, displacements, types, &send_type);
1611 MPI_Type_commit(&send_type);
1612 MPI_Type_free(&types[0]);
1613 MPI_Type_free(&types[1]);
1617 MPI_Isend(dof_number_sparse_send,
1622 comm_pt()->mpi_comm(),
1624 send_requests_sparse.push_back(req);
1625 MPI_Type_free(&send_type);
1629 index_in_dof_block_sparse_send[p] = 0;
1630 dof_number_sparse_send[p] = 0;
1645 if (dof_to_block_map.size() != Internal_ndof_types)
1647 std::ostringstream error_message;
1648 error_message <<
"The dof_to_block_map vector (size="
1649 << dof_to_block_map.size()
1650 <<
") must be of size Internal_ndof_types="
1651 << Internal_ndof_types;
1653 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1658 unsigned max_block_number = 0;
1659 for (
unsigned i = 0;
i < Internal_ndof_types;
i++)
1661 if (dof_to_block_map[
i] > max_block_number)
1663 max_block_number = dof_to_block_map[
i];
1668 Block_number_to_dof_number_lookup.clear();
1669 Block_number_to_dof_number_lookup.resize(max_block_number + 1);
1670 Ndof_in_block.clear();
1671 Ndof_in_block.resize(max_block_number + 1);
1674 Dof_number_to_block_number_lookup.resize(Internal_ndof_types);
1677 for (
unsigned i = 0;
i < Internal_ndof_types;
i++)
1679 Dof_number_to_block_number_lookup[
i] = dof_to_block_map[
i];
1680 Block_number_to_dof_number_lookup[dof_to_block_map[
i]].push_back(
i);
1681 Ndof_in_block[dof_to_block_map[
i]]++;
1687 for (
unsigned i = 0;
i < max_block_number + 1;
i++)
1689 if (Block_number_to_dof_number_lookup[
i].size() == 0)
1691 std::ostringstream error_message;
1692 error_message <<
"block number " <<
i
1693 <<
" does not have any DOFs associated with it";
1695 OOMPH_CURRENT_FUNCTION,
1696 OOMPH_EXCEPTION_LOCATION);
1702 Internal_nblock_types = max_block_number + 1;
1705 bool distributed = this->master_distribution_pt()->distributed();
1708 Internal_block_distribution_pt.resize(Internal_nblock_types);
1709 for (
unsigned i = 0;
i < Internal_nblock_types;
i++)
1711 unsigned block_dim = 0;
1712 for (
unsigned j = 0; j < Ndof_in_block[
i]; j++)
1715 internal_dof_block_dimension(Block_number_to_dof_number_lookup[
i][j]);
1717 Internal_block_distribution_pt[
i] =
1718 new LinearAlgebraDistribution(comm_pt(), block_dim, distributed);
1726 if (is_subsidiary_block_preconditioner())
1729 const unsigned dof_block_distribution_size =
1730 Dof_block_distribution_pt.size();
1731 for (
unsigned dof_i = 0; dof_i < dof_block_distribution_size; dof_i++)
1733 delete Dof_block_distribution_pt[dof_i];
1735 const unsigned ndofs = this->ndof_types();
1736 Dof_block_distribution_pt.resize(ndofs, 0);
1740 for (
unsigned dof_i = 0; dof_i < ndofs; dof_i++)
1744 const unsigned ncoarsened_dofs_in_dof_i =
1745 Doftype_coarsen_map_coarse[dof_i].size();
1746 Vector<LinearAlgebraDistribution*> tmp_dist_pt(ncoarsened_dofs_in_dof_i,
1748 for (
unsigned parent_dof_i = 0; parent_dof_i < ncoarsened_dofs_in_dof_i;
1751 tmp_dist_pt[parent_dof_i] =
1752 master_block_preconditioner_pt()->dof_block_distribution_pt(
1753 Doftype_in_master_preconditioner_coarse
1754 [Doftype_coarsen_map_coarse[dof_i][parent_dof_i]]);
1757 Dof_block_distribution_pt[dof_i] =
new LinearAlgebraDistribution;
1761 tmp_dist_pt, *Dof_block_distribution_pt[dof_i]);
1770 unsigned n_existing_block_dist = Block_distribution_pt.size();
1771 for (
unsigned dist_i = 0; dist_i < n_existing_block_dist; dist_i++)
1773 delete Block_distribution_pt[dist_i];
1776 Block_distribution_pt.clear();
1779 unsigned super_block_size = Block_to_dof_map_coarse.size();
1780 Block_distribution_pt.resize(super_block_size, 0);
1781 for (
unsigned super_block_i = 0; super_block_i < super_block_size;
1784 unsigned sub_block_size = Block_to_dof_map_coarse[super_block_i].size();
1785 Vector<LinearAlgebraDistribution*> tmp_dist_pt(sub_block_size, 0);
1787 for (
unsigned sub_block_i = 0; sub_block_i < sub_block_size;
1790 tmp_dist_pt[sub_block_i] = dof_block_distribution_pt(
1791 Block_to_dof_map_coarse[super_block_i][sub_block_i]);
1794 Block_distribution_pt[super_block_i] =
new LinearAlgebraDistribution;
1797 tmp_dist_pt, *Block_distribution_pt[super_block_i]);
1808 LinearAlgebraDistribution dist;
1810 Internal_block_distribution_pt, dist);
1813 if (is_subsidiary_block_preconditioner())
1815 this->build_distribution(dist);
1819 Internal_preconditioner_matrix_distribution_pt =
1820 new LinearAlgebraDistribution(dist);
1823 Preconditioner_matrix_distribution_pt =
new LinearAlgebraDistribution;
1825 Block_distribution_pt, *Preconditioner_matrix_distribution_pt);
1834 const unsigned nblocks = Block_distribution_pt.size();
1835 Vector<unsigned> preconditioner_matrix_key(nblocks, 0);
1836 for (
unsigned i = 0;
i < nblocks;
i++)
1838 preconditioner_matrix_key[
i] =
i;
1844 std::map<Vector<unsigned>, LinearAlgebraDistribution*>::iterator iter =
1845 Auxiliary_block_distribution_pt.begin();
1846 while (iter != Auxiliary_block_distribution_pt.end())
1848 if (iter->first != preconditioner_matrix_key)
1850 delete iter->second;
1860 Auxiliary_block_distribution_pt.clear();
1863 insert_auxiliary_block_distribution(
1864 preconditioner_matrix_key, Preconditioner_matrix_distribution_pt);
1868 #ifdef OOMPH_HAS_MPI
1869 if (send_requests_sparse.size() > 0)
1871 MPI_Waitall(send_requests_sparse.size(),
1872 &send_requests_sparse[0],
1875 if (recv_requests_sparse.size() > 0)
1877 MPI_Waitall(recv_requests_sparse.size(),
1878 &recv_requests_sparse[0],
1881 for (
unsigned p = 0; p < nproc; p++)
1883 delete[] index_in_dof_block_sparse_send[p];
1884 delete[] dof_number_sparse_send[p];
1886 delete[] index_in_dof_block_sparse_send;
1887 delete[] dof_number_sparse_send;
1888 delete[] nreq_sparse;
1889 delete[] nreq_sparse_for_proc;
1898 Global_index.resize(Internal_nblock_types);
1899 for (
unsigned b = 0; b < Internal_nblock_types; b++)
1901 Global_index[b].resize(Internal_block_distribution_pt[b]->nrow());
1905 unsigned nrow = this->master_nrow();
1906 for (
unsigned i = 0;
i < nrow;
i++)
1909 int dof_number = this->internal_dof_number(
i);
1910 if (dof_number >= 0)
1913 unsigned block_number = Dof_number_to_block_number_lookup[dof_number];
1916 unsigned index_in_block = 0;
1918 while (
int(Block_number_to_dof_number_lookup[block_number][ptr]) !=
1921 index_in_block += internal_dof_block_dimension(
1922 Block_number_to_dof_number_lookup[block_number][ptr]);
1925 index_in_block += internal_index_in_dof(
i);
1926 Global_index[block_number][index_in_block] =
i;
1933 #ifdef OOMPH_HAS_MPI
1936 const LinearAlgebraDistribution* master_distribution_pt =
1937 this->master_distribution_pt();
1940 Nrows_to_send_for_get_block.resize(Internal_nblock_types, nproc);
1941 Nrows_to_send_for_get_block.initialise(0);
1942 Nrows_to_send_for_get_ordered.resize(nproc);
1943 Nrows_to_send_for_get_ordered.initialise(0);
1946 unsigned nrow_local = master_distribution_pt->nrow_local();
1947 unsigned first_row = master_distribution_pt->first_row();
1948 for (
unsigned i = 0;
i < nrow_local;
i++)
1951 int b = this->internal_block_number(first_row +
i);
1957 unsigned j = this->internal_index_in_block(first_row +
i);
1960 unsigned block_p = 0;
1961 while (!(Internal_block_distribution_pt[b]->first_row(block_p) <= j &&
1962 (Internal_block_distribution_pt[b]->first_row(block_p) +
1963 Internal_block_distribution_pt[b]->nrow_local(block_p) >
1970 Nrows_to_send_for_get_block(b, block_p)++;
1971 Nrows_to_send_for_get_ordered[block_p]++;
1976 Nrows_to_recv_for_get_block.resize(Internal_nblock_types, nproc);
1977 Nrows_to_recv_for_get_block.initialise(0);
1978 Nrows_to_recv_for_get_ordered.resize(nproc);
1979 Nrows_to_recv_for_get_ordered.initialise(0);
1982 Vector<unsigned*> nrows_to_send(nproc, 0);
1983 Vector<unsigned*> nrows_to_recv(nproc, 0);
1984 Vector<MPI_Request> send_requests_nrow;
1985 Vector<MPI_Request> recv_requests_nrow;
1986 Vector<unsigned> proc;
1987 for (
unsigned p = 0; p < nproc; p++)
1993 nrows_to_send[p] =
new unsigned[Internal_nblock_types];
1994 for (
unsigned b = 0; b < Internal_nblock_types; b++)
1996 nrows_to_send[p][b] = Nrows_to_send_for_get_block(b, p);
1999 MPI_Isend(nrows_to_send[p],
2000 Internal_nblock_types,
2004 comm_pt()->mpi_comm(),
2006 send_requests_nrow.push_back(s_req);
2009 nrows_to_recv[p] =
new unsigned[Internal_nblock_types];
2011 MPI_Irecv(nrows_to_recv[p],
2012 Internal_nblock_types,
2016 comm_pt()->mpi_comm(),
2018 recv_requests_nrow.push_back(r_req);
2023 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2025 Nrows_to_recv_for_get_block(b, p) =
2026 Nrows_to_send_for_get_block(b, p);
2028 Nrows_to_recv_for_get_ordered[p] = Nrows_to_send_for_get_ordered[p];
2034 DenseMatrix<int*> block_rows_to_send(Internal_nblock_types, nproc, 0);
2035 Vector<int*> ordered_rows_to_send(nproc, 0);
2038 Rows_to_send_for_get_block.resize(Internal_nblock_types, nproc);
2039 Rows_to_send_for_get_block.initialise(0);
2040 Rows_to_send_for_get_ordered.resize(nproc);
2041 Rows_to_send_for_get_ordered.initialise(0);
2042 Rows_to_recv_for_get_block.resize(Internal_nblock_types, nproc);
2043 Rows_to_recv_for_get_block.initialise(0);
2046 for (
unsigned p = 0; p < nproc; p++)
2048 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2050 Rows_to_send_for_get_block(b, p) =
2051 new int[Nrows_to_send_for_get_block(b, p)];
2054 block_rows_to_send(b, p) =
2055 new int[Nrows_to_send_for_get_block(b, p)];
2059 Rows_to_recv_for_get_block(b, p) =
2060 new int[Nrows_to_send_for_get_block(b, p)];
2063 Rows_to_send_for_get_ordered[p] =
2064 new int[Nrows_to_send_for_get_ordered[p]];
2069 DenseMatrix<unsigned> ptr_block(Internal_nblock_types, nproc, 0);
2070 for (
unsigned i = 0;
i < nrow_local;
i++)
2073 int b = this->internal_block_number(first_row +
i);
2079 unsigned j = this->internal_index_in_block(first_row +
i);
2082 unsigned block_p = 0;
2083 while (!(Internal_block_distribution_pt[b]->first_row(block_p) <= j &&
2084 (Internal_block_distribution_pt[b]->first_row(block_p) +
2085 Internal_block_distribution_pt[b]->nrow_local(block_p) >
2092 Rows_to_send_for_get_block(b, block_p)[ptr_block(b, block_p)] =
i;
2093 if (block_p != my_rank)
2095 block_rows_to_send(b, block_p)[ptr_block(b, block_p)] =
2096 j - Internal_block_distribution_pt[b]->first_row(block_p);
2100 Rows_to_recv_for_get_block(b, block_p)[ptr_block(b, block_p)] =
2101 j - Internal_block_distribution_pt[b]->first_row(block_p);
2103 ptr_block(b, block_p)++;
2108 for (
unsigned p = 0; p < nproc; ++p)
2111 for (
unsigned b = 0; b < Internal_nblock_types; ++b)
2113 for (
unsigned i = 0;
i < Nrows_to_send_for_get_block(b, p); ++
i)
2115 Rows_to_send_for_get_ordered[p][pt] =
2116 Rows_to_send_for_get_block(b, p)[
i];
2125 unsigned c = recv_requests_nrow.size();
2130 MPI_Waitany(c, &recv_requests_nrow[0], &req_number, MPI_STATUS_IGNORE);
2131 recv_requests_nrow.erase(recv_requests_nrow.begin() + req_number);
2135 unsigned p = proc[req_number];
2136 proc.erase(proc.begin() + req_number);
2139 Nrows_to_recv_for_get_ordered[p] = 0;
2140 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2142 Nrows_to_recv_for_get_block(b, p) = nrows_to_recv[p][b];
2143 Nrows_to_recv_for_get_ordered[p] += nrows_to_recv[p][b];
2147 delete[] nrows_to_recv[p];
2151 Rows_to_recv_for_get_ordered.resize(nproc, 0);
2152 for (
unsigned p = 0; p < nproc; p++)
2156 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2158 Rows_to_recv_for_get_block(b, p) =
2159 new int[Nrows_to_recv_for_get_block(b, p)];
2166 Vector<unsigned> nsend_for_rows(nproc, 0);
2167 Vector<unsigned> nrecv_for_rows(nproc, 0);
2168 for (
unsigned p = 0; p < nproc; p++)
2172 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2174 if (Nrows_to_send_for_get_block(b, p) > 0)
2176 nsend_for_rows[p]++;
2178 if (Nrows_to_recv_for_get_block(b, p) > 0)
2180 nrecv_for_rows[p]++;
2187 MPI_Aint base_displacement;
2188 MPI_Get_address(matrix_pt(), &base_displacement);
2189 Vector<MPI_Request> req_rows;
2190 for (
unsigned p = 0; p < nproc; p++)
2195 if (nsend_for_rows[p] > 0)
2197 MPI_Datatype send_types[nsend_for_rows[p]];
2198 MPI_Aint send_displacements[nsend_for_rows[p]];
2199 int send_sz[nsend_for_rows[p]];
2200 unsigned send_ptr = 0;
2201 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2203 if (Nrows_to_send_for_get_block(b, p) > 0)
2205 MPI_Type_contiguous(Nrows_to_send_for_get_block(b, p),
2207 &send_types[send_ptr]);
2208 MPI_Type_commit(&send_types[send_ptr]);
2209 MPI_Get_address(block_rows_to_send(b, p),
2210 &send_displacements[send_ptr]);
2211 send_displacements[send_ptr] -= base_displacement;
2212 send_sz[send_ptr] = 1;
2216 MPI_Datatype final_send_type;
2217 MPI_Type_create_struct(nsend_for_rows[p],
2222 MPI_Type_commit(&final_send_type);
2223 for (
unsigned i = 0;
i < nsend_for_rows[p];
i++)
2225 MPI_Type_free(&send_types[
i]);
2227 MPI_Request send_req;
2228 MPI_Isend(matrix_pt(),
2233 comm_pt()->mpi_comm(),
2235 req_rows.push_back(send_req);
2236 MPI_Type_free(&final_send_type);
2240 if (nrecv_for_rows[p] > 0)
2242 MPI_Datatype recv_types[nrecv_for_rows[p]];
2243 MPI_Aint recv_displacements[nrecv_for_rows[p]];
2244 int recv_sz[nrecv_for_rows[p]];
2245 unsigned recv_ptr = 0;
2246 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2248 if (Nrows_to_recv_for_get_block(b, p) > 0)
2250 MPI_Type_contiguous(Nrows_to_recv_for_get_block(b, p),
2252 &recv_types[recv_ptr]);
2253 MPI_Type_commit(&recv_types[recv_ptr]);
2254 MPI_Get_address(Rows_to_recv_for_get_block(b, p),
2255 &recv_displacements[recv_ptr]);
2256 recv_displacements[recv_ptr] -= base_displacement;
2257 recv_sz[recv_ptr] = 1;
2261 MPI_Datatype final_recv_type;
2262 MPI_Type_create_struct(nrecv_for_rows[p],
2267 MPI_Type_commit(&final_recv_type);
2268 for (
unsigned i = 0;
i < nrecv_for_rows[p];
i++)
2270 MPI_Type_free(&recv_types[
i]);
2272 MPI_Request recv_req;
2273 MPI_Irecv(matrix_pt(),
2278 comm_pt()->mpi_comm(),
2280 req_rows.push_back(recv_req);
2281 MPI_Type_free(&final_recv_type);
2291 unsigned n_req_rows = req_rows.size();
2294 MPI_Waitall(n_req_rows, &req_rows[0], MPI_STATUS_IGNORE);
2298 Rows_to_recv_for_get_ordered.resize(nproc);
2299 Rows_to_recv_for_get_ordered.initialise(0);
2302 Vector<int> vec_offset(Internal_nblock_types, 0);
2303 for (
unsigned b = 1; b < Internal_nblock_types; ++b)
2305 vec_offset[b] = vec_offset[b - 1] +
2306 Internal_block_distribution_pt[b - 1]->nrow_local();
2310 for (
unsigned p = 0; p < nproc; p++)
2313 Rows_to_recv_for_get_ordered[p] =
2314 new int[Nrows_to_recv_for_get_ordered[p]];
2315 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2317 for (
unsigned i = 0;
i < Nrows_to_recv_for_get_block(b, p);
i++)
2319 Rows_to_recv_for_get_ordered[p][pt] =
2320 Rows_to_recv_for_get_block(b, p)[
i] + vec_offset[b];
2327 for (
unsigned p = 0; p < nproc; p++)
2331 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2333 delete[] block_rows_to_send(b, p);
2335 if (Nrows_to_send_for_get_ordered[p] > 0)
2337 delete[] ordered_rows_to_send[p];
2343 unsigned n_req_send_nrow = send_requests_nrow.size();
2344 if (n_req_send_nrow)
2346 MPI_Waitall(n_req_send_nrow, &send_requests_nrow[0], MPI_STATUS_IGNORE);
2348 for (
unsigned p = 0; p < nproc; p++)
2350 delete[] nrows_to_send[p];
2356 if (block_output_on()) output_blocks_to_files(Output_base_filename);
2375 template<
typename MATRIX>
2382 unsigned doftype_in_master_preconditioner_coarse_size =
2383 doftype_in_master_preconditioner_coarse.size();
2385 for (
unsigned dof_i = 0;
2386 dof_i < doftype_in_master_preconditioner_coarse_size;
2392 doftype_coarsen_map_coarse.push_back(tmp_vec);
2396 turn_into_subsidiary_block_preconditioner(
2397 master_block_prec_pt,
2398 doftype_in_master_preconditioner_coarse,
2399 doftype_coarsen_map_coarse);
2454 template<
typename MATRIX>
2461 Master_block_preconditioner_pt = master_block_prec_pt;
2464 Doftype_coarsen_map_coarse = doftype_coarsen_map_coarse;
2466 Doftype_in_master_preconditioner_coarse =
2467 doftype_in_master_preconditioner_coarse;
2482 template<
typename MATRIX>
2488 if (this->is_master_block_preconditioner())
2490 std::ostringstream err_msg;
2491 unsigned n = nmesh();
2494 err_msg <<
"No meshes have been set for this block preconditioner!\n"
2495 <<
"Set one with set_nmesh(...), set_mesh(...)" << std::endl;
2497 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2498 for (
unsigned m = 0; m < n; m++)
2500 if (Mesh_pt[m] == 0)
2502 err_msg <<
"The mesh pointer to mesh " << m <<
" is null!\n"
2503 <<
"Set a non-null one with set_mesh(...)" << std::endl;
2505 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2513 unsigned internal_n_dof_types = ndof_types();
2517 Vector<unsigned> dof_to_block_lookup(internal_n_dof_types);
2518 for (
unsigned i = 0;
i < internal_n_dof_types;
i++)
2520 dof_to_block_lookup[
i] =
i;
2524 this->block_setup(dof_to_block_lookup);
2536 template<
typename MATRIX>
2542 const unsigned n_block_types = nblock_types();
2546 if ((required_blocks.
nrow() != n_block_types) ||
2547 (required_blocks.
ncol() != n_block_types))
2549 std::ostringstream error_message;
2550 error_message <<
"The size of the matrix of bools required_blocks "
2551 <<
"(which indicates which blocks are required) is not the "
2552 <<
"right size, required_blocks is "
2553 << required_blocks.
ncol() <<
" x " << required_blocks.
nrow()
2554 <<
", whereas it should "
2555 <<
"be " << n_block_types <<
" x " << n_block_types;
2557 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2561 if ((block_matrix_pt.
nrow() != n_block_types) ||
2562 (block_matrix_pt.
ncol() != n_block_types))
2564 std::ostringstream error_message;
2565 error_message <<
"The size of the block matrix pt is not the "
2566 <<
"right size, block_matrix_pt is "
2567 << block_matrix_pt.
ncol() <<
" x " << block_matrix_pt.
nrow()
2568 <<
", whereas it should "
2569 <<
"be " << n_block_types <<
" x " << n_block_types;
2571 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2577 for (
unsigned i = 0;
i < n_block_types;
i++)
2579 for (
unsigned j = 0; j < n_block_types; j++)
2582 if (required_blocks(
i, j))
2585 block_matrix_pt(
i, j) =
new MATRIX;
2586 get_block(
i, j, *block_matrix_pt(
i, j));
2592 block_matrix_pt(
i, j) = 0;
2607 template<
typename MATRIX>
2618 std::ostringstream err_msg;
2619 err_msg <<
"The distribution of the global vector v must be setup.";
2621 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2630 std::ostringstream err_msg;
2631 err_msg <<
"The distribution of the global vector v must match the "
2632 <<
" specified master_distribution_pt(). \n"
2633 <<
"i.e. Distribution_pt in the master preconditioner";
2635 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2640 const unsigned para_nblock_types = nblock_types();
2641 const unsigned para_block_vec_number_size = block_vec_number.size();
2642 if (para_block_vec_number_size > para_nblock_types)
2644 std::ostringstream err_msg;
2645 err_msg <<
"You have requested " << para_block_vec_number_size
2646 <<
" number of blocks, (block_vec_number.size() is "
2647 << para_block_vec_number_size <<
").\n"
2648 <<
"But there are only " << para_nblock_types
2649 <<
" nblock_types.\n"
2650 <<
"Please make sure that block_vec_number is correctly sized.\n";
2652 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2659 for (
unsigned i = 0;
i < para_block_vec_number_size;
i++)
2661 const unsigned para_required_block = block_vec_number[
i];
2662 if (para_required_block >= para_nblock_types)
2664 std::ostringstream err_msg;
2665 err_msg <<
"block_vec_number[" <<
i <<
"] is " << para_required_block
2667 <<
"But there are only " << para_nblock_types
2668 <<
" nblock_types.\n";
2670 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2675 std::set<unsigned> para_set;
2676 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2678 std::pair<std::set<unsigned>::iterator,
bool> para_set_ret;
2679 para_set_ret = para_set.insert(block_vec_number[b]);
2681 if (!para_set_ret.second)
2683 std::ostringstream err_msg;
2684 err_msg <<
"Error: the block number " << block_vec_number[b]
2685 <<
" appears twice.\n";
2686 throw OomphLibError(
2687 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2693 const unsigned n_block = block_vec_number.size();
2702 Vector<unsigned> most_fine_grain_dof;
2703 for (
unsigned b = 0; b < n_block; b++)
2705 const unsigned mapped_b = block_vec_number[b];
2706 most_fine_grain_dof.insert(most_fine_grain_dof.end(),
2707 Block_to_dof_map_fine[mapped_b].begin(),
2708 Block_to_dof_map_fine[mapped_b].end());
2712 Vector<DoubleVector> dof_block_vector;
2713 internal_get_block_vectors(most_fine_grain_dof, v, dof_block_vector);
2724 std::map<Vector<unsigned>, LinearAlgebraDistribution*>::const_iterator iter;
2728 iter = Auxiliary_block_distribution_pt.find(block_vec_number);
2730 if (iter != Auxiliary_block_distribution_pt.end())
2734 w.
build(iter->second);
2740 Vector<LinearAlgebraDistribution*> tmp_vec_dist_pt(n_block, 0);
2741 for (
unsigned b = 0; b < n_block; b++)
2743 tmp_vec_dist_pt[b] = Block_distribution_pt[block_vec_number[b]];
2748 LinearAlgebraDistribution* tmp_dist_pt =
new LinearAlgebraDistribution;
2753 insert_auxiliary_block_distribution(block_vec_number, tmp_dist_pt);
2756 w.
build(tmp_dist_pt);
2773 template<
typename MATRIX>
2784 std::ostringstream err_msg;
2785 err_msg <<
"The distribution of the global vector v must be setup.";
2787 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2796 std::ostringstream err_msg;
2797 err_msg <<
"The distribution of the global vector v must match the "
2798 <<
" specified master_distribution_pt(). \n"
2799 <<
"i.e. Distribution_pt in the master preconditioner";
2801 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2806 const unsigned para_block_vec_number_size = block_vec_number.size();
2807 const unsigned para_n_block = nblock_types();
2808 if (para_block_vec_number_size > para_n_block)
2810 std::ostringstream err_msg;
2811 err_msg <<
"Trying to return " << para_block_vec_number_size
2812 <<
" block vectors.\n"
2813 <<
"But there are only " << para_n_block <<
" block types.\n";
2815 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2822 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2824 const unsigned para_required_block = block_vec_number[b];
2825 if (para_required_block > para_n_block)
2827 std::ostringstream err_msg;
2828 err_msg <<
"block_vec_number[" << b <<
"] is " << para_required_block
2830 <<
"But there are only " << para_n_block <<
" block types.\n";
2832 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2837 std::set<unsigned> para_set;
2838 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2840 std::pair<std::set<unsigned>::iterator,
bool> para_set_ret;
2841 para_set_ret = para_set.insert(block_vec_number[b]);
2843 if (!para_set_ret.second)
2845 std::ostringstream err_msg;
2846 err_msg <<
"Error: the block number " << block_vec_number[b]
2847 <<
" appears twice.\n";
2848 throw OomphLibError(
2849 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2856 std::ostringstream err_msg;
2857 err_msg <<
"The distribution of the block vector w must be setup.";
2858 throw OomphLibError(
2859 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2866 Vector<LinearAlgebraDistribution*> para_vec_dist_pt(
2867 para_block_vec_number_size, 0);
2869 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2871 para_vec_dist_pt[b] = Block_distribution_pt[block_vec_number[b]];
2874 LinearAlgebraDistribution para_tmp_dist;
2881 std::ostringstream err_msg;
2882 err_msg <<
"The distribution of the block vector w does not match \n"
2883 <<
"the concatenation of the block distributions defined in \n"
2884 <<
"block_vec_number.\n";
2885 throw OomphLibError(
2886 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2891 const unsigned n_block = block_vec_number.size();
2900 Vector<unsigned> most_fine_grain_dof;
2901 for (
unsigned b = 0; b < n_block; b++)
2903 const unsigned mapped_b = block_vec_number[b];
2904 most_fine_grain_dof.insert(most_fine_grain_dof.end(),
2905 Block_to_dof_map_fine[mapped_b].begin(),
2906 Block_to_dof_map_fine[mapped_b].end());
2911 const unsigned ndof = most_fine_grain_dof.size();
2914 Vector<DoubleVector> dof_vector(ndof);
2915 for (
unsigned d = 0; d < ndof; d++)
2917 dof_vector[d].build(
2918 internal_block_distribution_pt(most_fine_grain_dof[d]));
2925 internal_return_block_vectors(most_fine_grain_dof, dof_vector, v);
2938 template<
typename MATRIX>
2949 std::ostringstream err_msg;
2950 err_msg <<
"The distribution of the global vector v must be setup.";
2952 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2961 std::ostringstream err_msg;
2962 err_msg <<
"The distribution of the global vector v must match the "
2963 <<
" specified master_distribution_pt(). \n"
2964 <<
"i.e. Distribution_pt in the master preconditioner";
2966 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2971 const unsigned para_nblock_types = nblock_types();
2972 const unsigned para_block_vec_number_size = block_vec_number.size();
2973 if (para_block_vec_number_size > para_nblock_types)
2975 std::ostringstream err_msg;
2976 err_msg <<
"You have requested " << para_block_vec_number_size
2977 <<
" number of blocks, (block_vec_number.size() is "
2978 << para_block_vec_number_size <<
").\n"
2979 <<
"But there are only " << para_nblock_types
2980 <<
" nblock_types.\n"
2981 <<
"Please make sure that block_vec_number is correctly sized.\n";
2983 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
2990 for (
unsigned i = 0;
i < para_block_vec_number_size;
i++)
2992 const unsigned para_required_block = block_vec_number[
i];
2993 if (para_required_block > para_nblock_types)
2995 std::ostringstream err_msg;
2996 err_msg <<
"block_vec_number[" <<
i <<
"] is " << para_required_block
2998 <<
"But there are only " << para_nblock_types
2999 <<
" nblock_types.\n";
3001 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3005 std::set<unsigned> para_set;
3006 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3008 std::pair<std::set<unsigned>::iterator,
bool> para_set_ret;
3009 para_set_ret = para_set.insert(block_vec_number[b]);
3011 if (!para_set_ret.second)
3013 std::ostringstream err_msg;
3014 err_msg <<
"Error: the block number " << block_vec_number[b]
3015 <<
" appears twice.\n";
3017 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3023 const unsigned n_block = block_vec_number.size();
3034 for (
unsigned b = 0; b < n_block; b++)
3036 const unsigned mapped_b = block_vec_number[b];
3038 most_fine_grain_dof.insert(most_fine_grain_dof.end(),
3039 Block_to_dof_map_fine[mapped_b].begin(),
3040 Block_to_dof_map_fine[mapped_b].end());
3045 internal_get_block_vectors(most_fine_grain_dof, v, dof_vector);
3054 unsigned offset = 0;
3056 for (
unsigned b = 0; b < n_block; b++)
3059 const unsigned mapped_b = block_vec_number[b];
3062 const unsigned n_dof = Block_to_dof_map_fine[mapped_b].size();
3067 s[b] = dof_vector[offset];
3072 s[b].build(Block_distribution_pt[mapped_b], 0);
3074 for (
unsigned vec_i = 0; vec_i < n_dof; vec_i++)
3076 tmp_vec_pt[vec_i] = &dof_vector[offset + vec_i];
3101 template<
typename MATRIX>
3106 const unsigned n_block = nblock_types();
3110 for (
unsigned i = 0;
i < n_block;
i++)
3112 required_block[
i] =
i;
3116 get_block_vectors(required_block, v,
s);
3130 template<
typename MATRIX>
3139 std::ostringstream error_message;
3140 error_message <<
"The distribution of the global vector v must be setup.";
3142 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3146 std::ostringstream error_message;
3147 error_message <<
"The distribution of the global vector v must match the "
3148 <<
" specified master_distribution_pt(). \n"
3149 <<
"i.e. Distribution_pt in the master preconditioner";
3151 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3157 const unsigned nblock = block_vec_number.size();
3162 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
3163 !this->distribution_pt()->distributed())
3172 for (
unsigned b = 0; b < nblock; b++)
3174 const unsigned required_block = block_vec_number[b];
3175 s[b].build(Internal_block_distribution_pt[required_block], 0.0);
3176 double* s_pt =
s[b].values_pt();
3177 unsigned nrow =
s[b].nrow();
3178 for (
unsigned i = 0;
i < nrow;
i++)
3180 s_pt[
i] = v_pt[this->Global_index[required_block][
i]];
3187 #ifdef OOMPH_HAS_MPI
3189 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
3192 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
3196 for (
unsigned b = 0; b < nblock; b++)
3198 const unsigned required_block = block_vec_number[b];
3199 s[b].build(Internal_block_distribution_pt[required_block], 0.0);
3207 unsigned max_n_send_or_recv = 0;
3208 for (
unsigned p = 0; p < nproc; p++)
3210 for (
unsigned b = 0; b < nblock; b++)
3212 const unsigned required_block = block_vec_number[b];
3213 max_n_send_or_recv = std::max(
3214 max_n_send_or_recv, Nrows_to_send_for_get_block(required_block, p));
3215 max_n_send_or_recv = std::max(
3216 max_n_send_or_recv, Nrows_to_recv_for_get_block(required_block, p));
3217 if (Nrows_to_send_for_get_block(required_block, p) > 0)
3221 if (Nrows_to_recv_for_get_block(required_block, p) > 0)
3230 int* block_lengths =
new int[max_n_send_or_recv];
3231 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
3233 block_lengths[
i] = 1;
3238 for (
unsigned p = 0; p < nproc; p++)
3244 if (nblock_send[p] > 0)
3247 MPI_Datatype block_send_types[nblock_send[p]];
3251 for (
unsigned b = 0; b < nblock; b++)
3253 const unsigned required_block = block_vec_number[b];
3255 if (Nrows_to_send_for_get_block(required_block, p) > 0)
3257 MPI_Type_indexed(Nrows_to_send_for_get_block(required_block, p),
3259 Rows_to_send_for_get_block(required_block, p),
3261 &block_send_types[ptr]);
3262 MPI_Type_commit(&block_send_types[ptr]);
3268 MPI_Aint displacements[nblock_send[p]];
3269 int lengths[nblock_send[p]];
3270 for (
int i = 0;
i < nblock_send[p];
i++)
3273 displacements[
i] = 0;
3277 MPI_Datatype type_send;
3278 MPI_Type_create_struct(nblock_send[p],
3283 MPI_Type_commit(&type_send);
3286 MPI_Request send_req;
3287 MPI_Isend(
const_cast<double*
>(v.
values_pt()),
3292 this->distribution_pt()->communicator_pt()->mpi_comm(),
3294 MPI_Type_free(&type_send);
3295 for (
int i = 0;
i < nblock_send[p];
i++)
3297 MPI_Type_free(&block_send_types[
i]);
3299 requests.push_back(send_req);
3303 if (nblock_recv[p] > 0)
3306 MPI_Datatype block_recv_types[nblock_recv[p]];
3309 MPI_Aint displacements[nblock_recv[p]];
3312 int lengths[nblock_recv[p]];
3315 MPI_Aint displacements_base;
3316 MPI_Get_address(
s[0].values_pt(), &displacements_base);
3320 for (
unsigned b = 0; b < nblock; b++)
3322 const unsigned required_block = block_vec_number[b];
3324 if (Nrows_to_recv_for_get_block(required_block, p) > 0)
3326 MPI_Type_indexed(Nrows_to_recv_for_get_block(required_block, p),
3328 Rows_to_recv_for_get_block(required_block, p),
3330 &block_recv_types[ptr]);
3331 MPI_Type_commit(&block_recv_types[ptr]);
3332 MPI_Get_address(
s[b].values_pt(), &displacements[ptr]);
3333 displacements[ptr] -= displacements_base;
3340 MPI_Datatype type_recv;
3341 MPI_Type_create_struct(nblock_recv[p],
3346 MPI_Type_commit(&type_recv);
3349 MPI_Request recv_req;
3350 MPI_Irecv(
s[0].values_pt(),
3355 this->distribution_pt()->communicator_pt()->mpi_comm(),
3357 MPI_Type_free(&type_recv);
3358 for (
int i = 0;
i < nblock_recv[p];
i++)
3360 MPI_Type_free(&block_recv_types[
i]);
3362 requests.push_back(recv_req);
3369 const double* v_values_pt = v.
values_pt();
3370 for (
unsigned b = 0; b < nblock; b++)
3372 const unsigned required_block = block_vec_number[b];
3374 double* w_values_pt =
s[b].values_pt();
3375 for (
unsigned i = 0;
3376 i < Nrows_to_send_for_get_block(required_block, p);
3379 w_values_pt[Rows_to_recv_for_get_block(required_block, p)[
i]] =
3380 v_values_pt[Rows_to_send_for_get_block(required_block, p)[
i]];
3387 unsigned c = requests.size();
3391 MPI_Waitall(c, &requests[0], &stat[0]);
3393 delete[] block_lengths;
3397 std::ostringstream error_message;
3398 error_message <<
"The preconditioner is distributed and on more than one "
3399 <<
"processor. MPI is required.";
3401 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3419 template<
typename MATRIX>
3424 const unsigned nblock = this->internal_nblock_types();
3426 for (
unsigned b = 0; b < nblock; b++)
3428 block_vec_number[b] = b;
3431 internal_get_block_vectors(block_vec_number, v,
s);
3442 template<
typename MATRIX>
3453 std::ostringstream err_msg;
3454 err_msg <<
"The distribution of the global vector v must be setup.";
3456 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3465 std::ostringstream err_msg;
3466 err_msg <<
"The distribution of the global vector v must match the "
3467 <<
" specified master_distribution_pt(). \n"
3468 <<
"i.e. Distribution_pt in the master preconditioner";
3470 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3475 const unsigned para_block_vec_number_size = block_vec_number.size();
3476 const unsigned para_s_size =
s.size();
3477 if (para_block_vec_number_size != para_s_size)
3479 std::ostringstream err_msg;
3480 err_msg <<
"block_vec_number.size() is " << para_block_vec_number_size
3482 <<
"s.size() is " << para_s_size <<
".\n"
3483 <<
"But they must be the same size!\n";
3485 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3490 const unsigned para_n_block = nblock_types();
3491 if (para_block_vec_number_size > para_n_block)
3493 std::ostringstream err_msg;
3494 err_msg <<
"Trying to return " << para_block_vec_number_size
3495 <<
" block vectors.\n"
3496 <<
"But there are only " << para_n_block <<
" block types.\n";
3498 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3505 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3507 const unsigned para_required_block = block_vec_number[b];
3508 if (para_required_block > para_n_block)
3510 std::ostringstream err_msg;
3511 err_msg <<
"block_vec_number[" << b <<
"] is " << para_required_block
3513 <<
"But there are only " << para_n_block <<
" block types.\n";
3515 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3520 std::set<unsigned> para_set;
3521 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3523 std::pair<std::set<unsigned>::iterator,
bool> para_set_ret;
3524 para_set_ret = para_set.insert(block_vec_number[b]);
3526 if (!para_set_ret.second)
3528 std::ostringstream err_msg;
3529 err_msg <<
"Error: the block number " << block_vec_number[b]
3530 <<
" appears twice.\n";
3532 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3538 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3542 std::ostringstream err_msg;
3543 err_msg <<
"The distribution of the block vector s[" << b
3544 <<
"] must be setup.\n";
3546 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3553 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3555 if (*(
s[b].distribution_pt()) !=
3556 *(Block_distribution_pt[block_vec_number[b]]))
3558 std::ostringstream error_message;
3560 <<
"The distribution of the block vector " << b <<
" must match the"
3561 <<
" specified distribution at "
3562 <<
"Block_distribution_pt[" << block_vec_number[b] <<
"].\n"
3563 <<
"The distribution of the Block_distribution_pt is determined by\n"
3564 <<
"the vector block_vec_number. Perhaps it is incorrect?\n";
3566 OOMPH_CURRENT_FUNCTION,
3567 OOMPH_EXCEPTION_LOCATION);
3573 const unsigned n_block = block_vec_number.size();
3583 for (
unsigned b = 0; b < n_block; b++)
3585 const unsigned mapped_b = block_vec_number[b];
3587 most_fine_grain_dof.insert(most_fine_grain_dof.end(),
3588 Block_to_dof_map_fine[mapped_b].begin(),
3589 Block_to_dof_map_fine[mapped_b].end());
3595 unsigned offset = 0;
3598 for (
unsigned b = 0; b < n_block; b++)
3601 const unsigned mapped_b = block_vec_number[b];
3604 const unsigned ndof = Block_to_dof_map_fine[mapped_b].size();
3609 dof_vector[offset] =
s[b];
3617 for (
unsigned d = 0; d < ndof; d++)
3619 const unsigned offset_plus_d = offset + d;
3622 dof_vector[offset_plus_d].build(
3623 Internal_block_distribution_pt[most_fine_grain_dof[offset_plus_d]]);
3626 tmp_dof_vector_pt[d] = &dof_vector[offset_plus_d];
3639 internal_return_block_vectors(most_fine_grain_dof, dof_vector, v);
3654 template<
typename MATRIX>
3659 const unsigned n_block = nblock_types();
3663 for (
unsigned i = 0;
i < n_block;
i++)
3665 required_block[
i] =
i;
3669 return_block_vectors(required_block,
s, v);
3683 template<
typename MATRIX>
3690 const unsigned nblock = block_vec_number.size();
3695 std::ostringstream error_message;
3696 error_message <<
"The distribution of the global vector v must be setup.";
3698 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3702 std::ostringstream error_message;
3703 error_message <<
"The distribution of the global vector v must match the "
3704 <<
" specified master_distribution_pt(). \n"
3705 <<
"i.e. Distribution_pt in the master preconditioner";
3707 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3709 for (
unsigned b = 0; b < nblock; b++)
3713 std::ostringstream error_message;
3714 error_message <<
"The distribution of the block vector " << b
3715 <<
" must be setup.";
3717 OOMPH_CURRENT_FUNCTION,
3718 OOMPH_EXCEPTION_LOCATION);
3720 const unsigned required_block = block_vec_number[b];
3721 if (*(
s[b].distribution_pt()) !=
3722 *(Internal_block_distribution_pt[required_block]))
3724 std::ostringstream error_message;
3726 <<
"The distribution of the block vector " << b <<
" must match the"
3727 <<
" specified distribution at Internal_block_distribution_pt[" << b
3730 OOMPH_CURRENT_FUNCTION,
3731 OOMPH_EXCEPTION_LOCATION);
3739 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
3740 !this->distribution_pt()->distributed())
3743 for (
unsigned b = 0; b < nblock; b++)
3745 const unsigned required_block = block_vec_number[b];
3747 const double* s_pt =
s[b].values_pt();
3748 unsigned nrow = this->internal_block_dimension(required_block);
3749 for (
unsigned i = 0;
i < nrow;
i++)
3751 v_pt[this->Global_index[required_block][
i]] = s_pt[
i];
3758 #ifdef OOMPH_HAS_MPI
3761 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
3764 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
3771 unsigned max_n_send_or_recv = 0;
3772 for (
unsigned p = 0; p < nproc; p++)
3774 for (
unsigned b = 0; b < nblock; b++)
3776 const unsigned required_block = block_vec_number[b];
3778 max_n_send_or_recv = std::max(
3779 max_n_send_or_recv, Nrows_to_send_for_get_block(required_block, p));
3780 max_n_send_or_recv = std::max(
3781 max_n_send_or_recv, Nrows_to_recv_for_get_block(required_block, p));
3782 if (Nrows_to_send_for_get_block(required_block, p) > 0)
3786 if (Nrows_to_recv_for_get_block(required_block, p) > 0)
3795 int* block_lengths =
new int[max_n_send_or_recv];
3796 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
3798 block_lengths[
i] = 1;
3803 for (
unsigned p = 0; p < nproc; p++)
3809 if (nblock_recv[p] > 0)
3812 MPI_Datatype block_recv_types[nblock_recv[p]];
3816 for (
unsigned b = 0; b < nblock; b++)
3818 const unsigned required_block = block_vec_number[b];
3820 if (Nrows_to_send_for_get_block(required_block, p) > 0)
3822 MPI_Type_indexed(Nrows_to_send_for_get_block(required_block, p),
3824 Rows_to_send_for_get_block(required_block, p),
3826 &block_recv_types[ptr]);
3827 MPI_Type_commit(&block_recv_types[ptr]);
3833 MPI_Aint displacements[nblock_recv[p]];
3834 int lengths[nblock_recv[p]];
3835 for (
int i = 0;
i < nblock_recv[p];
i++)
3838 displacements[
i] = 0;
3842 MPI_Datatype type_recv;
3843 MPI_Type_create_struct(nblock_recv[p],
3848 MPI_Type_commit(&type_recv);
3851 MPI_Request recv_req;
3857 this->distribution_pt()->communicator_pt()->mpi_comm(),
3859 MPI_Type_free(&type_recv);
3860 for (
int i = 0;
i < nblock_recv[p];
i++)
3862 MPI_Type_free(&block_recv_types[
i]);
3864 requests.push_back(recv_req);
3868 if (nblock_send[p] > 0)
3871 MPI_Datatype block_send_types[nblock_send[p]];
3874 MPI_Aint displacements[nblock_send[p]];
3877 int lengths[nblock_send[p]];
3880 MPI_Aint displacements_base;
3881 MPI_Get_address(
const_cast<double*
>(
s[0].values_pt()),
3882 &displacements_base);
3886 for (
unsigned b = 0; b < nblock; b++)
3888 const unsigned required_block = block_vec_number[b];
3890 if (Nrows_to_recv_for_get_block(required_block, p) > 0)
3892 MPI_Type_indexed(Nrows_to_recv_for_get_block(required_block, p),
3894 Rows_to_recv_for_get_block(required_block, p),
3896 &block_send_types[ptr]);
3897 MPI_Type_commit(&block_send_types[ptr]);
3898 MPI_Get_address(
const_cast<double*
>(
s[b].values_pt()),
3899 &displacements[ptr]);
3900 displacements[ptr] -= displacements_base;
3907 MPI_Datatype type_send;
3908 MPI_Type_create_struct(nblock_send[p],
3913 MPI_Type_commit(&type_send);
3916 MPI_Request send_req;
3917 MPI_Isend(
const_cast<double*
>(
s[0].values_pt()),
3922 this->distribution_pt()->communicator_pt()->mpi_comm(),
3924 MPI_Type_free(&type_send);
3925 for (
int i = 0;
i < nblock_send[p];
i++)
3927 MPI_Type_free(&block_send_types[
i]);
3929 requests.push_back(send_req);
3937 for (
unsigned b = 0; b < nblock; b++)
3939 const unsigned required_block = block_vec_number[b];
3941 const double* w_values_pt =
s[b].values_pt();
3942 for (
unsigned i = 0;
3943 i < Nrows_to_send_for_get_block(required_block, p);
3946 v_values_pt[Rows_to_send_for_get_block(required_block, p)[
i]] =
3947 w_values_pt[Rows_to_recv_for_get_block(required_block, p)[
i]];
3954 unsigned c = requests.size();
3958 MPI_Waitall(c, &requests[0], &stat[0]);
3960 delete[] block_lengths;
3964 std::ostringstream error_message;
3965 error_message <<
"The preconditioner is distributed and on more than one "
3966 <<
"processor. MPI is required.";
3968 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
3986 template<
typename MATRIX>
3991 const unsigned nblock = this->internal_nblock_types();
3993 for (
unsigned b = 0; b < nblock; b++)
3995 block_vec_number[b] = b;
3998 internal_return_block_vectors(block_vec_number,
s, v);
4008 template<
typename MATRIX>
4014 const unsigned n_blocks = this->internal_nblock_types();
4019 std::ostringstream error_message;
4021 <<
"Requested block vector " << b
4022 <<
", however this preconditioner has internal_nblock_types() "
4023 <<
"= " << internal_nblock_types() << std::endl;
4025 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4029 std::ostringstream error_message;
4030 error_message <<
"The distribution of the global vector v must be setup.";
4032 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4036 std::ostringstream error_message;
4037 error_message <<
"The distribution of the global vector v must match the "
4038 <<
" specified master_distribution_pt(). \n"
4039 <<
"i.e. Distribution_pt in the master preconditioner";
4041 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4046 w.
build(Internal_block_distribution_pt[b], 0.0);
4051 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4052 !this->distribution_pt()->distributed())
4056 unsigned n_row = w.
nrow();
4057 for (
unsigned i = 0;
i < n_row;
i++)
4059 w_pt[
i] = v_pt[this->Global_index[b][
i]];
4065 #ifdef OOMPH_HAS_MPI
4068 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4071 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4074 unsigned max_n_send_or_recv = 0;
4075 for (
unsigned p = 0; p < nproc; p++)
4077 max_n_send_or_recv =
4078 std::max(max_n_send_or_recv, Nrows_to_send_for_get_block(b, p));
4079 max_n_send_or_recv =
4080 std::max(max_n_send_or_recv, Nrows_to_recv_for_get_block(b, p));
4085 int* block_lengths =
new int[max_n_send_or_recv];
4086 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
4088 block_lengths[
i] = 1;
4093 for (
unsigned p = 0; p < nproc; p++)
4098 if (Nrows_to_send_for_get_block(b, p) > 0)
4101 MPI_Datatype type_send;
4102 MPI_Type_indexed(Nrows_to_send_for_get_block(b, p),
4104 Rows_to_send_for_get_block(b, p),
4107 MPI_Type_commit(&type_send);
4110 MPI_Request send_req;
4111 MPI_Isend(
const_cast<double*
>(v.
values_pt()),
4116 this->distribution_pt()->communicator_pt()->mpi_comm(),
4118 MPI_Type_free(&type_send);
4119 requests.push_back(send_req);
4122 if (Nrows_to_recv_for_get_block(b, p) > 0)
4125 MPI_Datatype type_recv;
4126 MPI_Type_indexed(Nrows_to_recv_for_get_block(b, p),
4128 Rows_to_recv_for_get_block(b, p),
4131 MPI_Type_commit(&type_recv);
4134 MPI_Request recv_req;
4140 this->distribution_pt()->communicator_pt()->mpi_comm(),
4142 MPI_Type_free(&type_recv);
4143 requests.push_back(recv_req);
4151 const double* v_values_pt = v.
values_pt();
4152 for (
unsigned i = 0;
i < Nrows_to_send_for_get_block(b, p);
i++)
4154 w_values_pt[Rows_to_recv_for_get_block(b, p)[
i]] =
4155 v_values_pt[Rows_to_send_for_get_block(b, p)[
i]];
4161 unsigned c = requests.size();
4165 MPI_Waitall(c, &requests[0], &stat[0]);
4167 delete[] block_lengths;
4171 std::ostringstream error_message;
4172 error_message <<
"The preconditioner is distributed and on more than one "
4173 <<
"processor. MPI is required.";
4175 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4185 template<
typename MATRIX>
4192 const unsigned para_n_blocks = nblock_types();
4195 if (b >= para_n_blocks)
4197 std::ostringstream err_msg;
4198 err_msg <<
"Requested block vector " << b
4199 <<
", however this preconditioner has only " << para_n_blocks
4203 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4208 std::ostringstream err_msg;
4209 err_msg <<
"The distribution of the global vector v must be setup.";
4211 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4215 std::ostringstream err_msg;
4216 err_msg <<
"The distribution of the global vector v must match the "
4217 <<
" specified master_distribution_pt(). \n"
4218 <<
"i.e. Distribution_pt in the master preconditioner";
4220 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4256 const unsigned n_dof_vec = most_fine_grain_dof.size();
4261 internal_get_block_vector(most_fine_grain_dof[0], v, w);
4269 internal_get_block_vectors(most_fine_grain_dof, v, dof_vector);
4271 w.
build(Block_distribution_pt[b], 0);
4292 template<
typename MATRIX>
4298 const unsigned n_blocks = this->internal_nblock_types();
4303 std::ostringstream error_message;
4305 <<
"Requested block vector " << b
4306 <<
", however this preconditioner has internal_nblock_types() "
4307 <<
"= " << internal_nblock_types() << std::endl;
4309 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4313 std::ostringstream error_message;
4314 error_message <<
"The distribution of the global vector v must be setup.";
4316 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4320 std::ostringstream error_message;
4321 error_message <<
"The distribution of the global vector v must match the "
4322 <<
" specified master_distribution_pt(). \n"
4323 <<
"i.e. Distribution_pt in the master preconditioner";
4325 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4329 std::ostringstream error_message;
4330 error_message <<
"The distribution of the block vector w must be setup.";
4332 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4336 std::ostringstream error_message;
4338 <<
"The distribution of the block vector w must match the "
4339 <<
" specified distribution at Internal_block_distribution_pt[b]";
4341 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4348 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4349 !this->distribution_pt()->distributed())
4352 unsigned n_row = this->internal_block_dimension(b);
4357 for (
unsigned i = 0;
i < n_row;
i++)
4359 v_pt[this->Global_index[b][
i]] = w_pt[
i];
4365 #ifdef OOMPH_HAS_MPI
4368 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4371 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4374 unsigned max_n_send_or_recv = 0;
4375 for (
unsigned p = 0; p < nproc; p++)
4377 max_n_send_or_recv =
4378 std::max(max_n_send_or_recv, Nrows_to_send_for_get_block(b, p));
4379 max_n_send_or_recv =
4380 std::max(max_n_send_or_recv, Nrows_to_recv_for_get_block(b, p));
4385 int* block_lengths =
new int[max_n_send_or_recv];
4386 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
4388 block_lengths[
i] = 1;
4393 for (
unsigned p = 0; p < nproc; p++)
4398 if (Nrows_to_recv_for_get_block(b, p) > 0)
4401 MPI_Datatype type_send;
4402 MPI_Type_indexed(Nrows_to_recv_for_get_block(b, p),
4404 Rows_to_recv_for_get_block(b, p),
4407 MPI_Type_commit(&type_send);
4410 MPI_Request send_req;
4411 MPI_Isend(
const_cast<double*
>(w.
values_pt()),
4416 this->distribution_pt()->communicator_pt()->mpi_comm(),
4418 MPI_Type_free(&type_send);
4419 requests.push_back(send_req);
4422 if (Nrows_to_send_for_get_block(b, p) > 0)
4425 MPI_Datatype type_recv;
4426 MPI_Type_indexed(Nrows_to_send_for_get_block(b, p),
4428 Rows_to_send_for_get_block(b, p),
4431 MPI_Type_commit(&type_recv);
4434 MPI_Request recv_req;
4440 this->distribution_pt()->communicator_pt()->mpi_comm(),
4442 MPI_Type_free(&type_recv);
4443 requests.push_back(recv_req);
4450 const double* w_values_pt = w.
values_pt();
4452 for (
unsigned i = 0;
i < Nrows_to_send_for_get_block(b, p);
i++)
4454 v_values_pt[Rows_to_send_for_get_block(b, p)[
i]] =
4455 w_values_pt[Rows_to_recv_for_get_block(b, p)[
i]];
4461 unsigned c = requests.size();
4465 MPI_Waitall(c, &requests[0], &stat[0]);
4467 delete[] block_lengths;
4471 std::ostringstream error_message;
4472 error_message <<
"The preconditioner is distributed and on more than one "
4473 <<
"processor. MPI is required.";
4475 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4488 template<
typename MATRIX>
4495 const unsigned para_n_blocks = nblock_types();
4498 if (n >= para_n_blocks)
4500 std::ostringstream err_msg;
4501 err_msg <<
"Requested block vector " << b
4502 <<
", however this preconditioner has " << para_n_blocks
4503 <<
" block types.\n";
4505 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4509 std::ostringstream err_msg;
4510 err_msg <<
"The distribution of the global vector v must be setup.";
4512 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4516 std::ostringstream err_msg;
4517 err_msg <<
"The distribution of the global vector v must match the "
4518 <<
" specified master_distribution_pt(). \n"
4519 <<
"i.e. Distribution_pt in the master preconditioner";
4521 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4525 std::ostringstream err_msg;
4526 err_msg <<
"The distribution of the block vector b must be setup.";
4528 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4537 const unsigned n_dof_vec = Block_to_dof_map_fine[n].size();
4542 internal_return_block_vector(most_fine_grain_dof[0], b, v);
4548 for (
unsigned d = 0; d < n_dof_vec; d++)
4550 dof_vector[d].build(
4551 internal_block_distribution_pt(most_fine_grain_dof[d]));
4557 internal_return_block_vectors(most_fine_grain_dof, dof_vector, v);
4597 template<
typename MATRIX>
4605 std::ostringstream error_message;
4606 error_message <<
"The distribution of the global vector v must be setup.";
4608 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4612 std::ostringstream error_message;
4613 error_message <<
"The distribution of the global vector v must match the "
4614 <<
" specified master_distribution_pt(). \n"
4615 <<
"i.e. Distribution_pt in the master preconditioner";
4617 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4622 w.
build(this->internal_preconditioner_matrix_distribution_pt(), 0.0);
4627 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4628 !this->distribution_pt()->distributed())
4631 unsigned nblock = this->Internal_nblock_types;
4634 unsigned block_offset = 0;
4637 for (
unsigned b = 0; b < nblock; b++)
4639 unsigned block_nrow = this->internal_block_dimension(b);
4640 for (
unsigned i = 0;
i < block_nrow;
i++)
4642 w_pt[block_offset +
i] = v_pt[this->Global_index[b][
i]];
4644 block_offset += block_nrow;
4650 #ifdef OOMPH_HAS_MPI
4653 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4656 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4659 unsigned max_n_send_or_recv = 0;
4660 for (
unsigned p = 0; p < nproc; p++)
4662 max_n_send_or_recv =
4663 std::max(max_n_send_or_recv, Nrows_to_send_for_get_ordered[p]);
4664 max_n_send_or_recv =
4665 std::max(max_n_send_or_recv, Nrows_to_recv_for_get_ordered[p]);
4670 int* block_lengths =
new int[max_n_send_or_recv];
4671 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
4673 block_lengths[
i] = 1;
4678 for (
unsigned p = 0; p < nproc; p++)
4683 if (Nrows_to_send_for_get_ordered[p] > 0)
4686 MPI_Datatype type_send;
4687 MPI_Type_indexed(Nrows_to_send_for_get_ordered[p],
4689 Rows_to_send_for_get_ordered[p],
4692 MPI_Type_commit(&type_send);
4695 MPI_Request send_req;
4696 MPI_Isend(
const_cast<double*
>(v.
values_pt()),
4701 this->distribution_pt()->communicator_pt()->mpi_comm(),
4703 MPI_Type_free(&type_send);
4704 requests.push_back(send_req);
4707 if (Nrows_to_recv_for_get_ordered[p] > 0)
4710 MPI_Datatype type_recv;
4711 MPI_Type_indexed(Nrows_to_recv_for_get_ordered[p],
4713 Rows_to_recv_for_get_ordered[p],
4716 MPI_Type_commit(&type_recv);
4719 MPI_Request recv_req;
4725 this->distribution_pt()->communicator_pt()->mpi_comm(),
4727 MPI_Type_free(&type_recv);
4728 requests.push_back(recv_req);
4736 const double* v_values_pt = v.
values_pt();
4737 for (
unsigned i = 0;
i < Nrows_to_send_for_get_ordered[p];
i++)
4739 w_values_pt[Rows_to_recv_for_get_ordered[p][
i]] =
4740 v_values_pt[Rows_to_send_for_get_ordered[p][
i]];
4746 unsigned c = requests.size();
4750 MPI_Waitall(c, &requests[0], &stat[0]);
4752 delete[] block_lengths;
4756 std::ostringstream error_message;
4757 error_message <<
"The preconditioner is distributed and on more than one "
4758 <<
"processor. MPI is required.";
4760 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4792 template<
typename MATRIX>
4799 std::ostringstream error_message;
4800 error_message <<
"The distribution of the global vector v must be setup.";
4802 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4806 std::ostringstream error_message;
4807 error_message <<
"The distribution of the global vector v must match the "
4808 <<
" specified master_distribution_pt(). \n"
4809 <<
"i.e. Distribution_pt in the master preconditioner";
4811 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4816 unsigned nblocks = this->nblock_types();
4820 for (
unsigned b = 0; b < nblocks; b++)
4822 block_vec_number[b] = b;
4826 get_concatenated_block_vector(block_vec_number, v, w);
4848 template<
typename MATRIX>
4856 std::ostringstream error_message;
4857 error_message <<
"The distribution of the global vector v must be setup.";
4859 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4863 std::ostringstream error_message;
4864 error_message <<
"The distribution of the global vector v must match the "
4865 <<
" specified master_distribution_pt(). \n"
4866 <<
"i.e. Distribution_pt in the master preconditioner";
4868 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4872 std::ostringstream error_message;
4873 error_message <<
"The distribution of the block vector w must be setup.";
4875 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4878 *this->internal_preconditioner_matrix_distribution_pt())
4880 std::ostringstream error_message;
4881 error_message <<
"The distribution of the block vector w must match the "
4882 <<
" specified distribution at Distribution_pt[b]";
4884 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
4892 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4893 !this->distribution_pt()->distributed())
4896 unsigned nblock = this->Internal_nblock_types;
4899 unsigned block_offset = 0;
4902 for (
unsigned b = 0; b < nblock; b++)
4904 unsigned block_nrow = this->internal_block_dimension(b);
4905 for (
unsigned i = 0;
i < block_nrow;
i++)
4907 v_pt[this->Global_index[b][
i]] = w_pt[block_offset +
i];
4909 block_offset += block_nrow;
4915 #ifdef OOMPH_HAS_MPI
4918 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4921 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4924 unsigned max_n_send_or_recv = 0;
4925 for (
unsigned p = 0; p < nproc; p++)
4927 max_n_send_or_recv =
4928 std::max(max_n_send_or_recv, Nrows_to_send_for_get_ordered[p]);
4929 max_n_send_or_recv =
4930 std::max(max_n_send_or_recv, Nrows_to_recv_for_get_ordered[p]);
4935 int* block_lengths =
new int[max_n_send_or_recv];
4936 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
4938 block_lengths[
i] = 1;
4943 for (
unsigned p = 0; p < nproc; p++)
4948 if (Nrows_to_recv_for_get_ordered[p] > 0)
4951 MPI_Datatype type_send;
4952 MPI_Type_indexed(Nrows_to_recv_for_get_ordered[p],
4954 Rows_to_recv_for_get_ordered[p],
4957 MPI_Type_commit(&type_send);
4960 MPI_Request send_req;
4961 MPI_Isend(
const_cast<double*
>(w.
values_pt()),
4966 this->distribution_pt()->communicator_pt()->mpi_comm(),
4968 MPI_Type_free(&type_send);
4969 requests.push_back(send_req);
4972 if (Nrows_to_send_for_get_ordered[p] > 0)
4975 MPI_Datatype type_recv;
4976 MPI_Type_indexed(Nrows_to_send_for_get_ordered[p],
4978 Rows_to_send_for_get_ordered[p],
4981 MPI_Type_commit(&type_recv);
4984 MPI_Request recv_req;
4990 this->distribution_pt()->communicator_pt()->mpi_comm(),
4992 MPI_Type_free(&type_recv);
4993 requests.push_back(recv_req);
5000 const double* w_values_pt = w.
values_pt();
5002 for (
unsigned i = 0;
i < Nrows_to_send_for_get_ordered[p];
i++)
5004 v_values_pt[Rows_to_send_for_get_ordered[p][
i]] =
5005 w_values_pt[Rows_to_recv_for_get_ordered[p][
i]];
5011 unsigned c = requests.size();
5015 MPI_Waitall(c, &requests[0], &stat[0]);
5017 delete[] block_lengths;
5021 std::ostringstream error_message;
5022 error_message <<
"The preconditioner is distributed and on more than one "
5023 <<
"processor. MPI is required.";
5025 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5045 template<
typename MATRIX>
5052 std::ostringstream error_message;
5053 error_message <<
"The distribution of the global vector v must be setup.";
5055 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5059 std::ostringstream error_message;
5060 error_message <<
"The distribution of the global vector v must match the "
5061 <<
" specified master_distribution_pt(). \n"
5062 <<
"i.e. Distribution_pt in the master preconditioner";
5064 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5068 std::ostringstream error_message;
5069 error_message <<
"The distribution of the block vector w must be setup.";
5071 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5073 if (*w.
distribution_pt() != *this->preconditioner_matrix_distribution_pt())
5075 std::ostringstream error_message;
5076 error_message <<
"The distribution of the block vector w must match the "
5077 <<
"concatenations of distributions in "
5078 <<
"Block_distribution_pt.\n";
5080 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5085 const unsigned nblocks = nblock_types();
5087 for (
unsigned b = 0; b < nblocks; b++)
5089 block_vec_number[b] = b;
5092 return_concatenated_block_vector(block_vec_number, w, v);
5102 const unsigned& block_i,
5103 const unsigned& block_j,
5108 const unsigned n_blocks = this->internal_nblock_types();
5111 if (block_i >= n_blocks || block_j >= n_blocks)
5113 std::ostringstream error_message;
5115 <<
"Requested block (" << block_i <<
"," << block_j
5116 <<
"), however this preconditioner has internal_nblock_types() "
5117 <<
"= " << internal_nblock_types() << std::endl;
5119 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5123 if (is_subsidiary_block_preconditioner())
5125 if (master_block_preconditioner_pt()->matrix_pt() != matrix_pt())
5127 std::string err =
"Master and subs should have same matrix.";
5129 err, OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5145 int* j_column_index;
5149 j_row_start = cr_matrix_pt->
row_start();
5151 j_value = cr_matrix_pt->
value();
5154 unsigned block_nrow = this->internal_block_dimension(block_i);
5155 unsigned block_ncol = this->internal_block_dimension(block_j);
5161 int* temp_row_start =
new int[block_nrow + 1];
5162 for (
unsigned i = 0;
i <= block_nrow;
i++)
5164 temp_row_start[
i] = 0;
5170 unsigned master_nrow = this->master_nrow();
5175 for (
unsigned k = 0; k < master_nrow; k++)
5177 if (internal_block_number(k) ==
static_cast<int>(block_i))
5179 for (
int l = j_row_start[k]; l < j_row_start[k + 1]; l++)
5181 if (internal_block_number(j_column_index[l]) ==
5182 static_cast<int>(block_j))
5185 temp_ptr[internal_index_in_block(k) + 1]++;
5192 int* temp_column_index =
new int[block_nnz];
5193 double* temp_value =
new double[block_nnz];
5198 temp_row_start[0] = 0;
5199 for (
unsigned k = 1; k <= block_nrow; k++)
5201 temp_row_start[k] = temp_row_start[k - 1] + temp_ptr[k];
5202 temp_ptr[k] = temp_row_start[k];
5207 for (
unsigned k = 0; k < master_nrow; k++)
5209 if (internal_block_number(k) ==
static_cast<int>(block_i))
5211 for (
int l = j_row_start[k]; l < j_row_start[k + 1]; l++)
5213 if (internal_block_number(j_column_index[l]) ==
5214 static_cast<int>(block_j))
5216 int kk = temp_ptr[internal_index_in_block(k)]++;
5217 temp_value[kk] = j_value[l];
5218 temp_column_index[kk] =
5219 internal_index_in_block(j_column_index[l]);
5230 output_block.
build(Internal_block_distribution_pt[block_i]);
5232 block_ncol, block_nnz, temp_value, temp_column_index, temp_row_start);
5237 if (Run_block_matrix_test)
5240 block_matrix_test(block_i, block_j, &output_block);
5249 #ifdef OOMPH_HAS_MPI
5251 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
5254 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
5257 int* j_row_start = cr_matrix_pt->
row_start();
5259 double* j_value = cr_matrix_pt->
value();
5275 unsigned nrow_local =
5276 Internal_block_distribution_pt[block_i]->nrow_local();
5282 for (
unsigned p = 0; p < nproc; p++)
5284 int nrow_send = Nrows_to_send_for_get_block(block_i, p);
5285 int nrow_recv = Nrows_to_recv_for_get_block(block_i, p);
5288 nnz_recv[p] =
new int[nrow_recv];
5291 if (nrow_send > 0 && p != my_rank)
5293 nnz_send[p] =
new int[nrow_send];
5298 for (
int i = 0;
i < nrow_send;
i++)
5300 unsigned row = Rows_to_send_for_get_block(block_i, p)[
i];
5302 for (
int r = j_row_start[row]; r < j_row_start[row + 1]; r++)
5304 if (internal_block_number(j_column_index[r]) ==
int(block_j))
5317 total_nnz_send[p] += c;
5326 MPI_Isend(nnz_send[p],
5331 this->distribution_pt()->communicator_pt()->mpi_comm(),
5333 send_req.push_back(req);
5340 MPI_Irecv(nnz_recv[p],
5345 this->distribution_pt()->communicator_pt()->mpi_comm(),
5347 recv1_req.push_back(req);
5354 for (
unsigned p = 0; p < nproc; p++)
5356 int nrow_send = Nrows_to_send_for_get_block(block_i, p);
5361 if (total_nnz_send[p] > 0)
5363 values_for_proc[p] =
new double[total_nnz_send[p]];
5364 column_index_for_proc[p] =
new int[total_nnz_send[p]];
5368 for (
int i = 0;
i < nrow_send;
i++)
5370 unsigned row = Rows_to_send_for_get_block(block_i, p)[
i];
5371 for (
int r = j_row_start[row]; r < j_row_start[row + 1]; r++)
5373 if (internal_block_number(j_column_index[r]) ==
int(block_j))
5375 values_for_proc[p][ptr] = j_value[r];
5376 column_index_for_proc[p][ptr] =
5377 internal_index_in_block(j_column_index[r]);
5384 MPI_Datatype types[2];
5385 MPI_Type_contiguous(total_nnz_send[p], MPI_DOUBLE, &types[0]);
5386 MPI_Type_commit(&types[0]);
5387 MPI_Type_contiguous(total_nnz_send[p], MPI_INT, &types[1]);
5388 MPI_Type_commit(&types[1]);
5391 MPI_Aint displacement[2];
5392 MPI_Get_address(values_for_proc[p], &displacement[0]);
5393 MPI_Get_address(column_index_for_proc[p], &displacement[1]);
5396 displacement[1] -= displacement[0];
5397 displacement[0] -= displacement[0];
5401 length[0] = length[1] = 1;
5404 MPI_Datatype final_type;
5405 MPI_Type_create_struct(2, length, displacement, types, &final_type);
5406 MPI_Type_commit(&final_type);
5407 MPI_Type_free(&types[0]);
5408 MPI_Type_free(&types[1]);
5412 MPI_Isend(values_for_proc[p],
5417 this->distribution_pt()->communicator_pt()->mpi_comm(),
5419 send_req.push_back(req);
5420 MPI_Type_free(&final_type);
5427 int c_recv = recv1_req.size();
5430 MPI_Waitall(c_recv, &recv1_req[0], MPI_STATUS_IGNORE);
5435 int local_block_nnz = 0;
5436 for (
unsigned p = 0; p < nproc; p++)
5439 for (
unsigned i = 0;
i < Nrows_to_recv_for_get_block(block_i, p);
i++)
5441 total_nnz_recv_from_proc[p] += nnz_recv[p][
i];
5443 local_block_nnz += total_nnz_recv_from_proc[p];
5451 for (
unsigned p = 0; p < nproc; p++)
5453 if (Nrows_to_recv_for_get_block(block_i, p) > 0)
5455 n_recv_block[p] = 1;
5457 for (
unsigned i = 1;
i < Nrows_to_recv_for_get_block(block_i, p);
i++)
5459 if (Rows_to_recv_for_get_block(block_i, p)[
i] !=
5460 Rows_to_recv_for_get_block(block_i, p)[
i - 1] + 1)
5468 int* row_start_recv =
new int[nrow_local + 1];
5469 for (
unsigned i = 0;
i <= nrow_local;
i++)
5471 row_start_recv[
i] = 0;
5473 for (
unsigned p = 0; p < nproc; p++)
5475 for (
unsigned i = 0;
i < Nrows_to_recv_for_get_block(block_i, p);
i++)
5477 row_start_recv[Rows_to_recv_for_get_block(block_i, p)[
i]] =
5481 int g = row_start_recv[0];
5482 row_start_recv[0] = 0;
5483 for (
unsigned i = 1;
i < nrow_local;
i++)
5486 g = row_start_recv[
i];
5487 row_start_recv[
i] = row_start_recv[
i - 1] + temp_g;
5489 row_start_recv[nrow_local] = row_start_recv[nrow_local - 1] + g;
5494 for (
unsigned p = 0; p < nproc; p++)
5496 if (Nrows_to_recv_for_get_block(block_i, p) > 0)
5498 offset_recv_block[p] =
new int[n_recv_block[p]];
5499 offset_recv_block[p][0] = 0;
5500 nnz_recv_block[p] =
new int[n_recv_block[p]];
5501 for (
int i = 0;
i < n_recv_block[p];
i++)
5503 nnz_recv_block[p][
i] = 0;
5506 nnz_recv_block[p][ptr] += nnz_recv[p][0];
5507 offset_recv_block[p][0] =
5508 row_start_recv[Rows_to_recv_for_get_block(block_i, p)[0]];
5509 for (
unsigned i = 1;
i < Nrows_to_recv_for_get_block(block_i, p);
i++)
5511 if (Rows_to_recv_for_get_block(block_i, p)[
i] !=
5512 Rows_to_recv_for_get_block(block_i, p)[
i - 1] + 1)
5515 offset_recv_block[p][ptr] =
5516 row_start_recv[Rows_to_recv_for_get_block(block_i, p)[
i]];
5518 nnz_recv_block[p][ptr] += nnz_recv[p][
i];
5521 delete[] nnz_recv[p];
5525 int* column_index_recv =
new int[local_block_nnz];
5526 double* values_recv =
new double[local_block_nnz];
5528 for (
unsigned p = 0; p < nproc; p++)
5532 if (total_nnz_recv_from_proc[p] != 0)
5535 MPI_Datatype types[2];
5536 MPI_Type_indexed(n_recv_block[p],
5538 offset_recv_block[p],
5541 MPI_Type_commit(&types[0]);
5542 MPI_Type_indexed(n_recv_block[p],
5544 offset_recv_block[p],
5547 MPI_Type_commit(&types[1]);
5550 MPI_Aint displacements[2];
5551 MPI_Get_address(values_recv, &displacements[0]);
5552 MPI_Get_address(column_index_recv, &displacements[1]);
5553 displacements[1] -= displacements[0];
5554 displacements[0] -= displacements[0];
5558 length[0] = length[1] = 1;
5561 MPI_Datatype final_type;
5562 MPI_Type_create_struct(
5563 2, length, displacements, types, &final_type);
5564 MPI_Type_commit(&final_type);
5565 MPI_Type_free(&types[0]);
5566 MPI_Type_free(&types[1]);
5570 MPI_Irecv(values_recv,
5575 this->distribution_pt()->communicator_pt()->mpi_comm(),
5577 recv2_req.push_back(req);
5578 MPI_Type_free(&final_type);
5584 unsigned block_ptr = 0;
5585 unsigned counter = 0;
5586 int nrow_send = Nrows_to_send_for_get_block(block_i, my_rank);
5589 unsigned offset = offset_recv_block[my_rank][0];
5590 for (
int i = 0;
i < nrow_send;
i++)
5594 if (Rows_to_recv_for_get_block(block_i, p)[
i] !=
5595 Rows_to_recv_for_get_block(block_i, p)[
i - 1] + 1)
5599 offset = offset_recv_block[my_rank][block_ptr];
5602 unsigned row = Rows_to_send_for_get_block(block_i, my_rank)[
i];
5603 for (
int r = j_row_start[row]; r < j_row_start[row + 1]; r++)
5605 if (internal_block_number(j_column_index[r]) ==
int(block_j))
5607 values_recv[offset + counter] = j_value[r];
5608 column_index_recv[offset + counter] =
5609 internal_index_in_block(j_column_index[r]);
5619 c_recv = recv2_req.size();
5622 MPI_Waitall(c_recv, &recv2_req[0], MPI_STATUS_IGNORE);
5626 output_block.
build(Internal_block_distribution_pt[block_i]);
5634 int c_send = send_req.size();
5637 MPI_Waitall(c_send, &send_req[0], MPI_STATUS_IGNORE);
5641 for (
unsigned p = 0; p < nproc; p++)
5643 delete[] nnz_send[p];
5644 delete[] column_index_for_proc[p];
5645 delete[] values_for_proc[p];
5646 delete[] offset_recv_block[p];
5647 delete[] nnz_recv_block[p];
5651 std::ostringstream error_message;
5652 error_message <<
"The matrix is distributed and on more than one "
5653 <<
"processor. MPI is required.";
5655 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5672 const unsigned& block_i,
5673 const unsigned& block_j,
5675 const bool& ignore_replacement_block)
const
5679 unsigned para_ndofs = ndof_types();
5682 if (block_i >= para_ndofs || block_j >= para_ndofs)
5684 std::ostringstream err_msg;
5685 err_msg <<
"Requested dof block (" << block_i <<
"," << block_j
5686 <<
"), however this preconditioner has ndof_types() "
5687 <<
"= " << para_ndofs << std::endl;
5689 err_msg.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
5694 Replacement_dof_block_pt.get(block_i, block_j);
5696 if ((tmp_block_pt == 0) || ignore_replacement_block)
5699 const unsigned ndof_in_parent_i =
5700 Doftype_coarsen_map_coarse[block_i].size();
5701 const unsigned ndof_in_parent_j =
5702 Doftype_coarsen_map_coarse[block_j].size();
5704 if (ndof_in_parent_i == 1 && ndof_in_parent_j == 1)
5706 unsigned parent_dof_i = Doftype_coarsen_map_coarse[block_i][0];
5707 unsigned parent_dof_j = Doftype_coarsen_map_coarse[block_j][0];
5709 if (is_master_block_preconditioner())
5711 internal_get_block(parent_dof_i, parent_dof_j, output_block);
5715 parent_dof_i = Doftype_in_master_preconditioner_coarse[parent_dof_i];
5716 parent_dof_j = Doftype_in_master_preconditioner_coarse[parent_dof_j];
5718 master_block_preconditioner_pt()->get_dof_level_block(
5719 parent_dof_i, parent_dof_j, output_block, ignore_replacement_block);
5725 ndof_in_parent_i, ndof_in_parent_j, 0);
5730 for (
unsigned dof_i = 0; dof_i < ndof_in_parent_i; dof_i++)
5732 unsigned parent_dof_i = Doftype_coarsen_map_coarse[block_i][dof_i];
5733 if (is_subsidiary_block_preconditioner())
5736 Doftype_in_master_preconditioner_coarse[parent_dof_i];
5739 for (
unsigned dof_j = 0; dof_j < ndof_in_parent_j; dof_j++)
5741 unsigned parent_dof_j = Doftype_coarsen_map_coarse[block_j][dof_j];
5745 new_block[dof_i][dof_j] = 1;
5747 if (is_master_block_preconditioner())
5750 parent_dof_i, parent_dof_j, *tmp_blocks_pt(dof_i, dof_j));
5755 Doftype_in_master_preconditioner_coarse[parent_dof_j];
5757 master_block_preconditioner_pt()->get_dof_level_block(
5760 *tmp_blocks_pt(dof_i, dof_j),
5761 ignore_replacement_block);
5768 for (
unsigned parent_dof_i = 0; parent_dof_i < ndof_in_parent_i;
5771 unsigned mapped_dof_i =
5772 Doftype_coarsen_map_coarse[block_i][parent_dof_i];
5774 if (is_master_block_preconditioner())
5776 tmp_row_dist_pt[parent_dof_i] =
5777 Internal_block_distribution_pt[mapped_dof_i];
5782 Doftype_in_master_preconditioner_coarse[mapped_dof_i];
5784 tmp_row_dist_pt[parent_dof_i] =
5785 master_block_preconditioner_pt()->dof_block_distribution_pt(
5792 for (
unsigned parent_dof_j = 0; parent_dof_j < ndof_in_parent_j;
5795 unsigned mapped_dof_j =
5796 Doftype_coarsen_map_coarse[block_j][parent_dof_j];
5798 if (is_master_block_preconditioner())
5800 tmp_col_dist_pt[parent_dof_j] =
5801 Internal_block_distribution_pt[mapped_dof_j];
5806 Doftype_in_master_preconditioner_coarse[mapped_dof_j];
5807 tmp_col_dist_pt[parent_dof_j] =
5808 master_block_preconditioner_pt()->dof_block_distribution_pt(
5814 tmp_row_dist_pt, tmp_col_dist_pt, tmp_blocks_pt, output_block);
5816 for (
unsigned dof_i = 0; dof_i < ndof_in_parent_i; dof_i++)
5818 for (
unsigned dof_j = 0; dof_j < ndof_in_parent_j; dof_j++)
5820 if (new_block[dof_i][dof_j])
5822 delete tmp_blocks_pt(dof_i, dof_j);
5838 template<
typename MATRIX>
5840 const unsigned& block_i,
5841 const unsigned& block_j,
5842 const MATRIX* block_matrix_pt)
const
5848 unsigned n_row = matrix_pt()->
nrow();
5851 unsigned n_col = matrix_pt()->ncol();
5854 for (
unsigned i = 0;
i < n_row;
i++)
5858 if (
static_cast<int>(block_i) == this->internal_block_number(
i))
5861 for (
unsigned j = 0; j < n_col; j++)
5865 if (
static_cast<int>(block_j) == this->internal_block_number(j))
5869 if (matrix_pt()->
operator()(
i, j) !=
5870 block_matrix_pt->operator()(internal_index_in_block(
i),
5871 internal_index_in_block(j)))
5883 std::ostringstream error_message;
5884 error_message <<
"The require elements have not been successfully copied"
5885 <<
" from the original matrix to the block matrices";
5887 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
Block Preconditioner base class. The block structure of the overall problem is determined from the Me...
void internal_return_block_vector(const unsigned &n, const DoubleVector &b, DoubleVector &v) const
Takes the n-th block ordered vector, b, and copies its entries to the appropriate entries in the naturally ordered vector, v.
void return_block_vector(const unsigned &n, const DoubleVector &b, DoubleVector &v) const
Takes the n-th block ordered vector, b, and copies its entries to the appropriate entries in the naturally ordered vector, v.
void internal_return_block_vectors(const Vector< unsigned > &block_vec_number, const Vector< DoubleVector > &s, DoubleVector &v) const
A helper function, takes the vector of block vectors, s, and copies its entries into the naturally ordered vector, v.
void internal_return_block_ordered_preconditioner_vector(const DoubleVector &w, DoubleVector &v) const
Takes the block ordered vector, w, and reorders it in the natural order. Reordered vector is returned in v.
void get_blocks(DenseMatrix< bool > &required_blocks, DenseMatrix< MATRIX * > &block_matrix_pt) const
Get all the block matrices required by the block preconditioner. Takes a pointer to a matrix of bools...
void return_concatenated_block_vector(const Vector< unsigned > &block_vec_number, const DoubleVector &b, DoubleVector &v) const
Takes concatenated block ordered vector, b, and copies its entries to the appropriate entries in the naturally ordered vector, v.
static bool Run_block_matrix_test
Static boolean to allow block_matrix_test(...) to be run. Defaults to false.
void return_block_vectors(const Vector< unsigned > &block_vec_number, const Vector< DoubleVector > &s, DoubleVector &v) const
Takes the vector of block vectors, s, and copies its entries into the naturally ordered vector,...
void get_block_vector(const unsigned &n, const DoubleVector &v, DoubleVector &b) const
Takes the naturally ordered vector, v and returns the n-th block vector, b. Here n is the block number in the current preconditioner.
void return_block_ordered_preconditioner_vector(const DoubleVector &w, DoubleVector &v) const
Takes the block ordered vector, w, and reorders it in natural order. Reordered vector is returned in v.
void turn_into_subsidiary_block_preconditioner(BlockPreconditioner< MATRIX > *master_block_prec_pt, const Vector< unsigned > &doftype_in_master_preconditioner_coarse)
Function to turn this preconditioner into a subsidiary preconditioner that operates within a bigger "master" block preconditioner.
void internal_get_block(const unsigned &i, const unsigned &j, MATRIX &output_block) const
Gets block (i,j) from the matrix pointed to by Matrix_pt and returns it in output_block....
void internal_get_block_vectors(const Vector< unsigned > &block_vec_number, const DoubleVector &v, Vector< DoubleVector > &s) const
Takes the naturally ordered vector and rearranges it into a vector of sub vectors corresponding to th...
void get_block_vectors(const Vector< unsigned > &block_vec_number, const DoubleVector &v, Vector< DoubleVector > &s) const
Takes the naturally ordered vector and rearranges it into a vector of sub vectors corresponding to th...
void internal_get_block_vector(const unsigned &n, const DoubleVector &v, DoubleVector &b) const
A helper function, takes the naturally ordered vector, v, and extracts the n-th block vector,...
virtual void block_setup()
Determine the size of the matrix blocks and setup the lookup schemes relating the global degrees of f...
void get_concatenated_block_vector(const Vector< unsigned > &block_vec_number, const DoubleVector &v, DoubleVector &b)
Takes the naturally ordered vector and extracts the blocks indicated by the block number (the values)...
void internal_get_block_ordered_preconditioner_vector(const DoubleVector &v, DoubleVector &w) const
Given the naturally ordered vector, v, return the vector rearranged in block order in w....
void get_block_ordered_preconditioner_vector(const DoubleVector &v, DoubleVector &w)
Given the naturally ordered vector, v, return the vector rearranged in block order in w....
A class for compressed row matrices. This is a distributable object.
int * column_index()
Access to C-style column index array.
int * row_start()
Access to C-style row_start array.
void build_without_copy(const unsigned &ncol, const unsigned &nnz, double *value, int *column_index, int *row_start)
Keeps the existing distribution and just sets the matrix that is stored, without copying the matrix data.
double * value()
Access to C-style value array.
void build(const LinearAlgebraDistribution *distribution_pt, const unsigned &ncol, const Vector< double > &value, const Vector< int > &column_index, const Vector< int > &row_start)
build method: vector of values, vector of column indices, vector of row starts and number of rows and...
static long Is_unclassified
Static "Magic number" used in place of the equation number to denote a value that hasn't been classif...
unsigned long nrow() const
Return the number of rows of the matrix.
unsigned long ncol() const
Return the number of columns of the matrix.
LinearAlgebraDistribution * distribution_pt() const
access to the LinearAlgebraDistribution
unsigned nrow() const
access function to the number of global rows.
A vector in the mathematical sense, initially developed for linear algebra type applications....
void build(const DoubleVector &old_vector)
Just copys the argument DoubleVector.
double * values_pt()
access function to the underlying values
bool distributed() const
access function to the distributed - indicates whether the distribution is serial or distributed
OomphCommunicator * communicator_pt() const
const access to the communicator pointer
An OomphLibError object which should be thrown when an run-time error is encountered....
An OomphLibWarning object which should be created as a temporary object to issue a warning....
void deep_copy(const CRDoubleMatrix *const in_matrix_pt, CRDoubleMatrix &out_matrix)
Create a deep copy of the matrix pointed to by in_matrix_pt.
void concatenate_without_communication(const Vector< DoubleVector * > &in_vector_pt, DoubleVector &out_vector)
Concatenate DoubleVectors. Takes a Vector of DoubleVectors. If the out vector is built,...
void split_without_communication(const DoubleVector &in_vector, Vector< DoubleVector * > &out_vector_pt)
Split a DoubleVector into the out DoubleVectors. Data stays on its current processor,...
std::string string(const unsigned &i)
Return the i-th string or "" if the relevant string hasn't been defined.
void concatenate(const Vector< LinearAlgebraDistribution * > &in_distribution_pt, LinearAlgebraDistribution &out_distribution)
Takes a vector of LinearAlgebraDistribution objects and concatenates them such that the nrow_local of...
//////////////////////////////////////////////////////////////////// ////////////////////////////////...