#include <oomph-lib-config.h>
  //============================================================================
  /// Set up each preconditioner in the array for the corresponding matrix.
  //============================================================================
  void PreconditionerArray::setup_preconditioners(
    Vector<CRDoubleMatrix*> matrix_pt,
    Vector<Preconditioner*> prec_pt,
    const OomphCommunicator* comm_pt)
  {
    // Number of preconditioners in the array
    Nprec = prec_pt.size();

    // There must be at least two preconditioners, otherwise there is nothing
    // to parallelise over
    if (Nprec < 2)
    {
      std::ostringstream error_message;
      error_message << "The PreconditionerArray requires at least 2 "
                    << "preconditioners.";
      throw OomphLibError(
        error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
    }

    // The same number of matrices and preconditioners must be supplied
    if (matrix_pt.size() != Nprec)
    {
      std::ostringstream error_message;
      error_message << "The same number of preconditioners and matrices must "
                    << "be passed to the setup_preconditioners(...).";
      throw OomphLibError(
        error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
    }
    // Check that every matrix pointer is non-null and that every matrix has
    // been built
    for (unsigned i = 0; i < Nprec; i++)
    {
      if (matrix_pt[i] == 0)
      {
        std::ostringstream error_message;
        error_message << "matrix_pt[" << i << "] = NULL.";
        throw OomphLibError(error_message.str(),
                            OOMPH_CURRENT_FUNCTION,
                            OOMPH_EXCEPTION_LOCATION);
      }

      if (!matrix_pt[i]->built())
      {
        std::ostringstream error_message;
        error_message << "Matrix " << i << " has not been built.";
        throw OomphLibError(error_message.str(),
                            OOMPH_CURRENT_FUNCTION,
                            OOMPH_EXCEPTION_LOCATION);
      }
      // All matrices must have the same communicator: compare the
      // communicator of matrix i against that of the first matrix
      // (reconstructed condition; the original bookkeeping around
      // matrix_pt[i]->distribution_pt()->communicator_pt() is elided)
      if (*matrix_pt[0]->distribution_pt()->communicator_pt() !=
          *matrix_pt[i]->distribution_pt()->communicator_pt())
      {
        std::ostringstream error_message;
        error_message << "All matrices must have the same communicator.";
        throw OomphLibError(error_message.str(),
                            OOMPH_CURRENT_FUNCTION,
                            OOMPH_EXCEPTION_LOCATION);
      }

      // Store a copy of the distribution of each matrix
      Distribution_pt[i] =
        new LinearAlgebraDistribution(matrix_pt[i]->distribution_pt());
    }
    // Distribute the processors evenly over the preconditioners: compute the
    // first processor and the number of processors for each preconditioner.
    // (nproc and my_rank denote the size of, and this processor's rank in,
    // the global communicator, set up in elided code above.)
    for (unsigned p = 0; p < Nprec; p++)
    {
      First_proc_for_prec[p] = unsigned(double(p * nproc) / double(Nprec));
    }
    for (unsigned p = 0; p < Nprec - 1; p++)
    {
      Nproc_for_prec[p] = First_proc_for_prec[p + 1] - First_proc_for_prec[p];
    }
    Nproc_for_prec[Nprec - 1] = nproc - First_proc_for_prec[Nprec - 1];

    // Every preconditioner must be assigned at least one processor
    for (unsigned p = 0; p < Nprec; p++)
    {
      if (Nproc_for_prec[p] == 0)
      {
        std::ostringstream error_message;
        error_message << "We only have " << nproc << " processor[s]!\n"
                      << "This is not enough to perform the " << Nprec
                      << " block solves in parallel! Sorry! \n"
                      << "Please run this with more processors or disable the\n"
                      << "request for two-level parallelism.\n";
        throw OomphLibError(error_message.str(),
                            OOMPH_CURRENT_FUNCTION,
                            OOMPH_EXCEPTION_LOCATION);
      }
    }
    // The local matrix that will hold this processor's rows of the matrix
    // for the preconditioner this processor works on
    CRDoubleMatrix* local_matrix_pt = 0;

    // MPI requests for the non-blocking sends
    Vector<MPI_Request> req;

    // Target distribution of each matrix over its group of processors:
    // first row and number of local rows, indexed [preconditioner][processor]
    Vector<Vector<unsigned>> target_first_row(Nprec);
    Vector<Vector<unsigned>> target_nrow_local(Nprec);

    // Number of nonzeros to be sent to / received from each processor,
    // indexed [preconditioner][processor]
    Vector<Vector<unsigned>> nnz_send(Nprec);
    Vector<Vector<unsigned>> nnz_recv(Nprec);
228 for (
unsigned i = 0;
i <
Nprec;
i++)
232 if (matrix_pt[
i]->distributed())
238 unsigned nrow = matrix_pt[
i]->nrow();
241 target_first_row[
i].resize(nproc);
242 target_nrow_local[
i].resize(nproc);
244 for (
unsigned p = 0; p < nproc_local; p++)
247 target_first_row[
i][pp] =
248 unsigned(
double(p * nrow) /
double(nproc_local));
250 for (
unsigned p = 0; p < nproc_local - 1; p++)
253 target_nrow_local[
i][pp] =
254 target_first_row[
i][pp + 1] - target_first_row[
i][pp];
257 target_nrow_local[
i][last_local_proc] =
258 nrow - target_first_row[
i][last_local_proc];
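        // Illustrative note (not from the library source): the even split
        // used above assigns row ranges with
        //   first_row(p) = floor(p * nrow / nproc_local)
        // so for nrow = 10 and nproc_local = 3 the first rows are 0, 3, 6 and
        // the local row counts are 3, 3, 4 (the last processor picks up the
        // remainder).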
        // Current distribution of this matrix
        Vector<unsigned> current_first_row(nproc);
        Vector<unsigned> current_nrow_local(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          current_first_row[p] = matrix_pt[i]->first_row(p);
          current_nrow_local[p] = matrix_pt[i]->nrow_local(p);
        }

        // Work out the overlap between the current and the target
        // distributions, i.e. which rows this processor sends to and
        // receives from every other processor
        for (unsigned p = 0; p < nproc; p++)
        {
          // Rows to be sent from this processor to processor p
          if ((target_first_row[i][p] <
               (current_first_row[my_rank] + current_nrow_local[my_rank])) &&
              (current_first_row[my_rank] <
               (target_first_row[i][p] + target_nrow_local[i][p])))
          {
            First_row_for_proc[i][p] =
              std::max(current_first_row[my_rank], target_first_row[i][p]);
            Nrow_local_for_proc[i][p] =
              std::min(
                (current_first_row[my_rank] + current_nrow_local[my_rank]),
                (target_first_row[i][p] + target_nrow_local[i][p])) -
              First_row_for_proc[i][p];
          }

          // Rows to be received by this processor from processor p
          if ((target_first_row[i][my_rank] <
               (current_first_row[p] + current_nrow_local[p])) &&
              (current_first_row[p] < (target_first_row[i][my_rank] +
                                       target_nrow_local[i][my_rank])))
          {
            First_row_from_proc[i][p] =
              std::max(current_first_row[p], target_first_row[i][my_rank]);
            Nrow_local_from_proc[i][p] =
              std::min((current_first_row[p] + current_nrow_local[p]),
                       (target_first_row[i][my_rank] +
                        target_nrow_local[i][my_rank])) -
              First_row_from_proc[i][p];
          }
        }
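        // Illustrative note (not from the library source): the overlap test
        // above is the usual half-open interval intersection. Two row ranges
        // [a_lo, a_lo + a_n) and [b_lo, b_lo + b_n) overlap iff
        //   a_lo < b_lo + b_n  &&  b_lo < a_lo + a_n
        // and the shared rows are
        //   first = max(a_lo, b_lo);
        //   n     = min(a_lo + a_n, b_lo + b_n) - first;
        // e.g. [0, 6) and [4, 10) share first = 4, n = 2.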
        // Count the nonzeros to be sent to each processor
        nnz_send[i].resize(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          if (Nrow_local_for_proc[i][p] != 0)
          {
            int* row_start = matrix_pt[i]->row_start();
            // ... (nnz_send[i][p] from the row_start offsets of the rows
            //      destined for processor p)
          }
        }

        // Send the nonzero counts to the other processors (non-blocking)
        for (unsigned p = 0; p < nproc; p++)
        {
          // ... (skip self-communication and empty messages)
          MPI_Isend(&nnz_send[i][p],
                    /* ... 1 unsigned to processor p; request appended
                       to req ... */);
        }

        // Receive the nonzero counts from the other processors
        nnz_recv[i].resize(nproc);
        for (unsigned pp = 0; pp < nproc; pp++)
        {
          // Visit the processors in a staggered order to reduce contention
          unsigned p = (nproc + my_rank - pp) % nproc;
          if (p != my_rank)
          {
            // ... (blocking receive of the count into nnz_temp)
            nnz_recv[i][p] = nnz_temp;
          }
          else
          {
            // No communication required with ourselves
            nnz_recv[i][p] = nnz_send[i][p];
          }
        }
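        // Illustrative note (not from the library source): the staggered
        // ordering p = (nproc + my_rank - pp) % nproc starts each rank at
        // itself and then walks backwards through the ranks, so in each
        // round different ranks talk to different partners instead of all
        // hitting rank 0 first. For my_rank = 2 and nproc = 4 the visiting
        // order is 2, 1, 0, 3.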
        // Send the matrix data (values, column indices and row starts) to
        // the other processors (non-blocking)
        double* values_send = matrix_pt[i]->value();
        int* row_start_send = matrix_pt[i]->row_start();
        int* column_index_send = matrix_pt[i]->column_index();
        for (unsigned p = 0; p < nproc; p++)
        {
          // (sends to self are skipped by the elided checks; that block is
          // copied directly further below)
          if (nnz_send[i][p] != 0)
          {
            // Offsets of the first row / first nonzero destined for p
            // (offset_n is relative to this processor's first row)
            int offset_nnz = row_start_send[offset_n];
            MPI_Isend(values_send + offset_nnz,
                      /* ... nnz_send[i][p] doubles to processor p ... */);
            MPI_Isend(column_index_send + offset_nnz,
                      /* ... nnz_send[i][p] ints to processor p ... */);
            MPI_Isend(row_start_send + offset_n,
                      /* ... the row starts of the rows sent to p ... */);
          }
        }
      }
    }
455 for (
unsigned i = 0;
i <
Nprec;
i++)
459 if (!matrix_pt[
i]->distributed())
461 oomph_info <<
"matrix not distributed" << std::endl;
466 LinearAlgebraDistribution* temp_dist_pt =
467 new LinearAlgebraDistribution(
471 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
475 double* values_pt = matrix_pt[
i]->value();
476 int* column_index_pt = matrix_pt[
i]->column_index();
477 int* row_start_pt = matrix_pt[
i]->row_start();
480 local_matrix_pt->build_without_copy(matrix_pt[
i]->ncol(),
498 LinearAlgebraDistribution* temp_dist_pt =
500 target_first_row[
i][my_rank],
501 target_nrow_local[
i][my_rank]);
504 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
510 unsigned nnz_total = 0;
511 for (
unsigned p = 0; p < nproc; p++)
513 nnz_total += nnz_recv[
i][p];
517 Vector<unsigned> nnz_start_proc;
518 Vector<unsigned> nnz_start_index;
519 unsigned row_ptr = target_first_row[
i][my_rank];
521 unsigned nnz_ptr = 0;
522 for (p = 0; p < int(nproc); p++)
527 nnz_start_proc.push_back(p);
528 nnz_start_index.push_back(nnz_ptr);
529 nnz_ptr += nnz_recv[
i][p];
536 double* values_recv =
new double[nnz_total];
537 int* column_index_recv =
new int[nnz_total];
538 int* row_start_recv =
new int[target_nrow_local[
i][my_rank] + 1];
        // Receive the coefficient data, visiting the processors in a
        // staggered order
        for (unsigned pp = 0; pp < nproc; pp++)
        {
          unsigned p = (nproc + my_rank - pp) % nproc;

          // Receive from the other processors...
          if (p != my_rank)
          {
            if (nnz_recv[i][p] != 0)
            {
              // Locate processor p's block in the receive buffers
              unsigned k = 0;
              while (nnz_start_proc[k] != p)
              {
                k++;
              }
              int offset_nnz = nnz_start_index[k];
              // ... (offset_n from First_row_from_proc[i][p])
              MPI_Recv(values_recv + offset_nnz,
                       /* ... nnz_recv[i][p] doubles from p ... */);
              MPI_Recv(column_index_recv + offset_nnz,
                       /* ... nnz_recv[i][p] ints from p ... */);
              MPI_Recv(row_start_recv + offset_n,
                       /* ... the row starts of the received rows ... */);
            }
          }
          // ...and copy this processor's own contribution directly
          else if (nnz_recv[i][p] != 0)
          {
            double* values_send = matrix_pt[i]->value();
            int* row_start_send = matrix_pt[i]->row_start();
            int* column_index_send = matrix_pt[i]->column_index();

            // Offsets into the original (send) and the new (recv) storage
            unsigned offset_n_send =
              First_row_for_proc[i][my_rank] - matrix_pt[i]->first_row(my_rank);
            unsigned offset_nnz_send = row_start_send[offset_n_send];
            unsigned offset_n_recv =
              First_row_from_proc[i][my_rank] - target_first_row[i][my_rank];
            unsigned k = 0;
            while (nnz_start_proc[k] != p)
            {
              k++;
            }
            unsigned offset_nnz_recv = nnz_start_index[k];

            // Copy the values and column indices
            unsigned n_nnz = nnz_send[i][my_rank];
            for (unsigned j = 0; j < n_nnz; j++)
            {
              values_recv[offset_nnz_recv + j] =
                values_send[offset_nnz_send + j];
              column_index_recv[offset_nnz_recv + j] =
                column_index_send[offset_nnz_send + j];
            }

            // Copy the row starts
            unsigned n_n = Nrow_local_from_proc[i][my_rank];
            for (unsigned j = 0; j < n_n; j++)
            {
              row_start_recv[offset_n_recv + j] =
                row_start_send[offset_n_send + j];
            }
          }
        }

        // The received row_start blocks are still relative to the matrices
        // they came from: shift each block so that it points into the
        // assembled values/column_index arrays
        unsigned nproc_contrib = nnz_start_index.size();
        for (unsigned j = 0; j < nproc_contrib; j++)
        {
          unsigned first = First_row_from_proc[i][nnz_start_proc[j]] -
                           target_first_row[i][my_rank];
          unsigned last = first + Nrow_local_from_proc[i][nnz_start_proc[j]];
          unsigned nnz_inc = nnz_start_index[j] - row_start_recv[first];
          for (unsigned k = first; k < last; k++)
          {
            row_start_recv[k] += nnz_inc;
          }
        }
        row_start_recv[target_nrow_local[i][my_rank]] = int(nnz_total);
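        // Illustrative note (not from the library source): if ranks 0 and 1
        // each contribute 3 rows with local row starts {0,2,5} and {0,1,3},
        // rank 0 contributing 7 nonzeros and rank 1's 6 nonzeros being
        // appended at index 7 of the value array, then rank 1's block is
        // shifted by nnz_inc = 7 - 0, giving the assembled row_start
        // {0,2,5,7,8,10} with the final entry set to nnz_total = 13.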
        // Build the local matrix from the assembled arrays (no copy)
        local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
                                            nnz_total,
                                            values_recv,
                                            column_index_recv,
                                            row_start_recv);
      }
    }

    // Wait for all the non-blocking sends to complete before the send
    // buffers (the original matrices) can safely be reused
    unsigned c = req.size();
    Vector<MPI_Status> stat(c);
    MPI_Waitall(c, &req[0], &stat[0]);
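    // Illustrative note (not from the library source): MPI_Isend only starts
    // a send; the buffer passed to it (here the value, column_index and
    // row_start arrays of the original matrices) must not be modified until
    // the request completes, which is what the MPI_Waitall above guarantees.
    // A minimal pattern:
    //
    //   MPI_Request r;
    //   MPI_Isend(buf, n, MPI_DOUBLE, dest, tag, comm, &r);
    //   // ... do unrelated work, but do not modify or free buf ...
    //   MPI_Wait(&r, MPI_STATUS_IGNORE);   // now buf may be reused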
    // Alternative communication method (these variants are selected by the
    // Method flag): the nonzero counts are exchanged with non-blocking sends
    // and receives, and the matrix data is exchanged as single messages
    // built from MPI derived datatypes
    unsigned* nnz_recv_temp = new unsigned[nproc * Nprec];
    for (unsigned j = 0; j < nproc * Nprec; j++)
    {
      nnz_recv_temp[j] = 0;
    }

    for (unsigned i = 0; i < Nprec; i++)
    {
      // Non-distributed matrices are simply pointed at (no communication)
      if (!matrix_pt[i]->distributed())
      {
        LinearAlgebraDistribution* temp_dist_pt =
          new LinearAlgebraDistribution(
            /* ... local communicator, nrow, not distributed ... */);
        local_matrix_pt = new CRDoubleMatrix(temp_dist_pt);

        double* values_pt = matrix_pt[i]->value();
        int* column_index_pt = matrix_pt[i]->column_index();
        int* row_start_pt = matrix_pt[i]->row_start();
        local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
                                            /* ... nnz, values_pt,
                                               column_index_pt,
                                               row_start_pt ... */);
      }
      else
      {
        // Target and current distributions of matrix i (as in the method
        // above)
        unsigned nrow = matrix_pt[i]->nrow();
        unsigned nproc_local = Nproc_for_prec[i];
        unsigned last_local_proc = First_proc_for_prec[i] + nproc_local - 1;
        target_first_row[i].resize(nproc);
        target_nrow_local[i].resize(nproc);
        for (unsigned p = 0; p < nproc_local; p++)
        {
          unsigned pp = p + First_proc_for_prec[i];
          target_first_row[i][pp] =
            unsigned(double(p * nrow) / double(nproc_local));
        }
        for (unsigned p = 0; p < nproc_local - 1; p++)
        {
          unsigned pp = p + First_proc_for_prec[i];
          target_nrow_local[i][pp] =
            target_first_row[i][pp + 1] - target_first_row[i][pp];
        }
        target_nrow_local[i][last_local_proc] =
          nrow - target_first_row[i][last_local_proc];

        Vector<unsigned> current_first_row(nproc);
        Vector<unsigned> current_nrow_local(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          current_first_row[p] = matrix_pt[i]->first_row(p);
          current_nrow_local[p] = matrix_pt[i]->nrow_local(p);
        }

        // Overlap of the current and target distributions
        for (unsigned p = 0; p < nproc; p++)
        {
          if ((target_first_row[i][p] <
               (current_first_row[my_rank] + current_nrow_local[my_rank])) &&
              (current_first_row[my_rank] <
               (target_first_row[i][p] + target_nrow_local[i][p])))
          {
            First_row_for_proc[i][p] =
              std::max(current_first_row[my_rank], target_first_row[i][p]);
            Nrow_local_for_proc[i][p] =
              std::min(
                (current_first_row[my_rank] + current_nrow_local[my_rank]),
                (target_first_row[i][p] + target_nrow_local[i][p])) -
              First_row_for_proc[i][p];
          }
          if ((target_first_row[i][my_rank] <
               (current_first_row[p] + current_nrow_local[p])) &&
              (current_first_row[p] < (target_first_row[i][my_rank] +
                                       target_nrow_local[i][my_rank])))
          {
            First_row_from_proc[i][p] =
              std::max(current_first_row[p], target_first_row[i][my_rank]);
            Nrow_local_from_proc[i][p] =
              std::min((current_first_row[p] + current_nrow_local[p]),
                       (target_first_row[i][my_rank] +
                        target_nrow_local[i][my_rank])) -
              First_row_from_proc[i][p];
          }
        }

        // Count the nonzeros to be sent to each processor
        nnz_send[i].resize(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          if (Nrow_local_for_proc[i][p] != 0)
          {
            int* row_start = matrix_pt[i]->row_start();
            // ... (count the nonzeros in the rows destined for processor p)
          }
        }
        // Exchange the nonzero counts with non-blocking sends and receives
        nnz_recv[i].resize(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          if (p != my_rank)
          {
            // ... (only if rows are actually sent/received)
            MPI_Isend(&nnz_send[i][p],
                      /* ... 1 unsigned to processor p; request appended
                         to req ... */);
            MPI_Irecv(nnz_recv_temp + (i * nproc) + p,
                      /* ... 1 unsigned from processor p; request appended
                         to req ... */);
          }
          else
          {
            // No communication required with ourselves
            nnz_recv_temp[(i * nproc) + p] = nnz_send[i][p];
          }
        }
      }
    }

    // Wait for the counts to arrive, then copy them into nnz_recv
    unsigned c = req.size();
    Vector<MPI_Status> stat(c);
    MPI_Waitall(c, &req[0], &stat[0]);
    for (unsigned i = 0; i < Nprec; i++)
    {
      for (unsigned p = 0; p < nproc; p++)
      {
        nnz_recv[i][p] = nnz_recv_temp[(i * nproc) + p];
      }
    }
    delete[] nnz_recv_temp;
    // This processor now assembles the local matrix for preconditioner
    // "Color" (the preconditioner this processor has been assigned to)

    // Total number of nonzeros in the local matrix
    unsigned nnz_total = 0;
    for (unsigned p = 0; p < nproc; p++)
    {
      nnz_total += nnz_recv[Color][p];
    }

    // Start index of each contributing processor's block in the receive
    // buffers
    Vector<unsigned> nnz_start_proc;
    Vector<unsigned> nnz_start_index;
    unsigned row_ptr = target_first_row[Color][my_rank];
    int p = 0;
    unsigned nnz_ptr = 0;
    for (p = 0; p < int(nproc); p++)
    {
      // ... (record only processors that actually contribute rows, starting
      //      at row_ptr)
      nnz_start_proc.push_back(p);
      nnz_start_index.push_back(nnz_ptr);
      nnz_ptr += nnz_recv[Color][p];
    }

    // MPI derived datatypes used for the matrix exchange (freed at the end)
    Vector<MPI_Datatype> datatypes;

    // Buffers for the received coefficient data
    double* values_recv = new double[nnz_total];
    int* column_index_recv = new int[nnz_total];
    int* row_start_recv = new int[target_nrow_local[Color][my_rank] + 1];
    // Post the non-blocking sends: for every preconditioner, the rows
    // destined for each processor are sent as a single message built from
    // an MPI derived datatype (values, column indices and row starts)
    unsigned c_send = 0;
    Vector<MPI_Request> send_req;
    for (unsigned i = 0; i < Nprec; i++)
    {
      double* values_send = matrix_pt[i]->value();
      int* row_start_send = matrix_pt[i]->row_start();
      int* column_index_send = matrix_pt[i]->column_index();

      for (unsigned p = 0; p < nproc; p++)
      {
        // Only send non-empty blocks to other processors
        if (p != my_rank && nnz_send[i][p] != 0)
        {
          // Contiguous datatypes for the three arrays
          MPI_Datatype datatype_values;
          MPI_Type_contiguous(
            int(nnz_send[i][p]), MPI_DOUBLE, &datatype_values);
          MPI_Type_commit(&datatype_values);
          datatypes.push_back(datatype_values);

          MPI_Datatype datatype_column_index;
          MPI_Type_contiguous(
            int(nnz_send[i][p]), MPI_INT, &datatype_column_index);
          MPI_Type_commit(&datatype_column_index);
          datatypes.push_back(datatype_column_index);

          MPI_Datatype datatype_row_start;
          MPI_Type_contiguous(
            int(Nrow_local_for_proc[i][p]), MPI_INT, &datatype_row_start);
          MPI_Type_commit(&datatype_row_start);
          datatypes.push_back(datatype_row_start);

          // Combine the three blocks into a single struct datatype
          MPI_Datatype typelist[3];
          typelist[0] = datatype_values;
          typelist[1] = datatype_column_index;
          typelist[2] = datatype_row_start;

          // Offsets of the first row / first nonzero destined for p
          // (offset_n is relative to this processor's first row)
          int offset_nnz = row_start_send[offset_n];

          // Displacements of the three blocks relative to the first one
          MPI_Aint displacements[3];
          MPI_Get_address(values_send + offset_nnz, &displacements[0]);
          MPI_Get_address(column_index_send + offset_nnz, &displacements[1]);
          MPI_Get_address(row_start_send + offset_n, &displacements[2]);
          for (int j = 2; j >= 0; j--)
          {
            displacements[j] -= displacements[0];
          }

          int block_length[3];
          block_length[0] = block_length[1] = block_length[2] = 1;

          MPI_Datatype send_type;
          MPI_Type_create_struct(
            3, block_length, displacements, typelist, &send_type);
          MPI_Type_commit(&send_type);
          datatypes.push_back(send_type);

          // Send the whole block in one message
          int tag = this->compute_tag(nproc, my_rank, p, 1);
          MPI_Request tr1;
          send_req.push_back(tr1);
          MPI_Isend(values_send + offset_nnz,
                    /* ... 1 item of send_type to processor p with this tag;
                       request stored in send_req ... */);
          c_send++;
        }
      }
    }
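    // Illustrative note (not from the library source): the pattern above
    // packs three separate arrays into one message. With the displacements
    // made relative to the first block, the send is rooted at that first
    // address (placeholder names below):
    //
    //   MPI_Aint disp[3];
    //   MPI_Get_address(values, &disp[0]);
    //   MPI_Get_address(col_index, &disp[1]);
    //   MPI_Get_address(row_start, &disp[2]);
    //   for (int j = 2; j >= 0; j--) disp[j] -= disp[0];
    //   int blocklen[3] = {1, 1, 1};
    //   MPI_Datatype types[3] = {t_values, t_col_index, t_row_start};
    //   MPI_Datatype msg;
    //   MPI_Type_create_struct(3, blocklen, disp, types, &msg);
    //   MPI_Type_commit(&msg);
    //   MPI_Isend(values, 1, msg, dest, tag, comm, &request);
    //
    // The receiver builds a matching struct datatype rooted at its own
    // buffers, so the three arrays arrive in a single message.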
    // Post the non-blocking receives for the rows of preconditioner Color's
    // matrix that this processor is to hold
    unsigned c_recv = 0;
    Vector<MPI_Request> recv_req;
    for (unsigned p = 0; p < nproc; p++)
    {
      // Only receive non-empty blocks from other processors
      if (p != my_rank && nnz_recv[Color][p] != 0)
      {
        // Contiguous datatypes for the three incoming arrays
        MPI_Datatype datatype_values;
        MPI_Type_contiguous(
          int(nnz_recv[Color][p]), MPI_DOUBLE, &datatype_values);
        MPI_Type_commit(&datatype_values);
        datatypes.push_back(datatype_values);

        MPI_Datatype datatype_column_index;
        MPI_Type_contiguous(
          int(nnz_recv[Color][p]), MPI_INT, &datatype_column_index);
        MPI_Type_commit(&datatype_column_index);
        datatypes.push_back(datatype_column_index);

        MPI_Datatype datatype_row_start;
        MPI_Type_contiguous(int(Nrow_local_from_proc[Color][p]),
                            MPI_INT,
                            &datatype_row_start);
        MPI_Type_commit(&datatype_row_start);
        datatypes.push_back(datatype_row_start);

        // Combine the three blocks into a single struct datatype
        MPI_Datatype typelist[3];
        typelist[0] = datatype_values;
        typelist[1] = datatype_column_index;
        typelist[2] = datatype_row_start;

        // Locate processor p's block in the receive buffers
        unsigned k = 0;
        while (nnz_start_proc[k] != p)
        {
          k++;
        }
        int offset_nnz = nnz_start_index[k];
        // ... (offset_n from First_row_from_proc[Color][p])

        MPI_Aint displacements[3];
        MPI_Get_address(values_recv + offset_nnz, &displacements[0]);
        MPI_Get_address(column_index_recv + offset_nnz, &displacements[1]);
        MPI_Get_address(row_start_recv + offset_n, &displacements[2]);
        for (int j = 2; j >= 0; j--)
        {
          displacements[j] -= displacements[0];
        }

        int block_length[3];
        block_length[0] = block_length[1] = block_length[2] = 1;

        MPI_Datatype recv_type;
        MPI_Type_create_struct(
          3, block_length, displacements, typelist, &recv_type);
        MPI_Type_commit(&recv_type);
        datatypes.push_back(recv_type);

        // Receive the whole block in one message
        int tag = this->compute_tag(nproc, p, my_rank, 1);
        MPI_Request tr1;
        recv_req.push_back(tr1);
        MPI_Irecv(values_recv + offset_nnz,
                  /* ... 1 item of recv_type from processor p with this tag;
                     request stored in recv_req ... */);
        c_recv++;
      }
    }
    // Copy the block that stays on this processor directly
    if (nnz_recv[Color][my_rank] != 0)
    {
      double* values_send = matrix_pt[Color]->value();
      int* row_start_send = matrix_pt[Color]->row_start();
      int* column_index_send = matrix_pt[Color]->column_index();

      // Offsets into the original (send) and the new (recv) storage
      unsigned offset_n_send = First_row_for_proc[Color][my_rank] -
                               matrix_pt[Color]->first_row(my_rank);
      unsigned offset_nnz_send = row_start_send[offset_n_send];
      unsigned offset_n_recv = First_row_from_proc[Color][my_rank] -
                               target_first_row[Color][my_rank];
      unsigned k = 0;
      while (nnz_start_proc[k] != my_rank)
      {
        k++;
      }
      unsigned offset_nnz_recv = nnz_start_index[k];

      // Copy the values and column indices
      unsigned n_nnz = nnz_send[Color][my_rank];
      for (unsigned j = 0; j < n_nnz; j++)
      {
        values_recv[offset_nnz_recv + j] = values_send[offset_nnz_send + j];
        column_index_recv[offset_nnz_recv + j] =
          column_index_send[offset_nnz_send + j];
      }

      // Copy the row starts
      unsigned n_n = Nrow_local_from_proc[Color][my_rank];
      for (unsigned j = 0; j < n_n; j++)
      {
        row_start_recv[offset_n_recv + j] = row_start_send[offset_n_send + j];
      }
    }

    // The local matrix is built with this processor's target distribution
    // for preconditioner Color
    LinearAlgebraDistribution* temp_dist_pt =
      new LinearAlgebraDistribution(/* ... local communicator ... */,
                                    target_first_row[Color][my_rank],
                                    target_nrow_local[Color][my_rank]);
    local_matrix_pt = new CRDoubleMatrix(temp_dist_pt);
    delete temp_dist_pt;
    // Wait for all the receives to complete
    Vector<MPI_Status> recv_stat(c_recv);
    MPI_Waitall(c_recv, &recv_req[0], &recv_stat[0]);

    // Shift each received row_start block so that it points into the
    // assembled values/column_index arrays
    unsigned nproc_contrib = nnz_start_index.size();
    for (unsigned j = 0; j < nproc_contrib; j++)
    {
      unsigned first = First_row_from_proc[Color][nnz_start_proc[j]] -
                       target_first_row[Color][my_rank];
      unsigned last = first + Nrow_local_from_proc[Color][nnz_start_proc[j]];
      unsigned nnz_inc = nnz_start_index[j] - row_start_recv[first];
      for (unsigned k = first; k < last; k++)
      {
        row_start_recv[k] += nnz_inc;
      }
    }
    row_start_recv[target_nrow_local[Color][my_rank]] = int(nnz_total);

    // Build the local matrix from the assembled arrays (no copy)
    local_matrix_pt->build_without_copy(matrix_pt[Color]->ncol(),
                                        nnz_total,
                                        values_recv,
                                        column_index_recv,
                                        row_start_recv);

    // Wait for the sends to complete before freeing the datatypes
    Vector<MPI_Status> send_stat(c_send);
    MPI_Waitall(c_send, &send_req[0], &send_stat[0]);

    // Free the MPI derived datatypes
    unsigned ndatatypes = datatypes.size();
    for (unsigned i = 0; i < ndatatypes; i++)
    {
      MPI_Type_free(&datatypes[i]);
    }
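    // Illustrative note (not from the library source): lifecycle of the
    // derived datatypes used above. Each MPI_Type_contiguous /
    // MPI_Type_create_struct is followed by MPI_Type_commit before use, the
    // handles are collected in the "datatypes" vector, and they are freed
    // with MPI_Type_free only after MPI_Waitall has confirmed that no
    // outstanding send or receive still refers to them.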
    // Another communication method: as above, but the receives for the
    // matrix data are posted before the sends
    unsigned* nnz_recv_temp = new unsigned[nproc * Nprec];
    for (unsigned j = 0; j < nproc * Nprec; j++)
    {
      nnz_recv_temp[j] = 0;
    }

    for (unsigned i = 0; i < Nprec; i++)
    {
      // Non-distributed matrices are simply pointed at (no communication)
      if (!matrix_pt[i]->distributed())
      {
        LinearAlgebraDistribution* temp_dist_pt =
          new LinearAlgebraDistribution(
            /* ... local communicator, nrow, not distributed ... */);
        local_matrix_pt = new CRDoubleMatrix(temp_dist_pt);
        delete temp_dist_pt;

        double* values_pt = matrix_pt[i]->value();
        int* column_index_pt = matrix_pt[i]->column_index();
        int* row_start_pt = matrix_pt[i]->row_start();
        local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
                                            matrix_pt[i]->nnz(),
                                            values_pt,
                                            column_index_pt,
                                            row_start_pt);
      }
      else
      {
        // Target and current distributions of matrix i (as before)
        unsigned nrow = matrix_pt[i]->nrow();
        unsigned nproc_local = Nproc_for_prec[i];
        unsigned last_local_proc = First_proc_for_prec[i] + nproc_local - 1;
        target_first_row[i].resize(nproc);
        target_nrow_local[i].resize(nproc);
        for (unsigned p = 0; p < nproc_local; p++)
        {
          unsigned pp = p + First_proc_for_prec[i];
          target_first_row[i][pp] =
            unsigned(double(p * nrow) / double(nproc_local));
        }
        for (unsigned p = 0; p < nproc_local - 1; p++)
        {
          unsigned pp = p + First_proc_for_prec[i];
          target_nrow_local[i][pp] =
            target_first_row[i][pp + 1] - target_first_row[i][pp];
        }
        target_nrow_local[i][last_local_proc] =
          nrow - target_first_row[i][last_local_proc];

        Vector<unsigned> current_first_row(nproc);
        Vector<unsigned> current_nrow_local(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          current_first_row[p] = matrix_pt[i]->first_row(p);
          current_nrow_local[p] = matrix_pt[i]->nrow_local(p);
        }

        // Overlap of the current and target distributions (as before)
        for (unsigned p = 0; p < nproc; p++)
        {
          if ((target_first_row[i][p] <
               (current_first_row[my_rank] + current_nrow_local[my_rank])) &&
              (current_first_row[my_rank] <
               (target_first_row[i][p] + target_nrow_local[i][p])))
          {
            First_row_for_proc[i][p] =
              std::max(current_first_row[my_rank], target_first_row[i][p]);
            Nrow_local_for_proc[i][p] =
              std::min(
                (current_first_row[my_rank] + current_nrow_local[my_rank]),
                (target_first_row[i][p] + target_nrow_local[i][p])) -
              First_row_for_proc[i][p];
          }
          if ((target_first_row[i][my_rank] <
               (current_first_row[p] + current_nrow_local[p])) &&
              (current_first_row[p] < (target_first_row[i][my_rank] +
                                       target_nrow_local[i][my_rank])))
          {
            First_row_from_proc[i][p] =
              std::max(current_first_row[p], target_first_row[i][my_rank]);
            Nrow_local_from_proc[i][p] =
              std::min((current_first_row[p] + current_nrow_local[p]),
                       (target_first_row[i][my_rank] +
                        target_nrow_local[i][my_rank])) -
              First_row_from_proc[i][p];
          }
        }

        // Count the nonzeros to be sent to each processor
        nnz_send[i].resize(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          if (Nrow_local_for_proc[i][p] != 0)
          {
            int* row_start = matrix_pt[i]->row_start();
            // ... (count the nonzeros in the rows destined for processor p)
          }
        }
        // Exchange the nonzero counts with non-blocking sends and receives
        nnz_recv[i].resize(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          if (p != my_rank)
          {
            // ... (only if rows are actually sent/received)
            int tag = this->compute_tag(nproc, my_rank, p, 0);
            MPI_Isend(&nnz_send[i][p],
                      /* ... 1 unsigned to processor p with this tag;
                         request appended to req ... */);
            tag = this->compute_tag(nproc, p, my_rank, 0);
            MPI_Irecv(nnz_recv_temp + (i * nproc) + p,
                      /* ... 1 unsigned from processor p with this tag;
                         request appended to req ... */);
          }
          else
          {
            // No communication required with ourselves
            nnz_recv_temp[(i * nproc) + p] = nnz_send[i][p];
          }
        }
      }
    }

    // Wait for the counts to arrive, then copy them into nnz_recv
    unsigned c = req.size();
    Vector<MPI_Status> stat(c);
    MPI_Waitall(c, &req[0], &stat[0]);
    for (unsigned i = 0; i < Nprec; i++)
    {
      for (unsigned p = 0; p < nproc; p++)
      {
        nnz_recv[i][p] = nnz_recv_temp[(i * nproc) + p];
      }
    }
    delete[] nnz_recv_temp;
    // Assemble the local matrix for preconditioner Color

    // Total number of nonzeros in the local matrix
    unsigned nnz_total = 0;
    for (unsigned p = 0; p < nproc; p++)
    {
      nnz_total += nnz_recv[Color][p];
    }

    // Start index of each contributing processor's block in the receive
    // buffers
    Vector<unsigned> nnz_start_proc;
    Vector<unsigned> nnz_start_index;
    unsigned row_ptr = target_first_row[Color][my_rank];
    int p = 0;
    unsigned nnz_ptr = 0;
    for (p = 0; p < int(nproc); p++)
    {
      // ... (record only processors that actually contribute rows)
      nnz_start_proc.push_back(p);
      nnz_start_index.push_back(nnz_ptr);
      nnz_ptr += nnz_recv[Color][p];
    }

    // MPI derived datatypes used for the matrix exchange (freed at the end)
    Vector<MPI_Datatype> datatypes;

    // Buffers for the received coefficient data
    double* values_recv = new double[nnz_total];
    int* column_index_recv = new int[nnz_total];
    int* row_start_recv = new int[target_nrow_local[Color][my_rank] + 1];
    // Post the non-blocking receives first
    unsigned c_recv = 0;
    Vector<MPI_Request> recv_req;
    for (unsigned p = 0; p < nproc; p++)
    {
      if (p != my_rank && nnz_recv[Color][p] != 0)
      {
        // Contiguous datatypes for the three incoming arrays
        MPI_Datatype datatype_values;
        MPI_Type_contiguous(
          int(nnz_recv[Color][p]), MPI_DOUBLE, &datatype_values);
        MPI_Type_commit(&datatype_values);
        datatypes.push_back(datatype_values);

        MPI_Datatype datatype_column_index;
        MPI_Type_contiguous(
          int(nnz_recv[Color][p]), MPI_INT, &datatype_column_index);
        MPI_Type_commit(&datatype_column_index);
        datatypes.push_back(datatype_column_index);

        MPI_Datatype datatype_row_start;
        MPI_Type_contiguous(int(Nrow_local_from_proc[Color][p]),
                            MPI_INT,
                            &datatype_row_start);
        MPI_Type_commit(&datatype_row_start);
        datatypes.push_back(datatype_row_start);

        // Combine the three blocks into a single struct datatype
        MPI_Datatype typelist[3];
        typelist[0] = datatype_values;
        typelist[1] = datatype_column_index;
        typelist[2] = datatype_row_start;

        // Locate processor p's block in the receive buffers
        unsigned k = 0;
        while (nnz_start_proc[k] != p)
        {
          k++;
        }
        int offset_nnz = nnz_start_index[k];
        // ... (offset_n from First_row_from_proc[Color][p])

        MPI_Aint displacements[3];
        MPI_Get_address(values_recv + offset_nnz, &displacements[0]);
        MPI_Get_address(column_index_recv + offset_nnz, &displacements[1]);
        MPI_Get_address(row_start_recv + offset_n, &displacements[2]);
        for (int j = 2; j >= 0; j--)
        {
          displacements[j] -= displacements[0];
        }

        int block_length[3];
        block_length[0] = block_length[1] = block_length[2] = 1;

        MPI_Datatype recv_type;
        MPI_Type_create_struct(
          3, block_length, displacements, typelist, &recv_type);
        MPI_Type_commit(&recv_type);
        datatypes.push_back(recv_type);

        int tag = this->compute_tag(nproc, p, my_rank, 1);
        MPI_Request tr1;
        recv_req.push_back(tr1);
        MPI_Irecv(values_recv + offset_nnz,
                  /* ... 1 item of recv_type from processor p with this tag;
                     request stored in recv_req ... */);
        c_recv++;
      }
    }
    // Then post the non-blocking sends
    unsigned c_send = 0;
    Vector<MPI_Request> send_req;
    for (unsigned i = 0; i < Nprec; i++)
    {
      double* values_send = matrix_pt[i]->value();
      int* row_start_send = matrix_pt[i]->row_start();
      int* column_index_send = matrix_pt[i]->column_index();

      for (unsigned p = 0; p < nproc; p++)
      {
        if (p != my_rank && nnz_send[i][p] != 0)
        {
          // Contiguous datatypes for the three outgoing arrays
          MPI_Datatype datatype_values;
          MPI_Type_contiguous(
            int(nnz_send[i][p]), MPI_DOUBLE, &datatype_values);
          MPI_Type_commit(&datatype_values);
          datatypes.push_back(datatype_values);

          MPI_Datatype datatype_column_index;
          MPI_Type_contiguous(
            int(nnz_send[i][p]), MPI_INT, &datatype_column_index);
          MPI_Type_commit(&datatype_column_index);
          datatypes.push_back(datatype_column_index);

          MPI_Datatype datatype_row_start;
          MPI_Type_contiguous(
            int(Nrow_local_for_proc[i][p]), MPI_INT, &datatype_row_start);
          MPI_Type_commit(&datatype_row_start);
          datatypes.push_back(datatype_row_start);

          // Combine the three blocks into a single struct datatype
          MPI_Datatype typelist[3];
          typelist[0] = datatype_values;
          typelist[1] = datatype_column_index;
          typelist[2] = datatype_row_start;

          // Offsets of the first row / first nonzero destined for p
          // (offset_n is relative to this processor's first row)
          int offset_nnz = row_start_send[offset_n];

          MPI_Aint displacements[3];
          MPI_Get_address(values_send + offset_nnz, &displacements[0]);
          MPI_Get_address(column_index_send + offset_nnz, &displacements[1]);
          MPI_Get_address(row_start_send + offset_n, &displacements[2]);
          for (int j = 2; j >= 0; j--)
          {
            displacements[j] -= displacements[0];
          }

          int block_length[3];
          block_length[0] = block_length[1] = block_length[2] = 1;

          MPI_Datatype send_type;
          MPI_Type_create_struct(
            3, block_length, displacements, typelist, &send_type);
          MPI_Type_commit(&send_type);
          datatypes.push_back(send_type);

          int tag = this->compute_tag(nproc, my_rank, p, 1);
          MPI_Request tr1;
          send_req.push_back(tr1);
          MPI_Isend(values_send + offset_nnz,
                    /* ... 1 item of send_type to processor p with this tag;
                       request stored in send_req ... */);
          c_send++;
        }
      }
    }
    // Copy the block that stays on this processor directly
    if (nnz_recv[Color][my_rank] != 0)
    {
      double* values_send = matrix_pt[Color]->value();
      int* row_start_send = matrix_pt[Color]->row_start();
      int* column_index_send = matrix_pt[Color]->column_index();

      // Offsets into the original (send) and the new (recv) storage
      unsigned offset_n_send = First_row_for_proc[Color][my_rank] -
                               matrix_pt[Color]->first_row(my_rank);
      unsigned offset_nnz_send = row_start_send[offset_n_send];
      unsigned offset_n_recv = First_row_from_proc[Color][my_rank] -
                               target_first_row[Color][my_rank];
      unsigned k = 0;
      while (nnz_start_proc[k] != my_rank)
      {
        k++;
      }
      unsigned offset_nnz_recv = nnz_start_index[k];

      // Copy the values and column indices
      unsigned n_nnz = nnz_send[Color][my_rank];
      for (unsigned j = 0; j < n_nnz; j++)
      {
        values_recv[offset_nnz_recv + j] = values_send[offset_nnz_send + j];
        column_index_recv[offset_nnz_recv + j] =
          column_index_send[offset_nnz_send + j];
      }

      // Copy the row starts
      unsigned n_n = Nrow_local_from_proc[Color][my_rank];
      for (unsigned j = 0; j < n_n; j++)
      {
        row_start_recv[offset_n_recv + j] = row_start_send[offset_n_send + j];
      }
    }

    // The local matrix is built with this processor's target distribution
    // for preconditioner Color
    LinearAlgebraDistribution* temp_dist_pt =
      new LinearAlgebraDistribution(/* ... local communicator ... */,
                                    target_first_row[Color][my_rank],
                                    target_nrow_local[Color][my_rank]);
    local_matrix_pt = new CRDoubleMatrix(temp_dist_pt);
    delete temp_dist_pt;
    // Wait for all the receives to complete
    Vector<MPI_Status> recv_stat(c_recv);
    MPI_Waitall(c_recv, &recv_req[0], &recv_stat[0]);

    // Shift each received row_start block so that it points into the
    // assembled values/column_index arrays
    unsigned nproc_contrib = nnz_start_index.size();
    for (unsigned j = 0; j < nproc_contrib; j++)
    {
      unsigned first = First_row_from_proc[Color][nnz_start_proc[j]] -
                       target_first_row[Color][my_rank];
      unsigned last = first + Nrow_local_from_proc[Color][nnz_start_proc[j]];
      unsigned nnz_inc = nnz_start_index[j] - row_start_recv[first];
      for (unsigned k = first; k < last; k++)
      {
        row_start_recv[k] += nnz_inc;
      }
    }
    row_start_recv[target_nrow_local[Color][my_rank]] = int(nnz_total);

    // Build the local matrix from the assembled arrays (no copy)
    local_matrix_pt->build_without_copy(matrix_pt[Color]->ncol(),
                                        nnz_total,
                                        values_recv,
                                        column_index_recv,
                                        row_start_recv);

    // Wait for the sends to complete before freeing the datatypes
    Vector<MPI_Status> send_stat(c_send);
    MPI_Waitall(c_send, &send_req[0], &send_stat[0]);

    // Free the MPI derived datatypes
    unsigned ndatatypes = datatypes.size();
    for (unsigned i = 0; i < ndatatypes; i++)
    {
      MPI_Type_free(&datatypes[i]);
    }
    // Final communication method: the nonzero counts are sent with
    // non-blocking sends and received with blocking receives; the matrix
    // data is exchanged with derived datatypes as in the previous methods
    unsigned* nnz_recv_temp = new unsigned[nproc * Nprec];
    for (unsigned j = 0; j < nproc * Nprec; j++)
    {
      nnz_recv_temp[j] = 0;
    }

    for (unsigned i = 0; i < Nprec; i++)
    {
      // Non-distributed matrices are simply pointed at (no communication)
      if (!matrix_pt[i]->distributed())
      {
        LinearAlgebraDistribution* temp_dist_pt =
          new LinearAlgebraDistribution(
            /* ... local communicator, nrow, not distributed ... */);
        local_matrix_pt = new CRDoubleMatrix(temp_dist_pt);
        delete temp_dist_pt;

        double* values_pt = matrix_pt[i]->value();
        int* column_index_pt = matrix_pt[i]->column_index();
        int* row_start_pt = matrix_pt[i]->row_start();
        local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
                                            matrix_pt[i]->nnz(),
                                            values_pt,
                                            column_index_pt,
                                            row_start_pt);
      }
      else
      {
        // Target and current distributions of matrix i (as before)
        unsigned nrow = matrix_pt[i]->nrow();
        unsigned nproc_local = Nproc_for_prec[i];
        unsigned last_local_proc = First_proc_for_prec[i] + nproc_local - 1;
        target_first_row[i].resize(nproc);
        target_nrow_local[i].resize(nproc);
        for (unsigned p = 0; p < nproc_local; p++)
        {
          unsigned pp = p + First_proc_for_prec[i];
          target_first_row[i][pp] =
            unsigned(double(p * nrow) / double(nproc_local));
        }
        for (unsigned p = 0; p < nproc_local - 1; p++)
        {
          unsigned pp = p + First_proc_for_prec[i];
          target_nrow_local[i][pp] =
            target_first_row[i][pp + 1] - target_first_row[i][pp];
        }
        target_nrow_local[i][last_local_proc] =
          nrow - target_first_row[i][last_local_proc];

        Vector<unsigned> current_first_row(nproc);
        Vector<unsigned> current_nrow_local(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          current_first_row[p] = matrix_pt[i]->first_row(p);
          current_nrow_local[p] = matrix_pt[i]->nrow_local(p);
        }

        // Overlap of the current and target distributions (as before)
        for (unsigned p = 0; p < nproc; p++)
        {
          if ((target_first_row[i][p] <
               (current_first_row[my_rank] + current_nrow_local[my_rank])) &&
              (current_first_row[my_rank] <
               (target_first_row[i][p] + target_nrow_local[i][p])))
          {
            First_row_for_proc[i][p] =
              std::max(current_first_row[my_rank], target_first_row[i][p]);
            Nrow_local_for_proc[i][p] =
              std::min(
                (current_first_row[my_rank] + current_nrow_local[my_rank]),
                (target_first_row[i][p] + target_nrow_local[i][p])) -
              First_row_for_proc[i][p];
          }
          if ((target_first_row[i][my_rank] <
               (current_first_row[p] + current_nrow_local[p])) &&
              (current_first_row[p] < (target_first_row[i][my_rank] +
                                       target_nrow_local[i][my_rank])))
          {
            First_row_from_proc[i][p] =
              std::max(current_first_row[p], target_first_row[i][my_rank]);
            Nrow_local_from_proc[i][p] =
              std::min((current_first_row[p] + current_nrow_local[p]),
                       (target_first_row[i][my_rank] +
                        target_nrow_local[i][my_rank])) -
              First_row_from_proc[i][p];
          }
        }

        // Count the nonzeros to be sent to each processor
        nnz_send[i].resize(nproc);
        for (unsigned p = 0; p < nproc; p++)
        {
          if (Nrow_local_for_proc[i][p] != 0)
          {
            int* row_start = matrix_pt[i]->row_start();
            // ... (count the nonzeros in the rows destined for processor p)
          }
        }
        // Send the nonzero counts to the other processors (non-blocking);
        // the counts are received with blocking receives further below
        for (unsigned p = 0; p < nproc; p++)
        {
          if (p != my_rank)
          {
            // ... (only if rows are actually sent)
            int tag = this->compute_tag(nproc, my_rank, p, 0);
            MPI_Isend(&nnz_send[i][p],
                      /* ... 1 unsigned to processor p with this tag;
                         request appended to req ... */);
          }
          else
          {
            // No communication required with ourselves
            nnz_recv_temp[(i * nproc) + p] = nnz_send[i][p];
          }
        }
      }
    }
    // Receive the nonzero counts with blocking receives, visiting the
    // processors in a staggered order
    for (unsigned i = 0; i < Nprec; i++)
    {
      nnz_recv[i].resize(nproc);
      for (unsigned pp = 0; pp < nproc; pp++)
      {
        unsigned p = (nproc + my_rank - pp) % nproc;
        if (p != my_rank)
        {
          // ... (only if rows are actually received)
          int tag = this->compute_tag(nproc, p, my_rank, 0);
          unsigned nnz_temp = 0;
          MPI_Recv(&nnz_temp,
                   /* ... 1 unsigned from processor p with this tag ... */);
          nnz_recv[i][p] = nnz_temp;
        }
        else
        {
          // No communication required with ourselves
          nnz_recv[i][p] = nnz_send[i][p];
        }
      }
    }
    // Assemble the local matrix for preconditioner Color

    // Total number of nonzeros in the local matrix
    unsigned nnz_total = 0;
    for (unsigned p = 0; p < nproc; p++)
    {
      nnz_total += nnz_recv[Color][p];
    }

    // Start index of each contributing processor's block in the receive
    // buffers
    Vector<unsigned> nnz_start_proc;
    Vector<unsigned> nnz_start_index;
    unsigned row_ptr = target_first_row[Color][my_rank];
    int p = 0;
    unsigned nnz_ptr = 0;
    for (p = 0; p < int(nproc); p++)
    {
      // ... (record only processors that actually contribute rows)
      nnz_start_proc.push_back(p);
      nnz_start_index.push_back(nnz_ptr);
      nnz_ptr += nnz_recv[Color][p];
    }

    // MPI derived datatypes used for the matrix exchange (freed at the end)
    Vector<MPI_Datatype> datatypes;

    // Buffers for the received coefficient data
    double* values_recv = new double[nnz_total];
    int* column_index_recv = new int[nnz_total];
    int* row_start_recv = new int[target_nrow_local[Color][my_rank] + 1];
    // Post the non-blocking receives first
    unsigned c_recv = 0;
    Vector<MPI_Request> recv_req;
    for (unsigned p = 0; p < nproc; p++)
    {
      if (p != my_rank && nnz_recv[Color][p] != 0)
      {
        // Contiguous datatypes for the three incoming arrays
        MPI_Datatype datatype_values;
        MPI_Type_contiguous(
          int(nnz_recv[Color][p]), MPI_DOUBLE, &datatype_values);
        MPI_Type_commit(&datatype_values);
        datatypes.push_back(datatype_values);

        MPI_Datatype datatype_column_index;
        MPI_Type_contiguous(
          int(nnz_recv[Color][p]), MPI_INT, &datatype_column_index);
        MPI_Type_commit(&datatype_column_index);
        datatypes.push_back(datatype_column_index);

        MPI_Datatype datatype_row_start;
        MPI_Type_contiguous(int(Nrow_local_from_proc[Color][p]),
                            MPI_INT,
                            &datatype_row_start);
        MPI_Type_commit(&datatype_row_start);
        datatypes.push_back(datatype_row_start);

        // Combine the three blocks into a single struct datatype
        MPI_Datatype typelist[3];
        typelist[0] = datatype_values;
        typelist[1] = datatype_column_index;
        typelist[2] = datatype_row_start;

        // Locate processor p's block in the receive buffers
        unsigned k = 0;
        while (nnz_start_proc[k] != p)
        {
          k++;
        }
        int offset_nnz = nnz_start_index[k];
        // ... (offset_n from First_row_from_proc[Color][p])

        MPI_Aint displacements[3];
        MPI_Get_address(values_recv + offset_nnz, &displacements[0]);
        MPI_Get_address(column_index_recv + offset_nnz, &displacements[1]);
        MPI_Get_address(row_start_recv + offset_n, &displacements[2]);
        for (int j = 2; j >= 0; j--)
        {
          displacements[j] -= displacements[0];
        }

        int block_length[3];
        block_length[0] = block_length[1] = block_length[2] = 1;

        MPI_Datatype recv_type;
        MPI_Type_create_struct(
          3, block_length, displacements, typelist, &recv_type);
        MPI_Type_commit(&recv_type);
        datatypes.push_back(recv_type);

        int tag = this->compute_tag(nproc, p, my_rank, 1);
        MPI_Request tr1;
        recv_req.push_back(tr1);
        MPI_Irecv(values_recv + offset_nnz,
                  /* ... 1 item of recv_type from processor p with this tag;
                     request stored in recv_req ... */);
        c_recv++;
      }
    }
    // Then post the non-blocking sends
    unsigned c_send = 0;
    Vector<MPI_Request> send_req;
    for (unsigned i = 0; i < Nprec; i++)
    {
      double* values_send = matrix_pt[i]->value();
      int* row_start_send = matrix_pt[i]->row_start();
      int* column_index_send = matrix_pt[i]->column_index();

      for (unsigned p = 0; p < nproc; p++)
      {
        if (p != my_rank && nnz_send[i][p] != 0)
        {
          // Contiguous datatypes for the three outgoing arrays
          MPI_Datatype datatype_values;
          MPI_Type_contiguous(
            int(nnz_send[i][p]), MPI_DOUBLE, &datatype_values);
          MPI_Type_commit(&datatype_values);
          datatypes.push_back(datatype_values);

          MPI_Datatype datatype_column_index;
          MPI_Type_contiguous(
            int(nnz_send[i][p]), MPI_INT, &datatype_column_index);
          MPI_Type_commit(&datatype_column_index);
          datatypes.push_back(datatype_column_index);

          MPI_Datatype datatype_row_start;
          MPI_Type_contiguous(
            int(Nrow_local_for_proc[i][p]), MPI_INT, &datatype_row_start);
          MPI_Type_commit(&datatype_row_start);
          datatypes.push_back(datatype_row_start);

          // Combine the three blocks into a single struct datatype
          MPI_Datatype typelist[3];
          typelist[0] = datatype_values;
          typelist[1] = datatype_column_index;
          typelist[2] = datatype_row_start;

          // Offsets of the first row / first nonzero destined for p
          // (offset_n is relative to this processor's first row)
          int offset_nnz = row_start_send[offset_n];

          MPI_Aint displacements[3];
          MPI_Get_address(values_send + offset_nnz, &displacements[0]);
          MPI_Get_address(column_index_send + offset_nnz, &displacements[1]);
          MPI_Get_address(row_start_send + offset_n, &displacements[2]);
          for (int j = 2; j >= 0; j--)
          {
            displacements[j] -= displacements[0];
          }

          int block_length[3];
          block_length[0] = block_length[1] = block_length[2] = 1;

          MPI_Datatype send_type;
          MPI_Type_create_struct(
            3, block_length, displacements, typelist, &send_type);
          MPI_Type_commit(&send_type);
          datatypes.push_back(send_type);

          int tag = this->compute_tag(nproc, my_rank, p, 1);
          MPI_Request tr1;
          send_req.push_back(tr1);
          MPI_Isend(values_send + offset_nnz,
                    /* ... 1 item of send_type to processor p with this tag;
                       request stored in send_req ... */);
          c_send++;
        }
      }
    }
    // Copy the block that stays on this processor directly
    if (nnz_recv[Color][my_rank] != 0)
    {
      double* values_send = matrix_pt[Color]->value();
      int* row_start_send = matrix_pt[Color]->row_start();
      int* column_index_send = matrix_pt[Color]->column_index();

      // Offsets into the original (send) and the new (recv) storage
      unsigned offset_n_send = First_row_for_proc[Color][my_rank] -
                               matrix_pt[Color]->first_row(my_rank);
      unsigned offset_nnz_send = row_start_send[offset_n_send];
      unsigned offset_n_recv = First_row_from_proc[Color][my_rank] -
                               target_first_row[Color][my_rank];
      unsigned k = 0;
      while (nnz_start_proc[k] != my_rank)
      {
        k++;
      }
      unsigned offset_nnz_recv = nnz_start_index[k];

      // Copy the values and column indices
      unsigned n_nnz = nnz_send[Color][my_rank];
      for (unsigned j = 0; j < n_nnz; j++)
      {
        values_recv[offset_nnz_recv + j] = values_send[offset_nnz_send + j];
        column_index_recv[offset_nnz_recv + j] =
          column_index_send[offset_nnz_send + j];
      }

      // Copy the row starts
      unsigned n_n = Nrow_local_from_proc[Color][my_rank];
      for (unsigned j = 0; j < n_n; j++)
      {
        row_start_recv[offset_n_recv + j] = row_start_send[offset_n_send + j];
      }
    }

    // The local matrix is built with this processor's target distribution
    // for preconditioner Color
    LinearAlgebraDistribution* temp_dist_pt =
      new LinearAlgebraDistribution(/* ... local communicator ... */,
                                    target_first_row[Color][my_rank],
                                    target_nrow_local[Color][my_rank]);
    local_matrix_pt = new CRDoubleMatrix(temp_dist_pt);
    delete temp_dist_pt;
    // Wait for all the receives to complete
    Vector<MPI_Status> recv_stat(c_recv);
    MPI_Waitall(c_recv, &recv_req[0], &recv_stat[0]);

    // Shift each received row_start block so that it points into the
    // assembled values/column_index arrays
    unsigned nproc_contrib = nnz_start_index.size();
    for (unsigned j = 0; j < nproc_contrib; j++)
    {
      unsigned first = First_row_from_proc[Color][nnz_start_proc[j]] -
                       target_first_row[Color][my_rank];
      unsigned last = first + Nrow_local_from_proc[Color][nnz_start_proc[j]];
      unsigned nnz_inc = nnz_start_index[j] - row_start_recv[first];
      for (unsigned k = first; k < last; k++)
      {
        row_start_recv[k] += nnz_inc;
      }
    }
    row_start_recv[target_nrow_local[Color][my_rank]] = int(nnz_total);

    // Build the local matrix from the assembled arrays (no copy)
    local_matrix_pt->build_without_copy(matrix_pt[Color]->ncol(),
                                        nnz_total,
                                        values_recv,
                                        column_index_recv,
                                        row_start_recv);

    // Wait for the sends to complete before freeing the datatypes
    Vector<MPI_Status> send_stat(c_send);
    MPI_Waitall(c_send, &send_req[0], &send_stat[0]);

    // Free the MPI derived datatypes
    unsigned ndatatypes = datatypes.size();
    for (unsigned i = 0; i < ndatatypes; i++)
    {
      MPI_Type_free(&datatypes[i]);
    }
    // ... (elided: the local preconditioner is now set up with the local
    //      matrix, i.e. the preconditioner for this processor's colour is
    //      set up using local_matrix_pt)

    // The local matrix copy is no longer needed once the preconditioner has
    // been set up
    if (matrix_pt[0]->distributed())
    {
      delete local_matrix_pt;
    }

    // Remaining per-preconditioner bookkeeping
    for (unsigned i = 0; i < Nprec; i++)
    {
      // ...
    }
  }
  //============================================================================
  /// Applies each preconditioner to the corresponding vector in r and z.
  //============================================================================
  void PreconditionerArray::solve_preconditioners(
    const Vector<DoubleVector>& r, Vector<DoubleVector>& z)
  {
    // Check that the preconditioners have been set up
    if (Preconditioner_pt == 0)
    {
      std::ostringstream error_message;
      error_message << "The preconditioners have not been setup.";
      throw OomphLibError(
        error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
    }

    // Check that r contains the right number of vectors
    if (r.size() != Nprec)
    {
      std::ostringstream error_message;
      error_message << "This PreconditionerArray has " << Nprec
                    << " preconditioners but r only contains " << r.size()
                    << " vectors.";
      throw OomphLibError(
        error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
    }

    // Check that z contains the right number of vectors
    if (z.size() != Nprec)
    {
      std::ostringstream error_message;
      error_message << "This PreconditionerArray has " << Nprec
                    << " preconditioners but z only contains " << z.size()
                    << " vectors.";
      throw OomphLibError(
        error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
    }

    // Check that each r[i] has the same distribution as the matrix that was
    // passed to setup_preconditioners(...)
    for (unsigned i = 0; i < Nprec; i++)
    {
      if (*r[i].distribution_pt() != *Distribution_pt[i])
      {
        std::ostringstream error_message;
        error_message << "The distribution of r[" << i << "] does not have"
                      << " the same distribution as the matrix_pt[" << i
                      << "] that was passed to setup_preconditioners(...)";
        throw OomphLibError(error_message.str(),
                            OOMPH_CURRENT_FUNCTION,
                            OOMPH_EXCEPTION_LOCATION);
      }
    }
    // MPI requests for the non-blocking communication of the residual and
    // solution vectors
    Vector<MPI_Request> send_reqs;
    Vector<MPI_Request> recv_reqs;

    // The local residual vector, distributed in the same way as the local
    // preconditioner's matrix
    DoubleVector local_r(/* ... local distribution ... */);
    double* local_r_values = local_r.values_pt();

    // Send the relevant rows of each r[i] to the processors that work on
    // preconditioner i, and post the receives for the local residual
    for (unsigned i = 0; i < Nprec; i++)
    {
      if (r[i].distributed())
      {
        // Current first row of r[i] on this processor
        unsigned current_first_row = r[i].first_row();
        for (unsigned p = 0; p < nproc; p++)
        {
          // Send the rows destined for processor p (non-blocking); the
          // elided checks skip empty blocks and self-communication, and
          // offset_n is the offset of those rows within this processor's
          // block
          {
            int tag = this->compute_tag(nproc, my_rank, p, 0);
            MPI_Request tr;
            MPI_Isend(const_cast<double*>(r[i].values_pt()) + offset_n,
                      /* ... number of rows sent to p, MPI_DOUBLE, p, tag,
                         communicator, &tr ... */);
            send_reqs.push_back(tr);
          }

          // Post the receive for the rows of the local residual that come
          // from processor p (elided checks as above)
          {
            int tag = this->compute_tag(nproc, p, my_rank, 0);
            MPI_Request tr;
            MPI_Irecv(local_r_values + offset_n,
                      /* ... number of rows received from p, MPI_DOUBLE, p,
                         tag, communicator, &tr ... */);
            recv_reqs.push_back(tr);
          }
        }
      }
    }
    // Copy this processor's own contribution to the local residual directly
    if (!r[Color].distributed())
    {
      // r is not distributed: just copy the local entries
      const double* r_pt = r[Color].values_pt();
      unsigned nrow_local = local_r.nrow_local();
      for (unsigned i = 0; i < nrow_local; i++)
      {
        local_r_values[i] = r_pt[i];
      }
    }
    else
    {
      const double* r_pt = r[Color].values_pt();
      unsigned current_first_row = r[Color].first_row();

      // Offsets of the rows this processor keeps, in the original vector and
      // in the local residual (reconstructed from the row bookkeeping set up
      // in setup_preconditioners(...))
      unsigned offset_n_send =
        First_row_for_proc[Color][my_rank] - current_first_row;
      unsigned offset_n_recv =
        First_row_from_proc[Color][my_rank] - local_r.first_row();
      unsigned n_n = Nrow_local_from_proc[Color][my_rank];
      for (unsigned j = 0; j < n_n; j++)
      {
        local_r_values[offset_n_recv + j] = r_pt[offset_n_send + j];
      }
    }
    // Wait for the receives to complete: the local residual is now assembled
    unsigned n_recv = recv_reqs.size();
    if (n_recv != 0)
    {
      MPI_Waitall(n_recv, &recv_reqs[0], MPI_STATUSES_IGNORE);
    }

    // Apply this processor's preconditioner to the local residual: this is
    // where the block solves happen concurrently, one per processor group
    DoubleVector local_z;
    Preconditioner_pt->preconditioner_solve(local_r, local_z);
    double* local_z_values = local_z.values_pt();
    // Build each z[i] with the same distribution as the corresponding r[i]
    for (unsigned i = 0; i < Nprec; i++)
    {
      z[i].build(r[i].distribution_pt(), 0.0);
    }

    // Return the local solution to the processors that hold the
    // corresponding rows of z
    for (unsigned i = 0; i < Nprec; i++)
    {
      if (r[i].distributed())
      {
        unsigned current_first_row = r[i].first_row();
        for (unsigned p = 0; p < nproc; p++)
        {
          // Post the receive for the rows of z[i] held by this processor
          // (elided checks skip empty blocks and self-communication)
          {
            int tag = this->compute_tag(nproc, my_rank, p, 0);
            MPI_Request tr;
            MPI_Irecv(z[i].values_pt() + offset_n,
                      /* ... number of rows, MPI_DOUBLE, p, tag,
                         communicator, &tr ... */);
            recv_reqs.push_back(tr);
          }

          // Send the rows of the local solution destined for processor p
          {
            int tag = this->compute_tag(nproc, p, my_rank, 0);
            MPI_Request tr;
            MPI_Isend(local_z_values + offset_n,
                      /* ... number of rows, MPI_DOUBLE, p, tag,
                         communicator, &tr ... */);
            send_reqs.push_back(tr);
          }
        }
      }
      else
      {
        // z[i] is not distributed: exchange the complete local blocks
        // (elided bookkeeping selects the partners and row counts)
        MPI_Request tr;
        MPI_Isend(local_z_values,
                  /* ... this processor's block of the solution ... */);
        send_reqs.push_back(tr);

        int p = my_local_rank;
        // ...
        MPI_Irecv(z[i].values_pt(),
                  /* ... the blocks of z[i] from the processors that
                     computed them ... */);
        recv_reqs.push_back(tr);
      }
    }
    // Copy this processor's own part of the solution directly
    if (!r[Color].distributed())
    {
      double* z_pt = z[Color].values_pt();
      unsigned nrow_local = local_z.nrow_local();
      for (unsigned i = 0; i < nrow_local; i++)
      {
        z_pt[i] = local_z_values[i];
      }
    }
    else
    {
      double* z_pt = z[Color].values_pt();
      unsigned current_first_row = r[Color].first_row();

      // Offsets of the rows this processor keeps, in z and in the local
      // solution (reconstructed from the row bookkeeping set up in
      // setup_preconditioners(...))
      unsigned offset_n_send =
        First_row_for_proc[Color][my_rank] - current_first_row;
      unsigned offset_n_recv =
        First_row_from_proc[Color][my_rank] - local_z.first_row();
      unsigned n_n = Nrow_local_from_proc[Color][my_rank];
      for (unsigned j = 0; j < n_n; j++)
      {
        z_pt[offset_n_send + j] = local_z_values[offset_n_recv + j];
      }
    }
    // Wait for the remaining receives and sends to complete
    n_recv = recv_reqs.size();
    if (n_recv != 0)
    {
      MPI_Waitall(n_recv, &recv_reqs[0], MPI_STATUSES_IGNORE);
    }
    unsigned n_send = send_reqs.size();
    if (n_send != 0)
    {
      MPI_Waitall(n_send, &send_reqs[0], MPI_STATUSES_IGNORE);
    }
  }
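  // Illustrative note (not from the library source): typical usage of the
  // PreconditionerArray, assuming Nprec matrices and preconditioners have
  // already been assembled:
  //
  //   PreconditionerArray prec_array;
  //   prec_array.setup_preconditioners(matrix_pt, prec_pt, comm_pt);
  //   // ... inside the outer (block) solver iteration:
  //   prec_array.solve_preconditioners(r, z);   // z[i] ~ prec_i^{-1} r[i]
  //   prec_array.clean_up_memory();
  //
  // Each call to solve_preconditioners(...) scatters the Nprec residual
  // vectors to the processor groups, applies every preconditioner
  // concurrently (one per group) and gathers the results back into z.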
// Summary of the PreconditionerArray members referenced above (from the
// class documentation):
//  - Nprec: the number of preconditioners in the array.
//  - Color: the "colour" of this processor, i.e. the number of the
//    preconditioner this processor works on.
//  - Method: selects the communication method used in
//    setup_preconditioners(...).
//  - Global_communicator_pt / Local_communicator_pt: the global communicator
//    for the preconditioner array and the communicators for the individual
//    preconditioners.
//  - Preconditioner_pt: the local preconditioner on this processor.
//  - Distribution_pt: the distributions of the matrices passed to
//    setup_preconditioners(...).
//  - First_proc_for_prec / Nproc_for_prec: the first processor and the
//    number of processors assigned to each preconditioner.
//  - First_row_for_proc / Nrow_local_for_proc: for each preconditioner, the
//    first row and the number of rows sent from this processor to each
//    processor.
//  - First_row_from_proc / Nrow_local_from_proc: for each preconditioner,
//    the first row and the number of rows received by this processor from
//    each processor.
//  - compute_tag(nproc, source, dest, type): helper for computing the
//    MPI_Isend and MPI_Irecv tags.