// this multi vector cannot be redistributed if it does not own its data
std::ostringstream error_message;
error_message
  << "This multi vector does not own its data (i.e. data has been "
  << "passed in via set_external_values() and therefore "
  << "cannot be redistributed";
throw OomphLibError(
  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
// paranoid check that the numbers of global rows are the same
if (dist_pt->nrow() != this->nrow())
{
  std::ostringstream error_message;
  error_message << "The number of global rows in the new distribution ("
                << dist_pt->nrow() << ") is not equal to the number"
                << " of global rows in the current distribution ("
                << this->nrow() << ").\n";
  throw OomphLibError(
    error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
}
// paranoid check that the new distribution and the current distribution
// use the same communicator
std::ostringstream error_message;
error_message << "The new distribution and the current distribution must "
              << "have the same communicator.";
throw OomphLibError(
  error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
// number of vectors
const unsigned n_vector = this->Nvector;
// ...
for (int i = 0; i < nproc; i++)
{
  // ... (loop body not shown in this listing)
}
// ...
// for every processor, compute the first_row and nrow_local of the data
// that this processor will send to it and receive from it
for (int p = 0; p < nproc; p++)
{
  // data to be sent to processor p: overlap of this processor's current
  // rows with processor p's new rows
  if ((new_first_row_data[p] < (current_first_row_data[my_rank] +
                                current_nrow_local_data[my_rank])) &&
      (current_first_row_data[my_rank] <
       (new_first_row_data[p] + new_nrow_local_data[p])))
  {
    new_first_row_for_proc[p] =
      std::max(current_first_row_data[my_rank], new_first_row_data[p]);
    new_nrow_local_for_proc[p] =
      std::min((current_first_row_data[my_rank] +
                current_nrow_local_data[my_rank]),
               (new_first_row_data[p] + new_nrow_local_data[p])) -
      new_first_row_for_proc[p];
  }

  // data to be received from processor p: overlap of processor p's
  // current rows with this processor's new rows
  if ((new_first_row_data[my_rank] <
       (current_first_row_data[p] + current_nrow_local_data[p])) &&
      (current_first_row_data[p] <
       (new_first_row_data[my_rank] + new_nrow_local_data[my_rank])))
  {
    new_first_row_from_proc[p] =
      std::max(current_first_row_data[p], new_first_row_data[my_rank]);
    new_nrow_local_from_proc[p] =
      std::min((current_first_row_data[p] + current_nrow_local_data[p]),
               (new_first_row_data[my_rank] + new_nrow_local_data[my_rank])) -
      new_first_row_from_proc[p];
  }
}
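// Illustrative aside (not part of the original source): the two if-blocks
// above compute the overlap of half-open row ranges. For example, if this
// processor currently holds rows [4,8) (first_row 4, nrow_local 4) and the
// new distribution assigns rows [6,12) to processor p, then
//   new_first_row_for_proc[p]  = max(4, 6)      = 6
//   new_nrow_local_for_proc[p] = min(8, 12) - 6 = 2
// i.e. global rows 6 and 7 will be sent from this processor to p. The
// second if-block is the mirror image for the rows to be received from p.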
// temporary storage for the new local data: one contiguous block of
// n_vector * nrow_local doubles, with temp_data[v] pointing at the
// start of vector v's chunk
double** temp_data = new double*[n_vector];
double* contiguous_temp_data =
  new double[n_vector * new_nrow_local_data[my_rank]];
for (unsigned v = 0; v < n_vector; ++v)
{
  temp_data[v] = &contiguous_temp_data[v * new_nrow_local_data[my_rank]];
}
// "send" the relevant data from this processor to itself by direct copy
if (new_nrow_local_for_proc[my_rank] != 0)
{
  // offsets into the current data (j) and into the new data (k)
  unsigned j =
    new_first_row_for_proc[my_rank] - current_first_row_data[my_rank];
  unsigned k =
    new_first_row_for_proc[my_rank] - new_first_row_data[my_rank];
  for (unsigned i = 0; i < new_nrow_local_for_proc[my_rank]; i++)
  {
    for (unsigned v = 0; v < n_vector; ++v)
    {
      temp_data[v][k + i] = Values[v][j + i];
    }
  }
}
// exchange data with a different processor at each step
for (int p = 1; p < nproc; p++)
{
  // processor to send to and processor to receive from at this step
  unsigned dest_p = (my_rank + p) % nproc;
  unsigned source_p = (nproc + my_rank - p) % nproc;

  for (unsigned v = 0; v < n_vector; v++)
  {
    MPI_Sendrecv(
      // send buffer and send count: the rows destined for dest_p
      Values[v] + new_first_row_for_proc[dest_p] -
        current_first_row_data[my_rank],
      new_nrow_local_for_proc[dest_p],
      // ... (send datatype, destination rank and tag not shown)
      // receive buffer and receive count: the rows arriving from source_p
      temp_data[v] + new_first_row_from_proc[source_p] -
        new_first_row_data[my_rank],
      new_nrow_local_from_proc[source_p],
      // ... (receive datatype, source rank, tag, communicator and status
      // arguments not shown in this listing)
      );
  }
}
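// Illustrative aside (not part of the original source): the loop above
// staggers the communication so that at step p every processor sends to
// rank (my_rank + p) % nproc while receiving from (my_rank - p + nproc)
// % nproc. With nproc = 3 and p = 1, for instance, rank 0 sends to 1 and
// receives from 2, rank 1 sends to 2 and receives from 0, and rank 2
// sends to 0 and receives from 1, so every send has a matching receive.
// Pairing the send and the receive in a single MPI_Sendrecv per vector
// avoids the deadlock that separate blocking sends and receives could
// otherwise cause.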
// ...
// temporary storage for the existing local data: again one contiguous
// block with one pointer per vector
double** temp_data = new double*[n_vector];
double* contiguous_temp_data = new double[n_vector * n_local_data];
for (unsigned v = 0; v < n_vector; ++v)
{
  temp_data[v] = &contiguous_temp_data[v * n_local_data];
  for (unsigned i = 0; i < n_local_data; i++)
  {
    // ... (loop body not shown in this listing)
  }
}
// ...
// storage for all global rows of every vector
double* values = new double[this->nrow() * n_vector];
for (unsigned v = 0; v < n_vector; v++)
{
  // ... (loop body not shown in this listing)
}
// ...
// per-processor first row and local row count
int* dist_first_row = new int[nproc];
int* dist_nrow_local = new int[nproc];
for (int p = 0; p < nproc; p++)
{
  // ... (loop body not shown in this listing)
}

// ...
for (unsigned v = 0; v < n_vector; v++)
{
  // ... (loop body not shown in this listing)
}
// ...
// clean up: deleting temp_data[0] releases the single contiguous block
// that all of the per-vector pointers point into
delete[] temp_data[0];
// ...
delete[] dist_first_row;
delete[] dist_nrow_local;
// ...
// temporary storage for the existing local data, as in the previous case
double** temp_data = new double*[n_vector];
double* contiguous_temp_data = new double[n_vector * n_local_data];
// ...
for (unsigned v = 0; v < n_vector; v++)
{
  temp_data[v] = &contiguous_temp_data[v * n_local_data];
  for (unsigned i = 0; i < n_local_data; i++)
  {
    // ... (loop body not shown in this listing)
  }
}
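The fragments above are taken from the body of the redistribute() member
declared below. As a rough usage sketch (the variable names and the
LinearAlgebraDistribution constructor arguments here are illustrative
assumptions, not taken from this listing), redistributing a multi vector
onto a new row distribution might look like:

  // a new, distributed row distribution over the same communicator and
  // with the same number of global rows as the current distribution
  // (communicator_pt, n_row and multi_vector are assumed to exist
  // already in the calling code)
  LinearAlgebraDistribution new_dist(communicator_pt, n_row, true);

  // move the local data so that it matches the new distribution; the
  // vector must own its data, i.e. set_external_values() must not have
  // been used to point it at external storage
  multi_vector.redistribute(&new_dist);
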
bool distributed() const
distribution is serial or distributed
LinearAlgebraDistribution * distribution_pt() const
access to the LinearAlgebraDistribution
unsigned nrow() const
access function to the number of global rows.
unsigned nrow_local() const
access function for the num of local rows on this processor.
unsigned first_row() const
access function for the first row on this processor
void build_distribution(const LinearAlgebraDistribution *const dist_pt)
setup the distribution of this distributable linear algebra object
void setup_doublevector_representation()
compute the A-norm using the matrix at matrix_pt
void redistribute(const LinearAlgebraDistribution *const &dist_pt)
Allows external data to be used by this vector. WARNING: The size of the external data must corre...
unsigned Nvector
The number of vectors.
double ** Values
the local data, need a pointer to a pointer so that the individual vectors can be extracted
double ** values()
access function to the underlying values
bool Internal_values
Boolean flag to indicate whether the vector's data (values_pt) is owned by this vector.
Describes the distribution of a distributable linear algebra type object. Typically this is a contain...
bool distributed() const
access function to the distributed - indicates whether the distribution is serial or distributed
unsigned first_row() const
access function for the first row on this processor. If not distributed then this is just zero.
OomphCommunicator * communicator_pt() const
const access to the communicator pointer
unsigned nrow() const
access function to the number of global rows.
unsigned nrow_local() const
access function for the num of local rows on this processor. If no MPI then Nrow is returned.
An oomph-lib wrapper to the MPI_Comm communicator object. Just contains an MPI_Comm object (which is ...
An OomphLibError object which should be thrown when a run-time error is encountered....