Reference documentation for deal.II version 8.1.0
constraint_matrix.templates.h
1 // ---------------------------------------------------------------------
2 // $Id: constraint_matrix.templates.h 31932 2013-12-08 02:15:54Z heister $
3 //
4 // Copyright (C) 1999 - 2013 by the deal.II authors
5 //
6 // This file is part of the deal.II library.
7 //
8 // The deal.II library is free software; you can use it, redistribute
9 // it, and/or modify it under the terms of the GNU Lesser General
10 // Public License as published by the Free Software Foundation; either
11 // version 2.1 of the License, or (at your option) any later version.
12 // The full text of the license can be found in the file LICENSE at
13 // the top level of the deal.II distribution.
14 //
15 // ---------------------------------------------------------------------
16 
17 
18 #ifndef __deal2__constraint_matrix_templates_h
19 #define __deal2__constraint_matrix_templates_h
20 
21 
22 #include <deal.II/lac/constraint_matrix.h>
23 
24 #include <deal.II/base/table.h>
25 #include <deal.II/lac/full_matrix.h>
26 #include <deal.II/lac/sparsity_pattern.h>
27 #include <deal.II/lac/sparse_matrix.h>
28 #include <deal.II/lac/block_sparsity_pattern.h>
29 #include <deal.II/lac/block_sparse_matrix.h>
30 #include <deal.II/lac/parallel_vector.h>
31 #include <deal.II/lac/parallel_block_vector.h>
32 #include <deal.II/lac/petsc_parallel_vector.h>
33 #include <deal.II/lac/petsc_vector.h>
34 #include <deal.II/lac/trilinos_vector.h>
35 
36 #include <iomanip>
37 
38 DEAL_II_NAMESPACE_OPEN
39 
40 
41 template<typename number>
42 void
43 ConstraintMatrix::condense (const SparseMatrix<number> &uncondensed,
44  SparseMatrix<number> &condensed) const
45 {
46  // create a dummy vector and enter the
47  // other function
48  Vector<number> dummy (0);
49  condense (uncondensed, dummy, condensed, dummy);
50 }
51 
52 
53 
54 template<typename number>
55 void
56 ConstraintMatrix::condense (SparseMatrix<number> &uncondensed) const
57 {
58  Vector<number> dummy (0);
59  condense (uncondensed, dummy);
60 }
61 
62 
63 
64 template <typename number>
65 void
66 ConstraintMatrix::condense (BlockSparseMatrix<number> &uncondensed) const
67 {
68  BlockVector<number> dummy (0);
69  condense (uncondensed, dummy);
70 }
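// [Usage sketch, not part of the library source: the condense() overloads
//  above are typically driven as follows; the names dof_handler,
//  system_matrix and system_rhs are assumed to exist in the calling code,
//  and the sparsity pattern must have been condensed in the same way.]
//
// @code
//   ConstraintMatrix constraints;
//   DoFTools::make_hanging_node_constraints (dof_handler, constraints);
//   constraints.close ();
//
//   // assemble system_matrix and system_rhs ignoring the constraints, then
//   // eliminate the constrained degrees of freedom in place:
//   constraints.condense (system_matrix, system_rhs);
// @endcode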
71 
72 
73 
74 template<class VectorType>
75 void
76 ConstraintMatrix::condense (const VectorType &uncondensed,
77  VectorType &condensed) const
78 {
79  Assert (sorted == true, ExcMatrixNotClosed());
80  AssertDimension (condensed.size()+n_constraints(), uncondensed.size());
81 
82  // store for each line of the
83  // vector its new line number after
84  // compression. If the entry is -1,
85  // this line will be condensed away
86  std::vector<int> new_line;
87 
88  new_line.reserve (uncondensed.size());
89 
90  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
91  size_type shift = 0;
92  size_type n_rows = uncondensed.size();
93 
94  if (next_constraint == lines.end())
95  // if no constraint is to be handled
96  for (size_type row=0; row!=n_rows; ++row)
97  new_line.push_back (row);
98  else
99  for (size_type row=0; row!=n_rows; ++row)
100  if (row == next_constraint->line)
101  {
102  // this line is constrained
103  new_line.push_back (-1);
104  // note that @p lines is ordered
105  ++shift;
106  ++next_constraint;
107  if (next_constraint == lines.end())
108  // nothing more to do; finish rest
109  // of loop
110  {
111  for (size_type i=row+1; i<n_rows; ++i)
112  new_line.push_back (i-shift);
113  break;
114  };
115  }
116  else
117  new_line.push_back (row-shift);
118 
119 
120  next_constraint = lines.begin();
121  // note: in this loop we need not check
122  // whether @p next_constraint is a valid
123  // iterator, since @p next_constraint is
124  // only evaluated as often as there are
125  // entries in new_line[*], which tell us
126  // which constraints exist
127  for (size_type row=0; row<uncondensed.size(); ++row)
128  if (new_line[row] != -1)
129  // line not constrained
130  // copy entry
131  condensed(new_line[row]) += uncondensed(row);
132 
133  else
134  // line must be distributed
135  {
136  for (size_type q=0; q!=next_constraint->entries.size(); ++q)
137  condensed(new_line[next_constraint->entries[q].first])
138  +=
139  uncondensed(row) * next_constraint->entries[q].second;
140 
141  ++next_constraint;
142  };
143 }
144 
145 
146 
147 template <class VectorType>
148 void
149 ConstraintMatrix::condense (VectorType &vec) const
150 {
151  Assert (sorted == true, ExcMatrixNotClosed());
152 
153  // distribute all entries, and set them to zero. do so in
154  // two loops because in the first one we need to add to elements
155  // and in the second one we need to set elements to zero. for
156  // parallel vectors, this can only work if we can put a compress()
157  // in between, but we don't want to call compress() twice per entry
158  for (std::vector<ConstraintLine>::const_iterator
159  constraint_line = lines.begin();
160  constraint_line!=lines.end(); ++constraint_line)
161  {
162  // in case the constraint is
163  // inhomogeneous, this function is not
164  // appropriate. Throw an exception.
165  Assert (constraint_line->inhomogeneity == 0.,
166  ExcMessage ("Inhomogeneous constraint cannot be condensed "
167  "without any matrix specified."));
168 
169  const typename VectorType::value_type old_value = vec(constraint_line->line);
170  for (size_type q=0; q!=constraint_line->entries.size(); ++q)
171  if (vec.in_local_range(constraint_line->entries[q].first) == true)
172  vec(constraint_line->entries[q].first)
173  += (static_cast<typename VectorType::value_type>
174  (old_value) *
175  constraint_line->entries[q].second);
176  }
177 
178  vec.compress(VectorOperation::add);
179 
180  for (std::vector<ConstraintLine>::const_iterator
181  constraint_line = lines.begin();
182  constraint_line!=lines.end(); ++constraint_line)
183  if (vec.in_local_range(constraint_line->line) == true)
184  vec(constraint_line->line) = 0.;
185 
186  vec.compress(VectorOperation::insert);
187 }
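// [Worked example, not part of the library source: for the single
//  homogeneous constraint x_3 = 0.5*x_1 + 0.5*x_2 the two loops above
//  first add 0.5*v(3) to v(1) and v(2), followed by compress(add), and
//  then zero v(3), followed by compress(insert). For instance:]
//
// @code
//   ConstraintMatrix constraints;
//   constraints.add_line (3);
//   constraints.add_entry (3, 1, 0.5);
//   constraints.add_entry (3, 2, 0.5);
//   constraints.close ();
//
//   Vector<double> v (5);
//   v = 1.;
//   constraints.condense (v);   // now v(1) == v(2) == 1.5 and v(3) == 0
// @endcode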
188 
189 
190 
191 template<typename number, class VectorType>
192 void
193 ConstraintMatrix::condense (const SparseMatrix<number> &uncondensed,
194  const VectorType &uncondensed_vector,
195  SparseMatrix<number> &condensed,
196  VectorType &condensed_vector) const
197 {
198  // check whether we work on real vectors
199  // or we just used a dummy when calling
200  // the other function above.
201  const bool use_vectors = (uncondensed_vector.size() == 0 &&
202  condensed_vector.size() == 0) ? false : true;
203 
204  const SparsityPattern &uncondensed_struct = uncondensed.get_sparsity_pattern ();
205 
206  Assert (sorted == true, ExcMatrixNotClosed());
207  Assert (uncondensed_struct.is_compressed() == true, ExcMatrixNotClosed());
208  Assert (condensed.get_sparsity_pattern().is_compressed() == true, ExcMatrixNotClosed());
209  Assert (uncondensed_struct.n_rows() == uncondensed_struct.n_cols(),
210  ExcNotQuadratic());
211  Assert (condensed.n() == condensed.m(),
212  ExcNotQuadratic());
213  AssertDimension (condensed.n()+n_constraints(), uncondensed.n());
214  if (use_vectors == true)
215  {
216  AssertDimension (condensed_vector.size()+n_constraints(),
217  uncondensed_vector.size());
218  AssertDimension (condensed_vector.size(), condensed.m());
219  }
220 
221  // store for each line of the matrix
222  // its new line number
223  // after compression. If the entry is
224  // -1, this line will be condensed away
225  std::vector<int> new_line;
226 
227  new_line.reserve (uncondensed_struct.n_rows());
228 
229  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
230  size_type shift = 0;
231  const size_type n_rows = uncondensed_struct.n_rows();
232 
233  if (next_constraint == lines.end())
234  // if no constraint is to be handled
235  for (size_type row=0; row!=n_rows; ++row)
236  new_line.push_back (row);
237  else
238  for (size_type row=0; row!=n_rows; ++row)
239  if (row == next_constraint->line)
240  {
241  // this line is constrained
242  new_line.push_back (-1);
243  // note that @p lines is ordered
244  ++shift;
245  ++next_constraint;
246  if (next_constraint == lines.end())
247  // nothing more to do; finish rest
248  // of loop
249  {
250  for (size_type i=row+1; i<n_rows; ++i)
251  new_line.push_back (i-shift);
252  break;
253  };
254  }
255  else
256  new_line.push_back (row-shift);
257 
258 
259  next_constraint = lines.begin();
260 
261  // note: in this loop we need not check
262  // whether @p next_constraint is a valid
263  // iterator, since @p next_constraint is
264  // only evaluated as often as there are
265  // entries in new_line[*], which tell us
266  // which constraints exist
267  for (size_type row=0; row<uncondensed_struct.n_rows(); ++row)
268  if (new_line[row] != -1)
269  {
270  // line not constrained
271  // copy entries if column will not
272  // be condensed away, distribute
273  // otherwise
274  for (typename SparseMatrix<number>::const_iterator
275  p = uncondensed.begin(row);
276  p != uncondensed.end(row); ++p)
277  if (new_line[p->column()] != -1)
278  condensed.add (new_line[row],
279  new_line[p->column()],
280  p->value());
281  else
282  {
283  // let c point to the
284  // constraint of this column
285  std::vector<ConstraintLine>::const_iterator c = lines.begin();
286  while (c->line != p->column())
287  ++c;
288 
289  for (size_type q=0; q!=c->entries.size(); ++q)
290  // distribute to rows with
291  // appropriate weight
292  condensed.add (new_line[row], new_line[c->entries[q].first],
293  p->value() * c->entries[q].second);
294 
295  // take care of inhomogeneity:
296  // need to subtract this element from the
297  // vector. this corresponds to an
298  // explicit elimination in the respective
299  // row of the inhomogeneous constraint in
300  // the matrix with Gauss elimination
301  if (use_vectors == true)
302  condensed_vector(new_line[row]) -= p->value() *
303  c->inhomogeneity;
304  }
305 
306  if (use_vectors == true)
307  condensed_vector(new_line[row]) += uncondensed_vector(row);
308  }
309  else
310  // line must be distributed
311  {
312  for (typename SparseMatrix<number>::const_iterator
313  p = uncondensed.begin(row);
314  p != uncondensed.end(row); ++p)
315  // for each column: distribute
316  if (new_line[p->column()] != -1)
317  // column is not constrained
318  for (size_type q=0; q!=next_constraint->entries.size(); ++q)
319  condensed.add (new_line[next_constraint->entries[q].first],
320  new_line[p->column()],
321  p->value() *
322  next_constraint->entries[q].second);
323 
324  else
325  // not only this line but
326  // also this col is constrained
327  {
328  // let c point to the constraint
329  // of this column
330  std::vector<ConstraintLine>::const_iterator c = lines.begin();
331  while (c->line != p->column())
332  ++c;
333 
334  for (size_type r=0; r!=c->entries.size(); ++r)
335  for (size_type q=0; q!=next_constraint->entries.size(); ++q)
336  condensed.add (new_line[next_constraint->entries[q].first],
337  new_line[c->entries[r].first],
338  p->value() *
339  next_constraint->entries[q].second *
340  c->entries[r].second);
341 
342  if (use_vectors == true)
343  for (size_type q=0; q!=next_constraint->entries.size(); ++q)
344  condensed_vector (new_line[next_constraint->entries[q].first])
345  -= p->value() *
346  next_constraint->entries[q].second *
347  c->inhomogeneity;
348  }
349 
350  // condense the vector
351  if (use_vectors == true)
352  for (size_type q=0; q!=next_constraint->entries.size(); ++q)
353  condensed_vector(new_line[next_constraint->entries[q].first])
354  +=
355  uncondensed_vector(row) * next_constraint->entries[q].second;
356 
357  ++next_constraint;
358  };
359 }
360 
361 
362 
363 template<typename number, class VectorType>
364 void
365 ConstraintMatrix::condense (SparseMatrix<number> &uncondensed,
366  VectorType &vec) const
367 {
368  // check whether we work on real vectors
369  // or we just used a dummy when calling
370  // the other function above.
371  const bool use_vectors = vec.size() == 0 ? false : true;
372 
373  const SparsityPattern &sparsity = uncondensed.get_sparsity_pattern ();
374 
375  Assert (sorted == true, ExcMatrixNotClosed());
376  Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed());
377  Assert (sparsity.n_rows() == sparsity.n_cols(),
378  ExcNotQuadratic());
379  if (use_vectors == true)
380  AssertDimension (vec.size(), sparsity.n_rows());
381 
382  double average_diagonal = 0;
383  for (size_type i=0; i<uncondensed.m(); ++i)
384  average_diagonal += std::fabs (uncondensed.diag_element(i));
385  average_diagonal /= uncondensed.m();
386 
387  // store for each index whether it must be
388  // distributed or not. If entry is
389  // invalid_size_type, no distribution is
390  // necessary. otherwise, the number states
391  // which line in the constraint matrix
392  // handles this index
393  std::vector<size_type> distribute (sparsity.n_rows(),
394  numbers::invalid_size_type);
395 
396  for (size_type c=0; c<lines.size(); ++c)
397  distribute[lines[c].line] = c;
398 
399  const size_type n_rows = sparsity.n_rows();
400  for (size_type row=0; row<n_rows; ++row)
401  {
402  if (distribute[row] == numbers::invalid_size_type)
403  // regular line. loop over cols
404  {
405  for (typename SparseMatrix<number>::iterator
406  entry = uncondensed.begin(row);
407  entry != uncondensed.end(row); ++entry)
408  {
409  const size_type column = entry->column();
410 
411  // end of row reached?
412  // this should not
413  // happen, since we only
414  // operate on compressed
415  // matrices!
416  Assert (column != SparsityPattern::invalid_entry,
417  ExcMatrixNotClosed());
418 
419  if (distribute[column] != numbers::invalid_size_type)
420  // distribute entry at
421  // regular row @p row
422  // and irregular column
423  // sparsity.get_column_numbers()[j];
424  // set old entry to
425  // zero
426  {
427  for (size_type q=0;
428  q!=lines[distribute[column]].entries.size(); ++q)
429  uncondensed.add (row,
430  lines[distribute[column]].entries[q].first,
431  entry->value() *
432  lines[distribute[column]].entries[q].second);
433 
434  // need to subtract this element from the
435  // vector. this corresponds to an
436  // explicit elimination in the respective
437  // row of the inhomogeneous constraint in
438  // the matrix with Gauss elimination
439  if (use_vectors == true)
440  vec(row) -=
441  entry->value() * lines[distribute[column]].inhomogeneity;
442 
443  // set old value to zero
444  entry->value() = 0.;
445  }
446  }
447  }
448  else
449  // row must be distributed
450  {
451  for (typename SparseMatrix<number>::iterator
452  entry = uncondensed.begin(row);
453  entry != uncondensed.end(row); ++entry)
454  {
455  const size_type column = entry->column();
456 
457  // end of row reached?
458  // this should not
459  // happen, since we only
460  // operate on compressed
461  // matrices!
462  Assert (column != SparsityPattern::invalid_entry,
463  ExcMatrixNotClosed());
464 
465  if (distribute[column] == numbers::invalid_size_type)
466  // distribute entry at
467  // irregular row
468  // @p row and regular
469  // column
470  // column. set
471  // old entry to zero
472  {
473  for (size_type q=0;
474  q!=lines[distribute[row]].entries.size(); ++q)
475  uncondensed.add (lines[distribute[row]].entries[q].first,
476  column,
477  entry->value() *
478  lines[distribute[row]].entries[q].second);
479 
480  // set old entry to zero
481  entry->value() = 0.;
482  }
483  else
484  // distribute entry at
485  // irregular row @p row and
486  // irregular column
487  // @p column set old entry
488  // to one on main
489  // diagonal, zero otherwise
490  {
491  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
492  {
493  for (size_type q=0;
494  q!=lines[distribute[column]].entries.size(); ++q)
495  uncondensed.add (lines[distribute[row]].entries[p].first,
496  lines[distribute[column]].entries[q].first,
497  entry->value() *
498  lines[distribute[row]].entries[p].second *
499  lines[distribute[column]].entries[q].second);
500 
501  if (use_vectors == true)
502  vec(lines[distribute[row]].entries[p].first) -=
503  entry->value() * lines[distribute[row]].entries[p].second *
504  lines[distribute[column]].inhomogeneity;
505  }
506 
507  // set old entry to correct
508  // value
509  entry->value() = (row == column ? average_diagonal : 0. );
510  }
511  }
512 
513  // take care of vector
514  if (use_vectors == true)
515  {
516  for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
517  vec(lines[distribute[row]].entries[q].first)
518  += (vec(row) * lines[distribute[row]].entries[q].second);
519 
520  vec(lines[distribute[row]].line) = 0.;
521  }
522  }
523  }
524 }
525 
526 
527 
528 template <typename number, class BlockVectorType>
529 void
530 ConstraintMatrix::condense (BlockSparseMatrix<number> &uncondensed,
531  BlockVectorType &vec) const
532 {
533  // check whether we work on real vectors
534  // or we just used a dummy when calling
535  // the other function above.
536  const bool use_vectors = vec.n_blocks() == 0 ? false : true;
537 
538  const size_type blocks = uncondensed.n_block_rows();
539 
540  const BlockSparsityPattern &
541  sparsity = uncondensed.get_sparsity_pattern ();
542 
543  Assert (sorted == true, ExcMatrixNotClosed());
544  Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed());
545  Assert (sparsity.n_rows() == sparsity.n_cols(),
546  ExcNotQuadratic());
547  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
548  ExcNotQuadratic());
549  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
550  ExcNotQuadratic());
551  Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
552  ExcNotQuadratic());
553 
554  if (use_vectors == true)
555  {
556  AssertDimension (vec.size(), sparsity.n_rows());
557  AssertDimension (vec.n_blocks(), sparsity.n_block_rows());
558  }
559 
560  double average_diagonal = 0;
561  for (size_type b=0; b<uncondensed.n_block_rows(); ++b)
562  for (size_type i=0; i<uncondensed.block(b,b).m(); ++i)
563  average_diagonal += std::fabs (uncondensed.block(b,b).diag_element(i));
564  average_diagonal /= uncondensed.m();
565 
566  const BlockIndices &
567  index_mapping = sparsity.get_column_indices();
568 
569  // store for each index whether it must be
570  // distributed or not. If entry is
571  // numbers::invalid_size_type,
572  // no distribution is necessary.
573  // otherwise, the number states which line
574  // in the constraint matrix handles this
575  // index
576  std::vector<size_type> distribute (sparsity.n_rows(),
577  numbers::invalid_size_type);
578 
579  for (size_type c=0; c<lines.size(); ++c)
580  distribute[lines[c].line] = c;
581 
582  const size_type n_rows = sparsity.n_rows();
583  for (size_type row=0; row<n_rows; ++row)
584  {
585  // get index of this row
586  // within the blocks
587  const std::pair<size_type,size_type>
588  block_index = index_mapping.global_to_local(row);
589  const size_type block_row = block_index.first;
590 
591  if (distribute[row] == numbers::invalid_size_type)
592  // regular line. loop over
593  // all columns and see
594  // whether this column must
595  // be distributed
596  {
597 
598  // to loop over all entries
599  // in this row, we have to
600  // loop over all blocks in
601  // this blockrow and the
602  // corresponding row
603  // therein
604  for (size_type block_col=0; block_col<blocks; ++block_col)
605  {
606  for (typename SparseMatrix<number>::iterator
607  entry = uncondensed.block(block_row, block_col).begin(block_index.second);
608  entry != uncondensed.block(block_row, block_col).end(block_index.second);
609  ++entry)
610  {
611  const size_type global_col
612  = index_mapping.local_to_global(block_col,entry->column());
613 
614  if (distribute[global_col] != numbers::invalid_size_type)
615  // distribute entry at
616  // regular row @p row
617  // and irregular column
618  // global_col; set old
619  // entry to zero
620  {
621  const double old_value = entry->value ();
622 
623  for (size_type q=0;
624  q!=lines[distribute[global_col]].entries.size(); ++q)
625  uncondensed.add (row,
626  lines[distribute[global_col]].entries[q].first,
627  old_value *
628  lines[distribute[global_col]].entries[q].second);
629 
630  // need to subtract this element from the
631  // vector. this corresponds to an
632  // explicit elimination in the respective
633  // row of the inhomogeneous constraint in
634  // the matrix with Gauss elimination
635  if (use_vectors == true)
636  vec(row) -= entry->value() *
637  lines[distribute[global_col]].inhomogeneity;
638 
639  entry->value() = 0.;
640  }
641  }
642  }
643  }
644  else
645  {
646  // row must be
647  // distributed. split the
648  // whole row into the
649  // chunks defined by the
650  // blocks
651  for (size_type block_col=0; block_col<blocks; ++block_col)
652  {
653  for (typename SparseMatrix<number>::iterator
654  entry = uncondensed.block(block_row, block_col).begin(block_index.second);
655  entry != uncondensed.block(block_row, block_col).end(block_index.second);
656  ++entry)
657  {
658  const size_type global_col
659  = index_mapping.local_to_global (block_col, entry->column());
660 
661  if (distribute[global_col] ==
662  numbers::invalid_size_type)
663  // distribute
664  // entry at
665  // irregular
666  // row @p row
667  // and regular
668  // column
669  // global_col. set
670  // old entry to
671  // zero
672  {
673  const double old_value = entry->value();
674 
675  for (size_type q=0;
676  q!=lines[distribute[row]].entries.size(); ++q)
677  uncondensed.add (lines[distribute[row]].entries[q].first,
678  global_col,
679  old_value *
680  lines[distribute[row]].entries[q].second);
681 
682  entry->value() = 0.;
683  }
684  else
685  // distribute entry at
686  // irregular row @p row
687  // and irregular column
688  // @p global_col set old
689  // entry to one if on
690  // main diagonal, zero
691  // otherwise
692  {
693  const double old_value = entry->value ();
694 
695  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
696  {
697  for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
698  uncondensed.add (lines[distribute[row]].entries[p].first,
699  lines[distribute[global_col]].entries[q].first,
700  old_value *
701  lines[distribute[row]].entries[p].second *
702  lines[distribute[global_col]].entries[q].second);
703 
704  if (use_vectors == true)
705  vec(lines[distribute[row]].entries[p].first) -=
706  old_value * lines[distribute[row]].entries[p].second *
707  lines[distribute[global_col]].inhomogeneity;
708  }
709 
710  entry->value() = (row == global_col ? average_diagonal : 0. );
711  }
712  }
713  }
714 
715  // take care of vector
716  if (use_vectors == true)
717  {
718  for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
719  vec(lines[distribute[row]].entries[q].first)
720  += (vec(row) * lines[distribute[row]].entries[q].second);
721 
722  vec(lines[distribute[row]].line) = 0.;
723  }
724  }
725  }
726 }
727 
728 
729 //TODO: I'm sure the following could be made more elegant by using a bit of
730 //introspection using static member variables of the various vector
731 //classes to dispatch between the different functions, rather than using
732 //knowledge of the individual types
733 
734 // A number of helper functions to select the right implementation for set_zero().
735 namespace internal
736 {
737  namespace ConstraintMatrix
738  {
739  namespace
740  {
741  typedef types::global_dof_index size_type;
742 
743  template<class VEC>
744  void set_zero_parallel(const std::vector<size_type> &cm, VEC &vec, size_type shift = 0)
745  {
746  Assert(!vec.has_ghost_elements(), ExcInternalError());
747  IndexSet locally_owned = vec.locally_owned_elements();
748  for (typename std::vector<size_type>::const_iterator it = cm.begin();
749  it != cm.end(); ++it)
750  {
751  // If shift>0 then we are working on a part of a BlockVector
752  // so vec(i) is actually the global entry i+shift.
753  // We first make sure the line falls into the range of vec,
754  // then check if it is part of the local part of the vector, before
755  // finally setting it to 0.
756  if ((*it)<shift)
757  continue;
758  size_type idx = *it - shift;
759  if (idx<vec.size() && locally_owned.is_element(idx))
760  vec(idx) = 0.;
761  }
762  }
763 
764  template<typename Number>
765  void set_zero_parallel(const std::vector<size_type> &cm, parallel::distributed::Vector<Number> &vec, size_type shift = 0)
766  {
767  for (typename std::vector<size_type>::const_iterator it = cm.begin();
768  it != cm.end(); ++it)
769  {
770  // If shift>0 then we are working on a part of a BlockVector
771  // so vec(i) is actually the global entry i+shift.
772  // We first make sure the line falls into the range of vec,
773  // then check if it is part of the local part of the vector, before
774  // finally setting it to 0.
775  if ((*it)<shift)
776  continue;
777  size_type idx = *it - shift;
778  if (vec.in_local_range(idx))
779  vec(idx) = 0.;
780  }
781  vec.zero_out_ghosts();
782  }
783 
784  template<class VEC>
785  void set_zero_in_parallel(const std::vector<size_type> &cm, VEC &vec, internal::bool2type<false>)
786  {
787  set_zero_parallel(cm, vec, 0);
788  }
789 
790  // in parallel for BlockVectors
791  template<class VEC>
792  void set_zero_in_parallel(const std::vector<size_type> &cm, VEC &vec, internal::bool2type<true>)
793  {
794  size_type start_shift = 0;
795  for (size_type j=0; j<vec.n_blocks(); ++j)
796  {
797  set_zero_parallel(cm, vec.block(j), start_shift);
798  start_shift += vec.block(j).size();
799  }
800  }
801 
802  template<class VEC>
803  void set_zero_serial(const std::vector<size_type> &cm, VEC &vec)
804  {
805  for (typename std::vector<size_type>::const_iterator it = cm.begin();
806  it != cm.end(); ++it)
807  vec(*it) = 0.;
808  }
809 
810  template<class VEC>
811  void set_zero_all(const std::vector<size_type> &cm, VEC &vec)
812  {
813  set_zero_in_parallel<VEC>(cm, vec, internal::bool2type<IsBlockVector<VEC>::value>());
814  vec.compress(VectorOperation::insert);
815  }
816 
817 
818  template<class T>
819  void set_zero_all(const std::vector<size_type> &cm, ::Vector<T> &vec)
820  {
821  set_zero_serial(cm, vec);
822  }
823 
824  template<class T>
825  void set_zero_all(const std::vector<size_type> &cm, ::BlockVector<T> &vec)
826  {
827  set_zero_serial(cm, vec);
828  }
829  }
830  }
831 }
832 
833 
834 template <class VectorType>
835 void
836 ConstraintMatrix::set_zero (VectorType &vec) const
837 {
838  // since lines is a private member, we cannot pass it to the functions
839  // above. therefore, copy the content, which is cheap
840  std::vector<size_type> constrained_lines(lines.size());
841  for (unsigned int i=0; i<lines.size(); ++i)
842  constrained_lines[i] = lines[i].line;
843  internal::ConstraintMatrix::set_zero_all(constrained_lines, vec);
844 }
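// [Usage sketch, not part of the library source: set_zero() only zeroes the
//  constrained entries of a vector and leaves all other entries untouched,
//  e.g. for residuals or Newton updates. With the constraint on dof 3 from
//  the earlier sketch:]
//
// @code
//   Vector<double> r (5);
//   r = 1.;
//   constraints.set_zero (r);   // r(3) == 0, all other entries still 1
// @endcode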
845 
846 
847 
848 
849 template <typename VectorType>
850 void
851 ConstraintMatrix::distribute_local_to_global (
852  const Vector<double> &local_vector,
853  const std::vector<size_type> &local_dof_indices,
854  VectorType &global_vector,
855  const FullMatrix<double> &local_matrix) const
856 {
857  Assert (sorted == true, ExcMatrixNotClosed());
858  AssertDimension (local_vector.size(), local_dof_indices.size());
859  AssertDimension (local_matrix.m(), local_dof_indices.size());
860  AssertDimension (local_matrix.n(), local_dof_indices.size());
861 
862  const size_type n_local_dofs = local_vector.size();
863  if (lines.empty())
864  global_vector.add(local_dof_indices, local_vector);
865  else
866  for (size_type i=0; i<n_local_dofs; ++i)
867  {
868  // check whether the current index is
869  // constrained. if not, just write the entry
870  // into the vector. otherwise, need to resolve
871  // the constraint
872  if (is_constrained(local_dof_indices[i]) == false)
873  {
874  global_vector(local_dof_indices[i]) += local_vector(i);
875  continue;
876  }
877 
878  // find the constraint line to the given
879  // global dof index
880  const size_type line_index = calculate_line_index (local_dof_indices[i]);
881  const ConstraintLine *position =
882  lines_cache.size() <= line_index ? 0 : &lines[lines_cache[line_index]];
883 
884  // Gauss elimination of the matrix columns
885  // with the inhomogeneity. Go through them one
886  // by one and again check whether they are
887  // constrained. If so, distribute the constraint
888  const double val = position->inhomogeneity;
889  if (val != 0)
890  for (size_type j=0; j<n_local_dofs; ++j)
891  if (is_constrained(local_dof_indices[j]) == false)
892  global_vector(local_dof_indices[j]) -= val * local_matrix(j,i);
893  else
894  {
895  const double matrix_entry = local_matrix(j,i);
896  if (matrix_entry == 0)
897  continue;
898 
899  const ConstraintLine &position_j =
900  lines[lines_cache[calculate_line_index(local_dof_indices[j])]];
901  for (size_type q=0; q<position_j.entries.size(); ++q)
902  {
903  Assert (!(!local_lines.size()
904  || local_lines.is_element(position_j.entries[q].first))
905  || is_constrained(position_j.entries[q].first) == false,
906  ExcMessage ("Tried to distribute to a fixed dof."));
907  global_vector(position_j.entries[q].first)
908  -= val * position_j.entries[q].second * matrix_entry;
909  }
910  }
911 
912  // now distribute the constraint,
913  // but make sure we don't touch
914  // the entries of fixed dofs
915  for (size_type j=0; j<position->entries.size(); ++j)
916  {
917  Assert (!(!local_lines.size()
918  || local_lines.is_element(position->entries[j].first))
919  || is_constrained(position->entries[j].first) == false,
920  ExcMessage ("Tried to distribute to a fixed dof."));
921  global_vector(position->entries[j].first)
922  += local_vector(i) * position->entries[j].second;
923  }
924  }
925 }
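// [Usage sketch, not part of the library source: within a cell assembly
//  loop, this overload transfers the local right hand side while resolving
//  constraints and inhomogeneities; cell, cell_matrix, cell_rhs,
//  local_dof_indices and system_rhs are assumed from the usual assembly code.]
//
// @code
//   cell->get_dof_indices (local_dof_indices);
//   constraints.distribute_local_to_global (cell_rhs,
//                                           local_dof_indices,
//                                           system_rhs,
//                                           cell_matrix);
// @endcode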
926 
927 
928 
929 template<class VectorType>
930 void
931 ConstraintMatrix::distribute (const VectorType &condensed,
932  VectorType &uncondensed) const
933 {
934  Assert (sorted == true, ExcMatrixNotClosed());
935  AssertDimension (condensed.size()+n_constraints(), uncondensed.size());
936 
937  // store for each line of the new vector
938  // its old line number before
939  // distribution. If the entry is
940  // -1, this line was condensed away
941  std::vector<int> old_line;
942 
943  old_line.reserve (uncondensed.size());
944 
945  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
946  size_type shift = 0;
947  size_type n_rows = uncondensed.size();
948 
949  if (next_constraint == lines.end())
950  // if no constraint is to be handled
951  for (size_type row=0; row!=n_rows; ++row)
952  old_line.push_back (row);
953  else
954  for (size_type row=0; row!=n_rows; ++row)
955  if (row == next_constraint->line)
956  {
957  // this line is constrained
958  old_line.push_back (-1);
959  // note that @p lines is ordered
960  ++shift;
961  ++next_constraint;
962  if (next_constraint == lines.end())
963  // nothing more to do; finish rest
964  // of loop
965  {
966  for (size_type i=row+1; i<n_rows; ++i)
967  old_line.push_back (i-shift);
968  break;
969  }
970  }
971  else
972  old_line.push_back (row-shift);
973 
974 
975  next_constraint = lines.begin();
976  // note: in this loop we need not check
977  // whether @p next_constraint is a valid
978  // iterator, since @p next_constraint is
979  // only evaluated as often as there are
980  // entries in old_line[*], which tell us
981  // which constraints exist
982  for (size_type line=0; line<uncondensed.size(); ++line)
983  if (old_line[line] != -1)
984  // line was not condensed away
985  uncondensed(line) = condensed(old_line[line]);
986  else
987  {
988  // line was condensed away,
989  // create it anew. first set
990  // it to its inhomogeneity
991  uncondensed(line) = next_constraint->inhomogeneity;
992  // then add the different
993  // contributions
994  for (size_type i=0; i<next_constraint->entries.size(); ++i)
995  uncondensed(line) += (condensed(old_line[next_constraint->entries[i].first]) *
996  next_constraint->entries[i].second);
997  ++next_constraint;
998  };
999 }
1000 
1001 
1002 namespace internal
1003 {
1004  namespace
1005  {
1006  // create an output vector that consists of the input vector's locally owned
1007  // elements plus some ghost elements that need to be imported from elsewhere
1008  //
1009  // this is an operation that is different for all vector types and so we
1010  // need a few overloads
1011 #ifdef DEAL_II_WITH_TRILINOS
1012  void
1013  import_vector_with_ghost_elements (const TrilinosWrappers::MPI::Vector &vec,
1014  const IndexSet &/*locally_owned_elements*/,
1015  const IndexSet &needed_elements,
1016  TrilinosWrappers::MPI::Vector &output,
1017  const internal::bool2type<false> /*is_block_vector*/)
1018  {
1019  Assert(!vec.has_ghost_elements(),
1020  ExcGhostsPresent());
1021 #ifdef DEAL_II_WITH_MPI
1022  const Epetra_MpiComm *mpi_comm
1023  = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
1024 
1025  Assert (mpi_comm != 0, ExcInternalError());
1026  output.reinit (needed_elements, mpi_comm->GetMpiComm());
1027 #else
1028  output.reinit (needed_elements, MPI_COMM_WORLD);
1029 #endif
1030  output = vec;
1031  }
1032 #endif
1033 
1034 #ifdef DEAL_II_WITH_PETSC
1035  void
1036  import_vector_with_ghost_elements (const PETScWrappers::MPI::Vector &vec,
1037  const IndexSet &locally_owned_elements,
1038  const IndexSet &needed_elements,
1039  PETScWrappers::MPI::Vector &output,
1040  const internal::bool2type<false> /*is_block_vector*/)
1041  {
1042  output.reinit (vec.get_mpi_communicator(), locally_owned_elements, needed_elements);
1043  output = vec;
1044  }
1045 #endif
1046 
1047  template <typename number>
1048  void
1049  import_vector_with_ghost_elements (const parallel::distributed::Vector<number> &vec,
1050  const IndexSet &locally_owned_elements,
1051  const IndexSet &needed_elements,
1052  parallel::distributed::Vector<number> &output,
1053  const internal::bool2type<false> /*is_block_vector*/)
1054  {
1055  // TODO: the input vector might already have all elements. need to find a
1056  // way to efficiently avoid the copy then
1057  const_cast<parallel::distributed::Vector<number>&>(vec).zero_out_ghosts();
1058  output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
1059  output = vec;
1060  output.update_ghost_values();
1061  }
1062 
1063 
1064  // all other non-block vector types are sequential and we should
1065  // not have this function called at all -- so throw an exception
1066  template <typename Vector>
1067  void
1068  import_vector_with_ghost_elements (const Vector &/*vec*/,
1069  const IndexSet &/*locally_owned_elements*/,
1070  const IndexSet &/*needed_elements*/,
1071  Vector &/*output*/,
1072  const internal::bool2type<false> /*is_block_vector*/)
1073  {
1074  Assert (false, ExcMessage ("We shouldn't even get here!"));
1075  }
1076 
1077 
1078  // for block vectors, simply dispatch to the individual blocks
1079  template <class VectorType>
1080  void
1081  import_vector_with_ghost_elements (const VectorType &vec,
1082  const IndexSet &locally_owned_elements,
1083  const IndexSet &needed_elements,
1084  VectorType &output,
1085  const internal::bool2type<true> /*is_block_vector*/)
1086  {
1087  output.reinit (vec.n_blocks());
1088 
1089  types::global_dof_index block_start = 0;
1090  for (unsigned int b=0; b<vec.n_blocks(); ++b)
1091  {
1092  import_vector_with_ghost_elements (vec.block(b),
1093  locally_owned_elements.get_view (block_start, block_start+vec.block(b).size()),
1094  needed_elements.get_view (block_start, block_start+vec.block(b).size()),
1095  output.block(b),
1096  internal::bool2type<false>());
1097  block_start += vec.block(b).size();
1098  }
1099 
1100  output.collect_sizes ();
1101  }
1102  }
1103 }
1104 
1105 
1106 template <class VectorType>
1107 void
1108 ConstraintMatrix::distribute (VectorType &vec) const
1109 {
1110  Assert (sorted==true, ExcMatrixNotClosed());
1111 
1112  // if the vector type supports parallel storage and if the vector actually
1113  // does store only part of the vector, distributing is slightly more
1114  // complicated. we might be able to skip the complicated part if one
1115  // processor owns everything and pretend that this is a sequential vector,
1116  // but it is difficult for the other processors to know whether they should
1117  // not do anything or if other processors will create a temporary vector,
1118  // exchange data (requiring communication, maybe even with the processors
1119  // that do not own anything because of that particular parallel model), and
1120  // call compress() finally. the first case here is for the complicated case,
1121  // the last else is for the simple case (sequential vector)
1122  const IndexSet vec_owned_elements = vec.locally_owned_elements();
1123  if (vec.supports_distributed_data == true)
1124  {
1125  // This processor owns only part of the vector. one may think that
1126  // every processor should be able to simply communicate those elements
1127  // it owns and for which it knows that they act as sources to constrained
1128  // DoFs to the owner of these DoFs. This would lead to a scheme where all
1129  // we need to do is to add some local elements to (possibly non-local) ones
1130  // and then call compress().
1131  //
1132  // Alas, this scheme does not work as evidenced by the disaster of bug #51,
1133  // see http://code.google.com/p/dealii/issues/detail?id=51 and the
1134  // reversion of one attempt that implements this in r29662. Rather, we
1135  // need to get a vector that has all the *sources* or constraints we
1136  // own locally, possibly as ghost vector elements, then read from them,
1137  // and finally throw away the ghosted vector. Implement this in the following.
1138  IndexSet needed_elements = vec_owned_elements;
1139 
1140  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
1141  for (constraint_iterator it = lines.begin();
1142  it != lines.end(); ++it)
1143  if (vec_owned_elements.is_element(it->line))
1144  for (unsigned int i=0; i<it->entries.size(); ++i)
1145  if (!vec_owned_elements.is_element(it->entries[i].first))
1146  needed_elements.add_index(it->entries[i].first);
1147 
1148  VectorType ghosted_vector;
1149  internal::import_vector_with_ghost_elements (vec,
1150  vec_owned_elements, needed_elements,
1151  ghosted_vector,
1152  internal::bool2type<IsBlockVector<VectorType>::value>());
1153 
1154  for (constraint_iterator it = lines.begin();
1155  it != lines.end(); ++it)
1156  if (vec_owned_elements.is_element(it->line))
1157  {
1158  typename VectorType::value_type
1159  new_value = it->inhomogeneity;
1160  for (unsigned int i=0; i<it->entries.size(); ++i)
1161  new_value += (static_cast<typename VectorType::value_type>
1162  (ghosted_vector(it->entries[i].first)) *
1163  it->entries[i].second);
1165  vec(it->line) = new_value;
1166  }
1167 
1168  // now compress to communicate any entries that we added to
1169  // non-locally owned parts of the vector to their owners
1170  //
1171  // this shouldn't be strictly necessary but it probably doesn't
1172  // hurt either
1173  vec.compress (VectorOperation::insert);
1174  }
1175  else
1176  // purely sequential vector (either because the type doesn't
1177  // support anything else or because it's completely stored
1178  // locally)
1179  {
1180  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
1181  for (; next_constraint != lines.end(); ++next_constraint)
1182  {
1183  // fill entry in line
1184  // next_constraint.line by adding the
1185  // different contributions
1186  typename VectorType::value_type
1187  new_value = next_constraint->inhomogeneity;
1188  for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
1189  new_value += (static_cast<typename VectorType::value_type>
1190  (vec(next_constraint->entries[i].first)) *
1191  next_constraint->entries[i].second);
1193  vec(next_constraint->line) = new_value;
1194  }
1195  }
1196 }
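// [Usage sketch, not part of the library source: distribute() is the usual
//  last step after solving the constrained linear system, so that
//  constrained degrees of freedom obtain their correct values; solver,
//  preconditioner, system_matrix, solution and system_rhs are assumed.]
//
// @code
//   solver.solve (system_matrix, solution, system_rhs, preconditioner);
//   constraints.distribute (solution);
// @endcode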
1197 
1198 
1199 
1200 // Some helper definitions for the local_to_global functions.
1201 namespace internals
1202 {
1203  typedef types::global_dof_index size_type;
1204 
1205  // this struct contains all the information we need to store about each of
1206  // the global entries (global_row): are they obtained directly by some local
1207  // entry (local_row) or some constraints (constraint_position). This is not
1208  // directly used in the user code, but accessed via the GlobalRowsFromLocal.
1209  //
1210  // The actions performed here correspond to reshaping the constraint
1211  // information from global degrees of freedom to local ones (i.e.,
1212  // cell-related DoFs), and also transforming the constraint information from
1213  // compressed row storage (each local dof that is constrained has a list of
1214  // constraint entries associated to it) into compressed column storage based
1215  // on the cell-related DoFs (we have a list of global degrees of freedom,
1216  // and to each we have a list of local rows where the entries come from). To
1217  // increase the speed, we additionally store whether an entry is generated
1218  // directly from the local degrees of freedom or whether it comes from a
1219  // constraint.
1220  struct Distributing
1221  {
1222  Distributing (const size_type global_row = numbers::invalid_size_type,
1223  const size_type local_row = numbers::invalid_size_type);
1224  Distributing (const Distributing &in);
1225  Distributing &operator = (const Distributing &in);
1226  bool operator < (const Distributing &in) const
1227  {
1228  return global_row<in.global_row;
1229  };
1230 
1231  size_type global_row;
1232  size_type local_row;
1233  mutable size_type constraint_position;
1234  };
1235 
1236  inline
1237  Distributing::Distributing (const size_type global_row,
1238  const size_type local_row) :
1239  global_row (global_row),
1240  local_row (local_row),
1241  constraint_position (numbers::invalid_size_type) {}
1242 
1243  inline
1244  Distributing::Distributing (const Distributing &in)
1245  :
1246  constraint_position (numbers::invalid_size_type)
1247  {
1248  *this = (in);
1249  }
1250 
1251  inline
1252  Distributing &Distributing::operator = (const Distributing &in)
1253  {
1254  global_row = in.global_row;
1255  local_row = in.local_row;
1256  // the constraints pointer should not contain any data here.
1257  Assert (constraint_position == numbers::invalid_size_type,
1258  ExcInternalError());
1259 
1260  if (in.constraint_position != numbers::invalid_size_type)
1261  {
1262  constraint_position = in.constraint_position;
1263  in.constraint_position = numbers::invalid_size_type;
1264  }
1265  return *this;
1266  }
1267 
1268 
1269 
1270  // this is a cache for constraints that are encountered on a local level.
1271  // The functionality is similar to
1272  // std::vector<std::vector<std::pair<uint,double> > >, but tuned so that
1273  // frequent memory allocation for each entry is avoided. The data is put
1274  // into a std::vector<std::pair<uint,double> > and the row length is kept
1275  // fixed at row_length. Both the number of rows and the row length can
1276  // change as this structure is filled. In that case, the data is
1277  // rearranged. This is not directly used in the user code, but accessed via
1278  // the GlobalRowsFromLocal.
1279  struct DataCache
1280  {
1281  DataCache ()
1282  :
1283  row_length (8)
1284  {}
1285 
1286  void reinit ()
1287  {
1288  individual_size.resize(0);
1289  data.resize(0);
1290  }
1291 
1292  size_type insert_new_index (const std::pair<size_type,double> &pair)
1293  {
1294  Assert(row_length > 0, ExcInternalError());
1295  const unsigned int index = individual_size.size();
1296  individual_size.push_back(1);
1297  data.resize(individual_size.size()*row_length);
1298  data[index*row_length] = pair;
1299  individual_size[index] = 1;
1300  return index;
1301  }
1302 
1303  void append_index (const size_type index,
1304  const std::pair<size_type,double> &pair)
1305  {
1306  AssertIndexRange (index, individual_size.size());
1307  const size_type my_length = individual_size[index];
1308  if (my_length == row_length)
1309  {
1310  AssertDimension(data.size(), individual_size.size()*row_length);
1311  // no space left in this row, need to double row_length and
1312  // rearrange the data items. Move all items to the right except the
1313  // first one, starting at the back. Since individual_size contains
1314  // at least one element when we get here, subtracting 1 works fine.
1315  data.resize(2*data.size());
1316  for (size_type i=individual_size.size()-1; i>0; --i)
1317  std::memmove(&data[i*row_length*2], &data[i*row_length],
1318  individual_size[i]*
1319  sizeof(std::pair<size_type,double>));
1320  row_length *= 2;
1321  }
1322  data[index*row_length+my_length] = pair;
1323  individual_size[index] = my_length + 1;
1324  }
1325 
1326  size_type
1327  get_size (const size_type index) const
1328  {
1329  return individual_size[index];
1330  }
1331 
1332  const std::pair<size_type,double> *
1333  get_entry (const size_type index) const
1334  {
1335  return &data[index*row_length];
1336  }
1337 
1338  size_type row_length;
1339 
1340  std::vector<std::pair<size_type,double> > data;
1341 
1342  std::vector<size_type> individual_size;
1343  };
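// [Illustration, not part of the library source: the cache starts with
//  row_length == 8; an append_index() call that finds its row full doubles
//  row_length and moves every row to its new offset. For example:]
//
// @code
//   internals::DataCache cache;
//   const internals::size_type row =
//     cache.insert_new_index (std::pair<internals::size_type,double> (42, 1.0));
//   for (unsigned int k=0; k<8; ++k)
//     cache.append_index (row, std::pair<internals::size_type,double> (k, 0.5));
//   // the last append found the row full (8 entries), so row_length is now 16
//   // and cache.get_size(row) == 9; the data starts at cache.get_entry(row).
// @endcode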
1344 
1345 
1346 
1347  // collects all the global rows from a local contribution (cell) and their
1348  // origin (direct/constraint). this is basically a vector consisting of
1349  // "Distributing" structs using access via the DataCache. Provides some
1350  // specialized sort and insert functions.
1351  //
1352  // in case there are no constraints, this is basically a list of pairs
1353  // <uint,uint> with the first index being the global index and the second
1354  // index the local index. The list is sorted with respect to the global
1355  // index.
1356  //
1357  // in case there are constraints, a global dof might get a contribution also
1358  // because it gets data from a constrained dof. This means that a global dof
1359  // might also have indirect contributions from a local dof via a constraint,
1360  // besides the direct ones.
1361  //
1362  // The actions performed here correspond to reshaping the constraint
1363  // information from global degrees of freedom to local ones (i.e.,
1364  // cell-related DoFs), and also transforming the constraint information from
1365  // compressed row storage (each local dof that is constrained has a list of
1366  // constraint entries associated to it) into compressed column storage based
1367  // on the cell-related DoFs (we have a list of global degrees of freedom,
1368  // and to each we have a list of local rows where the entries come from). To
1369  // increase the speed, we additionally store whether an entry is generated
1370  // directly from the local degrees of freedom or whether it comes from a
1371  // constraint.
1372  class GlobalRowsFromLocal
1373  {
1374  public:
1375  GlobalRowsFromLocal ()
1376  :
1377  n_active_rows (0),
1378  n_inhomogeneous_rows (0)
1379  {}
1380 
1381  void reinit (const size_type n_local_rows)
1382  {
1383  total_row_indices.resize(n_local_rows);
1384  for (unsigned int i=0; i<n_local_rows; ++i)
1385  total_row_indices[i].constraint_position = numbers::invalid_size_type;
1386  n_active_rows = n_local_rows;
1387  n_inhomogeneous_rows = 0;
1388  data_cache.reinit();
1389  }
1390 
1391  // implemented below
1392  void insert_index (const size_type global_row,
1393  const size_type local_row,
1394  const double constraint_value);
1395  void sort ();
1396 
1397  // Print object for debugging purposes
1398  void print(std::ostream &os)
1399  {
1400  os << "Active rows " << n_active_rows << std::endl
1401  << "Constr rows " << n_constraints() << std::endl
1402  << "Inhom rows " << n_inhomogeneous_rows << std::endl
1403  << "Local: ";
1404  for (size_type i=0 ; i<total_row_indices.size() ; ++i)
1405  os << ' ' << std::setw(4) << total_row_indices[i].local_row;
1406  os << std::endl
1407  << "Global:";
1408  for (size_type i=0 ; i<total_row_indices.size() ; ++i)
1409  os << ' ' << std::setw(4) << total_row_indices[i].global_row;
1410  os << std::endl
1411  << "ConPos:";
1412  for (size_type i=0 ; i<total_row_indices.size() ; ++i)
1413  os << ' ' << std::setw(4) << total_row_indices[i].constraint_position;
1414  os << std::endl;
1415  }
1416 
1417 
1418  // return all kinds of information on the constraints
1419 
1420  // returns the number of global indices in the struct
1421  size_type size () const
1422  {
1423  return n_active_rows;
1424  }
1425 
1426  // returns the number of constraints that are associated to the
1427  // counter_index-th entry in the list
1428  size_type size (const size_type counter_index) const
1429  {
1430  return (total_row_indices[counter_index].constraint_position ==
1431  numbers::invalid_size_type ?
1432  0 :
1433  data_cache.get_size(total_row_indices[counter_index].
1434  constraint_position));
1435  }
1436 
1437  // returns the global row of the counter_index-th entry in the list
1438  size_type global_row (const size_type counter_index) const
1439  {
1440  return total_row_indices[counter_index].global_row;
1441  }
1442 
1443  // returns the global row of the counter_index-th entry in the list
1444  size_type &global_row (const size_type counter_index)
1445  {
1446  return total_row_indices[counter_index].global_row;
1447  }
1448 
1449  // returns the local row in the cell matrix associated with the
1450  // counter_index-th entry in the list. Returns invalid_size_type for
1451  // constrained rows
1452  size_type local_row (const size_type counter_index) const
1453  {
1454  return total_row_indices[counter_index].local_row;
1455  }
1456 
1457  // writable index
1458  size_type &local_row (const size_type counter_index)
1459  {
1460  return total_row_indices[counter_index].local_row;
1461  }
1462 
1463  // returns the local row in the cell matrix associated with the
1464  // counter_index-th entry in the list in the index_in_constraint-th
1465  // position of constraints
1466  size_type local_row (const size_type counter_index,
1467  const size_type index_in_constraint) const
1468  {
1469  return (data_cache.get_entry(total_row_indices[counter_index].constraint_position)
1470  [index_in_constraint]).first;
1471  }
1472 
1473  // returns the value of the constraint in the counter_index-th entry in
1474  // the list in the index_in_constraint-th position of constraints
1475  double constraint_value (const size_type counter_index,
1476  const size_type index_in_constraint) const
1477  {
1478  return (data_cache.get_entry(total_row_indices[counter_index].constraint_position)
1479  [index_in_constraint]).second;
1480  }
1481 
1482  // returns whether there is one row with indirect contributions (i.e.,
1483  // there has been at least one constraint with non-trivial ConstraintLine)
1484  bool have_indirect_rows () const
1485  {
1486  return data_cache.individual_size.empty() == false;
1487  }
1488 
1489  // append an entry that is constrained. This means that there is one less
1490  // nontrivial row
1491  void insert_constraint (const size_type constrained_local_dof)
1492  {
1493  --n_active_rows;
1494  total_row_indices[n_active_rows].local_row = constrained_local_dof;
1495  total_row_indices[n_active_rows].global_row = numbers::invalid_size_type;
1496  }
1497 
1498  // returns the number of constrained dofs in the structure. Constrained
1499  // dofs do not contribute directly to the matrix, but are needed in order
1500  // to set matrix diagonals and resolve inhomogeneities
1501  size_type n_constraints () const
1502  {
1503  return total_row_indices.size()-n_active_rows;
1504  }
1505 
1506  // returns the number of constrained dofs in the structure that have an
1507  // inhomogeneity
1508  size_type n_inhomogeneities () const
1509  {
1510  return n_inhomogeneous_rows;
1511  }
1512 
1513  // tells the structure that the ith constraint is
1514  // inhomogeneous. inhomogeneous constraints contribute to right hand
1515  // sides, so to have fast access to them, put them before homogeneous
1516  // constraints
1517  void set_ith_constraint_inhomogeneous (const size_type i)
1518  {
1519  Assert (i >= n_inhomogeneous_rows, ExcInternalError());
1520  std::swap (total_row_indices[n_active_rows+i],
1521  total_row_indices[n_active_rows+n_inhomogeneous_rows]);
1522  n_inhomogeneous_rows++;
1523  }
1524 
1525  // the local row where constraint number i was detected, to find that row
1526  // easily when the GlobalRowsFromLocal has been set up
1527  size_type constraint_origin (size_type i) const
1528  {
1529  return total_row_indices[n_active_rows+i].local_row;
1530  }
1531 
1532  // a vector that contains all the global ids and the corresponding local
1533  // ids as well as a pointer to that data where we store how to resolve
1534  // constraints.
1535  std::vector<Distributing> total_row_indices;
1536 
1537  private:
1538  // holds the actual data from the constraints
1539  DataCache data_cache;
1540 
1541  // how many rows there are, constraints disregarded
1542  size_type n_active_rows;
1543 
1544  // the number of rows with inhomogeneous constraints
1545  size_type n_inhomogeneous_rows;
1546  };
1547 
1548  // a function that appends an additional row to the list of values, or
1549  // appends a value to an already existing row. Similar functionality as for
1550  // std::map<size_type,Distributing>, but here done for a
1551  // std::vector<Distributing>, much faster for short lists as we have them
1552  // here
1553  inline
1554  void
1555  GlobalRowsFromLocal::insert_index (const size_type global_row,
1556  const size_type local_row,
1557  const double constraint_value)
1558  {
1559  typedef std::vector<Distributing>::iterator index_iterator;
1560  index_iterator pos, pos1;
1561  Distributing row_value (global_row);
1562  std::pair<size_type,double> constraint (local_row, constraint_value);
1563 
1564  // check whether the list was really sorted before entering here
1565  for (size_type i=1; i<n_active_rows; ++i)
1566  Assert (total_row_indices[i-1] < total_row_indices[i], ExcInternalError());
1567 
1568  pos = Utilities::lower_bound (total_row_indices.begin(),
1569  total_row_indices.begin()+n_active_rows,
1570  row_value);
1571  if (pos->global_row == global_row)
1572  pos1 = pos;
1573  else
1574  {
1575  pos1 = total_row_indices.insert(pos, row_value);
1576  ++n_active_rows;
1577  }
1578 
1579  if (pos1->constraint_position == numbers::invalid_size_type)
1580  pos1->constraint_position = data_cache.insert_new_index (constraint);
1581  else
1582  data_cache.append_index (pos1->constraint_position, constraint);
1583  }
1584 
1585  // this sort algorithm sorts std::vector<Distributing>, but does not take
1586  // the constraints into account. this means that in case that constraints
1587  // are already inserted, this function does not work as expected. Use
1588  // shellsort, which is very fast in case the indices are already sorted
1589  // (which is the usual case with DG elements), and not too slow in other
1590  // cases
1591  inline
1592  void
1593  GlobalRowsFromLocal::sort ()
1594  {
1595  size_type i, j, j2, temp, templ, istep;
1596  size_type step;
1597 
1598  // check whether the constraints are really empty.
1599  const size_type length = size();
1600 
1601  // make sure that we are in the range of the vector
1602  AssertIndexRange (length, total_row_indices.size()+1);
1603  for (size_type i=0; i<length; ++i)
1604  Assert (total_row_indices[i].constraint_position ==
1605  numbers::invalid_size_type,
1606  ExcInternalError());
1607 
1608  step = length/2;
1609  while (step > 0)
1610  {
1611  for (i=step; i < length; i++)
1612  {
1613  istep = step;
1614  j = i;
1615  j2 = j-istep;
1616  temp = total_row_indices[i].global_row;
1617  templ = total_row_indices[i].local_row;
1618  if (total_row_indices[j2].global_row > temp)
1619  {
1620  while ((j >= istep) && (total_row_indices[j2].global_row > temp))
1621  {
1622  total_row_indices[j].global_row = total_row_indices[j2].global_row;
1623  total_row_indices[j].local_row = total_row_indices[j2].local_row;
1624  j = j2;
1625  j2 -= istep;
1626  }
1627  total_row_indices[j].global_row = temp;
1628  total_row_indices[j].local_row = templ;
1629  }
1630  }
1631  step = step>>1;
1632  }
1633  }
1634 
1635 
1636 
1654  template <typename Number>
1655  class ConstraintMatrixData
1656  {
1657  public:
1658  struct ScratchData
1659  {
1663  ScratchData ()
1664  :
1665  in_use (false)
1666  {}
1667 
1671  ScratchData (const ScratchData &)
1672  :
1673  in_use (false)
1674  {}
1675 
1679  bool in_use;
1680 
1684  std::vector<size_type> columns;
1685 
1689  std::vector<Number> values;
1690 
1694  std::vector<size_type> block_starts;
1695 
1699  std::vector<size_type> vector_indices;
1700 
1705  GlobalRowsFromLocal global_rows;
1706 
1711  GlobalRowsFromLocal global_columns;
1712  };
1713 
1717  class ScratchDataAccessor
1718  {
1719  public:
1724  ScratchDataAccessor()
1725  :
1726  my_scratch_data(&ConstraintMatrixData::scratch_data.get())
1727  {
1728  Assert(my_scratch_data->in_use == false,
1729  ExcMessage("Access to thread-local scratch data tried, but it is already "
1730  "in use"));
1731  my_scratch_data->in_use = true;
1732  }
1733 
1737  ~ScratchDataAccessor()
1738  {
1739  my_scratch_data->in_use = false;
1740  }
1741 
1745  ScratchData &operator* ()
1746  {
1747  return *my_scratch_data;
1748  }
1749 
1753  ScratchData *operator-> ()
1754  {
1755  return my_scratch_data;
1756  }
1757 
1758  private:
1759  ScratchData *my_scratch_data;
1760  };
1761 
1762  private:
1766  static Threads::ThreadLocalStorage<ScratchData> scratch_data;
1767  };
1768 
1769 
1770 
1771  // function for block matrices: Find out where in the list of local dofs
1772  // (sorted according to global ids) the individual blocks start. Transform
1773  // the global indices to block-local indices in order to be able to use
1774  // functions like vector.block(1)(block_local_id), instead of
1775  // vector(global_id). This avoids transforming indices one-by-one later on.
1776  template <class BlockType>
1777  inline
1778  void
1779  make_block_starts (const BlockType &block_object,
1780  GlobalRowsFromLocal &global_rows,
1781  std::vector<size_type> &block_starts)
1782  {
1783  AssertDimension (block_starts.size(), block_object.n_block_rows()+1);
1784 
1785  typedef std::vector<Distributing>::iterator row_iterator;
1786  row_iterator block_indices = global_rows.total_row_indices.begin();
1787 
1788  const size_type num_blocks = block_object.n_block_rows();
1789  const size_type n_active_rows = global_rows.size();
1790 
1791  // find end of rows.
1792  block_starts[0] = 0;
1793  for (size_type i=1; i<num_blocks; ++i)
1794  {
1795  row_iterator first_block =
1796  Utilities::lower_bound (block_indices,
1797  global_rows.total_row_indices.begin()+n_active_rows,
1798  Distributing(block_object.get_row_indices().block_start(i)));
1799  block_starts[i] = first_block - global_rows.total_row_indices.begin();
1800  block_indices = first_block;
1801  }
1802  block_starts[num_blocks] = n_active_rows;
1803 
1804  // transform row indices to block-local index space
1805  for (size_type i=block_starts[1]; i<n_active_rows; ++i)
1806  global_rows.global_row(i) = block_object.get_row_indices().
1807  global_to_local(global_rows.global_row(i)).second;
1808  }
1809 
1810 
1811 
1812  // same as before, but for std::vector<uint> instead of
1813  // GlobalRowsFromLocal. Used in functions for sparsity patterns.
1814  template <class BlockType>
1815  inline
1816  void
1817  make_block_starts (const BlockType &block_object,
1818  std::vector<size_type> &row_indices,
1819  std::vector<size_type> &block_starts)
1820  {
1821  AssertDimension (block_starts.size(), block_object.n_block_rows()+1);
1822 
1823  typedef std::vector<size_type>::iterator row_iterator;
1824  row_iterator col_indices = row_indices.begin();
1825 
1826  const size_type num_blocks = block_object.n_block_rows();
1827 
1828  // find end of rows.
1829  block_starts[0] = 0;
1830  for (size_type i=1; i<num_blocks; ++i)
1831  {
1832  row_iterator first_block =
1833  Utilities::lower_bound (col_indices,
1834  row_indices.end(),
1835  block_object.get_row_indices().block_start(i));
1836  block_starts[i] = first_block - row_indices.begin();
1837  col_indices = first_block;
1838  }
1839  block_starts[num_blocks] = row_indices.size();
1840 
1841  // transform row indices to local index space
1842  for (size_type i=block_starts[1]; i<row_indices.size(); ++i)
1843  row_indices[i] = block_object.get_row_indices().
1844  global_to_local(row_indices[i]).second;
1845  }
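
Both make_block_starts variants do the same thing: given an already sorted list of row indices and the first global index of each block, they record where each block's indices begin, and then map the indices to block-local numbering. The partitioning step alone, written as a standalone sketch with made-up names (not part of the library), could read:

#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch: given sorted global indices and the first index of each block,
// compute block_starts such that [block_starts[b], block_starts[b+1])
// are the positions belonging to block b.
std::vector<std::size_t>
partition_by_blocks (const std::vector<std::size_t> &sorted_indices,
                     const std::vector<std::size_t> &first_index_of_block)
{
  const std::size_t n_blocks = first_index_of_block.size();
  std::vector<std::size_t> block_starts (n_blocks+1, 0);
  for (std::size_t b = 1; b < n_blocks; ++b)
    block_starts[b] = std::lower_bound (sorted_indices.begin(),
                                        sorted_indices.end(),
                                        first_index_of_block[b])
                      - sorted_indices.begin();
  block_starts[n_blocks] = sorted_indices.size();
  return block_starts;
}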
1846 
1847 
1848 
1849  // resolves the constraints of one column in the innermost loop. goes through
1850  // the origin of each global entry and finds out which data we need to
1851  // collect.
1852  static inline
1853  double resolve_matrix_entry (const GlobalRowsFromLocal &global_rows,
1854  const GlobalRowsFromLocal &global_cols,
1855  const size_type i,
1856  const size_type j,
1857  const size_type loc_row,
1858  const FullMatrix<double> &local_matrix)
1859  {
1860  const size_type loc_col = global_cols.local_row(j);
1861  double col_val;
1862 
1863  // case 1: row has direct contribution in local matrix. decide whether col
1864  // has a direct contribution. if not, set the value to zero.
1865  if (loc_row != numbers::invalid_size_type)
1866  {
1867  col_val = ((loc_col != numbers::invalid_size_type) ?
1868  local_matrix(loc_row, loc_col) : 0);
1869 
1870  // account for indirect contributions by constraints in column
1871  for (size_type p=0; p<global_cols.size(j); ++p)
1872  col_val += (local_matrix(loc_row, global_cols.local_row(j,p)) *
1873  global_cols.constraint_value(j,p));
1874  }
1875 
1876  // case 2: row has no direct contribution in local matrix
1877  else
1878  col_val = 0;
1879 
1880  // account for indirect contributions by constraints in row, going through
1881  // the direct and indirect references in the given column.
1882  for (size_type q=0; q<global_rows.size(i); ++q)
1883  {
1884  double add_this = (loc_col != numbers::invalid_size_type)
1885  ? local_matrix(global_rows.local_row(i,q), loc_col) : 0;
1886 
1887  for (size_type p=0; p<global_cols.size(j); ++p)
1888  add_this += (local_matrix(global_rows.local_row(i,q),
1889  global_cols.local_row(j,p))
1890  *
1891  global_cols.constraint_value(j,p));
1892  col_val += add_this * global_rows.constraint_value(i,q);
1893  }
1894  return col_val;
1895  }
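
In matrix terms, resolve_matrix_entry evaluates one entry of the condensed cell matrix C A_local C^T, where each row of C lists the weights with which the local degrees of freedom contribute to one resulting global row (weight 1 for an unconstrained dof, the constraint weights otherwise). A naive dense reference computation, useful only for cross-checking and written here with hypothetical names, would be:

#include <cstddef>
#include <vector>

// Reference computation (illustrative sketch): entry (I,J) of C * A * C^T
// for a dense local matrix A (n x n) and a dense weight matrix C (m x n)
// whose row I holds the weights of the local dofs for global row I.
double resolved_entry (const std::vector<std::vector<double> > &A,
                       const std::vector<std::vector<double> > &C,
                       const std::size_t I, const std::size_t J)
{
  double sum = 0;
  for (std::size_t k = 0; k < A.size(); ++k)
    for (std::size_t l = 0; l < A.size(); ++l)
      sum += C[I][k] * A[k][l] * C[J][l];
  return sum;
}

The code above avoids this quadratic sweep by only visiting the few nonzero weights stored in global_rows and global_cols.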
1896 
1897 
1898 
1899  // computes all entries that need to be written into global_rows[i]. Lists
1900  // the resulting values in val_ptr, and the corresponding column indices in
1901  // col_ptr.
1902  template <typename number>
1903  inline
1904  void
1905  resolve_matrix_row (const GlobalRowsFromLocal &global_rows,
1906  const GlobalRowsFromLocal &global_cols,
1907  const size_type i,
1908  const size_type column_start,
1909  const size_type column_end,
1910  const FullMatrix<double> &local_matrix,
1911  size_type *&col_ptr,
1912  number *&val_ptr)
1913  {
1914  if (column_end == column_start)
1915  return;
1916 
1917  AssertIndexRange (column_end-1, global_cols.size());
1918  const size_type loc_row = global_rows.local_row(i);
1919 
1920  // fast function if there are no indirect references to any of the local
1921  // rows at all on this set of dofs (saves a lot of checks). the only check
1922  // we actually need to perform is whether the matrix element is zero.
1923  if (global_rows.have_indirect_rows() == false &&
1924  global_cols.have_indirect_rows() == false)
1925  {
1926  AssertIndexRange(loc_row, local_matrix.m());
1927  const double *matrix_ptr = &local_matrix(loc_row, 0);
1928 
1929  for (size_type j=column_start; j<column_end; ++j)
1930  {
1931  const size_type loc_col = global_cols.local_row(j);
1932  AssertIndexRange(loc_col, local_matrix.n());
1933  const double col_val = matrix_ptr[loc_col];
1934  if (col_val != 0.)
1935  {
1936  *val_ptr++ = static_cast<number> (col_val);
1937  *col_ptr++ = global_cols.global_row(j);
1938  }
1939  }
1940  }
1941 
1942  // more difficult part when there are indirect references and when we need
1943  // to do some more checks.
1944  else
1945  {
1946  for (size_type j=column_start; j<column_end; ++j)
1947  {
1948  double col_val = resolve_matrix_entry (global_rows, global_cols, i, j,
1949  loc_row, local_matrix);
1950 
1951  // if we got some nontrivial value, append it to the array of
1952  // values.
1953  if (col_val != 0.)
1954  {
1955  *val_ptr++ = static_cast<number> (col_val);
1956  *col_ptr++ = global_cols.global_row(j);
1957  }
1958  }
1959  }
1960  }
1961 
1962 
1963 
1964  // specialized function that can write into the row of a
1965  // SparseMatrix<number>.
1966  namespace dealiiSparseMatrix
1967  {
1968  template <typename SparseMatrixIterator>
1969  static inline
1970  void add_value (const double value,
1971  const size_type row,
1972  const size_type column,
1973  SparseMatrixIterator &matrix_values)
1974  {
1975  if (value != 0.)
1976  {
1977  while (matrix_values->column() < column)
1978  ++matrix_values;
1979  Assert (matrix_values->column() == column,
1980  typename SparseMatrix<typename SparseMatrixIterator::MatrixType::value_type>::ExcInvalidIndex(row, column));
1981  matrix_values->value() += value;
1982  }
1983  }
1984  }
1985 
1986 
1987  // similar to before, but now with a shortcut for deal.II sparse matrices. this
1988  // lets us avoid using extra arrays, and does all the operations just in
1989  // place, i.e., in the respective matrix row
1990  template <typename number>
1991  inline
1992  void
1993  resolve_matrix_row (const GlobalRowsFromLocal &global_rows,
1994  const size_type i,
1995  const size_type column_start,
1996  const size_type column_end,
1997  const FullMatrix<double> &local_matrix,
1998  SparseMatrix<number> *sparse_matrix)
1999  {
2000  if (column_end == column_start)
2001  return;
2002 
2003  AssertIndexRange (column_end-1, global_rows.size());
2004  const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern();
2005 
2006  if (sparsity.n_nonzero_elements() == 0)
2007  return;
2008 
2009  const size_type row = global_rows.global_row(i);
2010  const size_type loc_row = global_rows.local_row(i);
2011 
2012  typename SparseMatrix<number>::iterator
2013  matrix_values = sparse_matrix->begin(row);
2014  const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols();
2015 
2016  // distinguish three cases about what can happen for checking whether the
2017  // diagonal is the first element of the row. this avoids if statements at
2018  // the innermost loop positions
2019 
2020  if (!optimize_diagonal) // case 1: no diagonal optimization in matrix
2021  {
2022  if (global_rows.have_indirect_rows() == false)
2023  {
2024  AssertIndexRange (loc_row, local_matrix.m());
2025  const double *matrix_ptr = &local_matrix(loc_row, 0);
2026 
2027  for (size_type j=column_start; j<column_end; ++j)
2028  {
2029  const size_type loc_col = global_rows.local_row(j);
2030  const double col_val = matrix_ptr[loc_col];
2031  dealiiSparseMatrix::add_value (col_val, row,
2032  global_rows.global_row(j),
2033  matrix_values);
2034  }
2035  }
2036  else
2037  {
2038  for (size_type j=column_start; j<column_end; ++j)
2039  {
2040  double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
2041  loc_row, local_matrix);
2042  dealiiSparseMatrix::add_value (col_val, row,
2043  global_rows.global_row(j),
2044  matrix_values);
2045  }
2046  }
2047  }
2048  else if (i>=column_start && i<column_end) // case 2: can split loop
2049  {
2050  ++matrix_values; // jump over diagonal element
2051  if (global_rows.have_indirect_rows() == false)
2052  {
2053  AssertIndexRange (loc_row, local_matrix.m());
2054  const double *matrix_ptr = &local_matrix(loc_row, 0);
2055 
2056  sparse_matrix->begin(row)->value() += matrix_ptr[loc_row];
2057  for (size_type j=column_start; j<i; ++j)
2058  {
2059  const size_type loc_col = global_rows.local_row(j);
2060  const double col_val = matrix_ptr[loc_col];
2061  dealiiSparseMatrix::add_value(col_val, row,
2062  global_rows.global_row(j),
2063  matrix_values);
2064  }
2065  for (size_type j=i+1; j<column_end; ++j)
2066  {
2067  const size_type loc_col = global_rows.local_row(j);
2068  const double col_val = matrix_ptr[loc_col];
2069  dealiiSparseMatrix::add_value(col_val, row,
2070  global_rows.global_row(j),
2071  matrix_values);
2072  }
2073  }
2074  else
2075  {
2076  sparse_matrix->begin(row)->value() +=
2077  resolve_matrix_entry (global_rows, global_rows, i, i,
2078  loc_row, local_matrix);
2079  for (size_type j=column_start; j<i; ++j)
2080  {
2081  double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
2082  loc_row, local_matrix);
2083  dealiiSparseMatrix::add_value (col_val, row,
2084  global_rows.global_row(j),
2085  matrix_values);
2086  }
2087  for (size_type j=i+1; j<column_end; ++j)
2088  {
2089  double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
2090  loc_row, local_matrix);
2091  dealiiSparseMatrix::add_value (col_val, row,
2092  global_rows.global_row(j),
2093  matrix_values);
2094  }
2095  }
2096  }
2097  // case 3: can't say - need to check inside the loop
2098  else if (global_rows.have_indirect_rows() == false)
2099  {
2100  ++matrix_values; // jump over diagonal element
2101  AssertIndexRange (loc_row, local_matrix.m());
2102  const double *matrix_ptr = &local_matrix(loc_row, 0);
2103 
2104  for (size_type j=column_start; j<column_end; ++j)
2105  {
2106  const size_type loc_col = global_rows.local_row(j);
2107  const double col_val = matrix_ptr[loc_col];
2108  if (row==global_rows.global_row(j))
2109  sparse_matrix->begin(row)->value() += col_val;
2110  else
2111  dealiiSparseMatrix::add_value(col_val, row,
2112  global_rows.global_row(j),
2113  matrix_values);
2114  }
2115  }
2116  else
2117  {
2118  ++matrix_values; // jump over diagonal element
2119  for (size_type j=column_start; j<column_end; ++j)
2120  {
2121  double col_val = resolve_matrix_entry (global_rows, global_rows, i,
2122  j, loc_row, local_matrix);
2123  if (row==global_rows.global_row(j))
2124  sparse_matrix->begin(row)->value() += col_val;
2125  else
2126  dealiiSparseMatrix::add_value (col_val, row,
2127  global_rows.global_row(j),
2128  matrix_values);
2129  }
2130  }
2131  }
2132 
2133 
2134 
2135  // Same function to resolve all entries that will be added to the given
2136  // global row global_rows[i] as before, now for sparsity pattern
2137  inline
2138  void
2139  resolve_matrix_row (const GlobalRowsFromLocal &global_rows,
2140  const size_type i,
2141  const size_type column_start,
2142  const size_type column_end,
2143  const Table<2,bool> &dof_mask,
2144  std::vector<size_type>::iterator &col_ptr)
2145  {
2146  if (column_end == column_start)
2147  return;
2148 
2149  const size_type loc_row = global_rows.local_row(i);
2150 
2151  // fast function if there are no indirect references to any of the local
2152  // rows at all on this set of dofs
2153  if (global_rows.have_indirect_rows() == false)
2154  {
2155  Assert(loc_row < dof_mask.n_rows(),
2156  ExcInternalError());
2157 
2158  for (size_type j=column_start; j<column_end; ++j)
2159  {
2160  const size_type loc_col = global_rows.local_row(j);
2161  Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
2162 
2163  if (dof_mask(loc_row,loc_col) == true)
2164  *col_ptr++ = global_rows.global_row(j);
2165  }
2166  }
2167 
2168  // slower functions when there are indirect references and when we need to
2169  // do some more checks.
2170  else
2171  {
2172  for (size_type j=column_start; j<column_end; ++j)
2173  {
2174  const size_type loc_col = global_rows.local_row(j);
2175  if (loc_row != numbers::invalid_size_type)
2176  {
2177  Assert (loc_row < dof_mask.n_rows(), ExcInternalError());
2178  if (loc_col != numbers::invalid_size_type)
2179  {
2180  Assert (loc_col < dof_mask.n_cols(), ExcInternalError());
2181  if (dof_mask(loc_row,loc_col) == true)
2182  goto add_this_index;
2183  }
2184 
2185  for (size_type p=0; p<global_rows.size(j); ++p)
2186  if (dof_mask(loc_row,global_rows.local_row(j,p)) == true)
2187  goto add_this_index;
2188  }
2189 
2190  for (size_type q=0; q<global_rows.size(i); ++q)
2191  {
2192  if (loc_col != numbers::invalid_size_type)
2193  {
2194  Assert (loc_col < dof_mask.n_cols(), ExcInternalError());
2195  if (dof_mask(global_rows.local_row(i,q),loc_col) == true)
2196  goto add_this_index;
2197  }
2198 
2199  for (size_type p=0; p<global_rows.size(j); ++p)
2200  if (dof_mask(global_rows.local_row(i,q),
2201  global_rows.local_row(j,p)) == true)
2202  goto add_this_index;
2203  }
2204 
2205  continue;
2206  // if we found some nontrivial entry, append its column index to the
2207  // array of column indices.
2208 add_this_index:
2209  *col_ptr++ = global_rows.global_row(j);
2210  }
2211  }
2212  }
2213 
2214 
2215 
2216  // to make sure that the global matrix remains invertible, we need to do
2217  // something with the diagonal elements. add the absolute value of the local
2218  // matrix, so the resulting entry will always be positive and furthermore be
2219  // in the same order of magnitude as the other elements of the matrix
2220  //
2221  // note that this also captures the special case that a dof is both
2222  // constrained and fixed (this can happen for hanging nodes in 3d that also
2223  // happen to be on the boundary). in that case, following the program flow
2224  // in distribute_local_to_global, it is realized that when distributing the
2225  // row and column no elements of the matrix are actually touched if all the
2226  // degrees of freedom to which this dof is constrained are also constrained
2227  // (the usual case with hanging nodes in 3d). however, in the line below, we
2228  // do actually do something with this dof
2229  template <typename MatrixType, typename VectorType>
2230  inline void
2231  set_matrix_diagonals (const internals::GlobalRowsFromLocal &global_rows,
2232  const std::vector<size_type> &local_dof_indices,
2233  const FullMatrix<double> &local_matrix,
2234  const ConstraintMatrix &constraints,
2235  MatrixType &global_matrix,
2236  VectorType &global_vector,
2237  bool use_inhomogeneities_for_rhs)
2238  {
2239  if (global_rows.n_constraints() > 0)
2240  {
2241  double average_diagonal = 0;
2242  for (size_type i=0; i<local_matrix.m(); ++i)
2243  average_diagonal += std::fabs (local_matrix(i,i));
2244  average_diagonal /= static_cast<double>(local_matrix.m());
2245 
2246  for (size_type i=0; i<global_rows.n_constraints(); i++)
2247  {
2248  const size_type local_row = global_rows.constraint_origin(i);
2249  const size_type global_row = local_dof_indices[local_row];
2250  const typename MatrixType::value_type new_diagonal
2251  = (std::fabs(local_matrix(local_row,local_row)) != 0 ?
2252  std::fabs(local_matrix(local_row,local_row)) : average_diagonal);
2253  global_matrix.add(global_row, global_row, new_diagonal);
2254 
2255  // if the use_inhomogeneities_for_rhs flag is set to true, the
2256  // inhomogeneities are used to create the global vector. instead
2257  // of filling in a zero in the ith component for a dof with an inhomogeneity,
2258  // we set it to inhomogeneity(i)*global_matrix(i,i).
2259  if (use_inhomogeneities_for_rhs == true)
2260  global_vector(global_row) += constraints.get_inhomogeneity(global_row) * new_diagonal;
2261  }
2262  }
2263  }
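
The per-dof choice made above can be summarized in a one-line helper; this is only an illustrative sketch with hypothetical names, not a function of the library:

#include <cmath>

// Diagonal value placed into a constrained row: use |A_local(i,i)| if it is
// nonzero, otherwise fall back to the average magnitude of the local diagonal.
double constrained_diagonal (const double local_diagonal,
                             const double average_diagonal)
{
  return (local_diagonal != 0.) ? std::fabs(local_diagonal) : average_diagonal;
}

Using the average magnitude keeps the artificial diagonal on the same scale as the rest of the matrix, so the conditioning of the global system is not spoiled.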
2264 
2265 
2266 
2267  // similar function as the one above for setting matrix diagonals, but now
2268  // doing that for sparsity patterns when setting them up using
2269  // add_entries_local_to_global. In case we keep constrained entries, add all
2270  // the rows and columns related to the constrained dof, otherwise just add
2271  // the diagonal
2272  template <typename SparsityType>
2273  inline void
2274  set_sparsity_diagonals (const internals::GlobalRowsFromLocal &global_rows,
2275  const std::vector<size_type> &local_dof_indices,
2276  const Table<2,bool> &dof_mask,
2277  const bool keep_constrained_entries,
2278  SparsityType &sparsity_pattern)
2279  {
2280  // if we got constraints, need to add the diagonal element and, if the
2281  // user requested so, also the rest of the entries in rows and columns
2282  // that have been left out above
2283  if (global_rows.n_constraints() > 0)
2284  {
2285  for (size_type i=0; i<global_rows.n_constraints(); i++)
2286  {
2287  const size_type local_row = global_rows.constraint_origin(i);
2288  const size_type global_row = local_dof_indices[local_row];
2289  if (keep_constrained_entries == true)
2290  {
2291  for (size_type j=0; j<local_dof_indices.size(); ++j)
2292  {
2293  if (dof_mask(local_row,j) == true)
2294  sparsity_pattern.add(global_row,
2295  local_dof_indices[j]);
2296  if (dof_mask(j,local_row) == true)
2297  sparsity_pattern.add(local_dof_indices[j],
2298  global_row);
2299  }
2300  }
2301  else
2302  // don't keep constrained entries - just add the diagonal.
2303  sparsity_pattern.add(global_row,global_row);
2304  }
2305  }
2306  }
2307 
2308 } // end of namespace internals
2309 
2310 
2311 
2312 // Basic idea of setting up a list of
2313 // all global dofs: first find all rows and columns
2314  // that we are going to touch when writing,
2315 // and then go through the
2316 // lines and collect all the local rows that
2317 // are related to it.
2318 void
2319 ConstraintMatrix::
2320 make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
2321  internals::GlobalRowsFromLocal &global_rows) const
2322 {
2323  const size_type n_local_dofs = local_dof_indices.size();
2324  AssertDimension (n_local_dofs, global_rows.size());
2325 
2326  // when distributing the local data to the global matrix, we can quite
2327  // cheaply sort the indices (obviously, this introduces the need for
2328  // allocating some memory on the way, but we need to do this only for rows,
2329  // whereas the distribution process itself goes over rows and columns). This
2330  // has the advantage that when writing into the global matrix, we can make
2331  // use of the sortedness.
2332 
2333  // so the first step is to create a sorted list of all row values that are
2334  // possible. these values are either the rows from unconstrained dofs, or
2335  // some indices introduced by dofs constrained to a combination of some
2336  // other dofs. regarding the data type, choose an STL vector of a pair of
2337  // unsigned ints (for global columns) and internal data (containing local
2338  // columns + possible jumps from constraints). Choosing an STL map or
2339  // anything else M.K. knows of would be much more expensive here!
2340 
2341  // cache whether we have to resolve any indirect rows generated from
2342  // resolving constrained dofs.
2343  size_type added_rows = 0;
2344 
2345  // first add the indices in an unsorted way and only keep track of the
2346  // constraints that appear. They are resolved in a second step.
2347  for (size_type i = 0; i<n_local_dofs; ++i)
2348  {
2349  if (is_constrained(local_dof_indices[i]) == false)
2350  {
2351  global_rows.global_row(added_rows) = local_dof_indices[i];
2352  global_rows.local_row(added_rows++) = i;
2353  }
2354  else
2355  global_rows.insert_constraint(i);
2356  }
2357  global_rows.sort();
2358 
2359  const size_type n_constrained_rows = n_local_dofs-added_rows;
2360  for (size_type i=0; i<n_constrained_rows; ++i)
2361  {
2362  const size_type local_row = global_rows.constraint_origin(i);
2363  AssertIndexRange(local_row, n_local_dofs);
2364  const size_type global_row = local_dof_indices[local_row];
2365  Assert (is_constrained(global_row), ExcInternalError());
2366  const ConstraintLine &position =
2367  lines[lines_cache[calculate_line_index(global_row)]];
2368  if (position.inhomogeneity != 0)
2369  global_rows.set_ith_constraint_inhomogeneous (i);
2370  for (size_type q=0; q<position.entries.size(); ++q)
2371  global_rows.insert_index (position.entries[q].first,
2372  local_row,
2373  position.entries[q].second);
2374  }
2375 }
2376 
2377 
2378 
2379 // Same function as before, but now do only extract the global indices that
2380 // come from the local ones without storing their origin. Used for sparsity
2381 // pattern generation.
2382 inline
2383 void
2384 ConstraintMatrix::
2385 make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
2386  std::vector<size_type> &active_dofs) const
2387 {
2388  const size_type n_local_dofs = local_dof_indices.size();
2389  size_type added_rows = 0;
2390  for (size_type i = 0; i<n_local_dofs; ++i)
2391  {
2392  if (is_constrained(local_dof_indices[i]) == false)
2393  {
2394  active_dofs[added_rows++] = local_dof_indices[i];
2395  continue;
2396  }
2397 
2398  active_dofs[n_local_dofs-i+added_rows-1] = i;
2399  }
2400  std::sort (active_dofs.begin(), active_dofs.begin()+added_rows);
2401 
2402  const size_type n_constrained_dofs = n_local_dofs-added_rows;
2403  for (size_type i=n_constrained_dofs; i>0; --i)
2404  {
2405  const size_type local_row = active_dofs.back();
2406 
2407  // remove constrained entry since we are going to resolve it in place
2408  active_dofs.pop_back();
2409  const size_type global_row = local_dof_indices[local_row];
2410  const ConstraintLine &position =
2411  lines[lines_cache[calculate_line_index(global_row)]];
2412  for (size_type q=0; q<position.entries.size(); ++q)
2413  {
2414  const size_type new_index = position.entries[q].first;
2415  if (active_dofs[active_dofs.size()-i] < new_index)
2416  active_dofs.insert(active_dofs.end()-i+1,new_index);
2417 
2418  // make binary search to find where to put the new index in order to
2419  // keep the list sorted
2420  else
2421  {
2422  std::vector<size_type>::iterator it =
2423  Utilities::lower_bound(active_dofs.begin(),
2424  active_dofs.end()-i+1,
2425  new_index);
2426  if (*it != new_index)
2427  active_dofs.insert(it, new_index);
2428  }
2429  }
2430  }
2431 }
2432 
2433 
2434 
2435 // Resolve the constraints from the vector and apply inhomogeneities.
2436 inline
2437 double
2438 ConstraintMatrix::
2439 resolve_vector_entry (const size_type i,
2440  const internals::GlobalRowsFromLocal &global_rows,
2441  const Vector<double> &local_vector,
2442  const std::vector<size_type> &local_dof_indices,
2443  const FullMatrix<double> &local_matrix) const
2444 {
2445  const size_type loc_row = global_rows.local_row(i);
2446  const size_type n_inhomogeneous_rows = global_rows.n_inhomogeneities();
2447  double val = 0;
2448  // has a direct contribution from some local entry. If we have inhomogeneous
2449  // constraints, compute the contribution of the inhomogeneity in the current
2450  // row.
2451  if (loc_row != numbers::invalid_size_type)
2452  {
2453  val = local_vector(loc_row);
2454  for (size_type i=0; i<n_inhomogeneous_rows; ++i)
2455  val -= (lines[lines_cache[calculate_line_index(local_dof_indices
2456  [global_rows.constraint_origin(i)])]].
2457  inhomogeneity *
2458  local_matrix(loc_row, global_rows.constraint_origin(i)));
2459  }
2460 
2461  // go through the indirect contributions
2462  for (size_type q=0; q<global_rows.size(i); ++q)
2463  {
2464  const size_type loc_row_q = global_rows.local_row(i,q);
2465  double add_this = local_vector (loc_row_q);
2466  for (size_type k=0; k<n_inhomogeneous_rows; ++k)
2467  add_this -= (lines[lines_cache[calculate_line_index
2468  (local_dof_indices
2469  [global_rows.constraint_origin(k)])]].
2470  inhomogeneity *
2471  local_matrix(loc_row_q,global_rows.constraint_origin(k)));
2472  val += add_this * global_rows.constraint_value(i,q);
2473  }
2474  return val;
2475 }
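
The elimination performed here is the usual one for inhomogeneous constraints: if local dof k is constrained as x_k = sum_j c_j x_j + g_k, then the term A_local(i,k)*g_k moves to the right hand side of every row i. A minimal standalone sketch of that step, with hypothetical names (not the library's interface), is:

#include <cstddef>
#include <vector>

// Right hand side entry of row i after eliminating inhomogeneous constraints:
// subtract A_local(i,k) * g_k for every constrained local dof k (g_k = 0 for
// unconstrained or homogeneously constrained dofs).
double rhs_after_elimination (const double local_rhs_i,
                              const std::vector<double> &row_i_of_A,
                              const std::vector<double> &inhomogeneities)
{
  double val = local_rhs_i;
  for (std::size_t k = 0; k < inhomogeneities.size(); ++k)
    val -= row_i_of_A[k] * inhomogeneities[k];
  return val;
}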
2476 
2477 
2478 // internal implementation for distribute_local_to_global for standard
2479 // (non-block) matrices
2480 template <typename MatrixType, typename VectorType>
2481 void
2482 ConstraintMatrix::distribute_local_to_global (
2483  const FullMatrix<double> &local_matrix,
2484  const Vector<double> &local_vector,
2485  const std::vector<size_type> &local_dof_indices,
2486  MatrixType &global_matrix,
2487  VectorType &global_vector,
2488  bool use_inhomogeneities_for_rhs,
2489  internal::bool2type<false>) const
2490 {
2491  // check whether we work on real vectors or we just used a dummy when
2492  // calling the other function above.
2493  const bool use_vectors = (local_vector.size() == 0 &&
2494  global_vector.size() == 0) ? false : true;
2495  typedef typename MatrixType::value_type number;
2496  const bool use_dealii_matrix =
2497  types_are_equal<MatrixType,SparseMatrix<number> >::value;
2498 
2499  AssertDimension (local_matrix.n(), local_dof_indices.size());
2500  AssertDimension (local_matrix.m(), local_dof_indices.size());
2501  Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic());
2502  if (use_vectors == true)
2503  {
2504  AssertDimension (local_matrix.m(), local_vector.size());
2505  AssertDimension (global_matrix.m(), global_vector.size());
2506  }
2507  Assert (lines.empty() || sorted == true, ExcMatrixNotClosed());
2508 
2509  const size_type n_local_dofs = local_dof_indices.size();
2510 
2511  typename internals::ConstraintMatrixData<number>::ScratchDataAccessor
2512  scratch_data;
2513 
2514  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
2515  global_rows.reinit(n_local_dofs);
2516  make_sorted_row_list (local_dof_indices, global_rows);
2517 
2518  const size_type n_actual_dofs = global_rows.size();
2519 
2520  // create arrays for the column data (indices and values) that will then be
2521  // written into the matrix. Shortcut for deal.II sparse matrix. We can use
2522  // the scratch data if we have a double matrix. Otherwise, we need to create
2523  // an array in any case since we cannot know about the actual data type in
2524  // the ConstraintMatrix class (unless we do cast). This involves a little
2525  // bit of logic to determine the type of the matrix value.
2526  std::vector<size_type> &cols = scratch_data->columns;
2527  std::vector<number> &vals = scratch_data->values;
2528  SparseMatrix<number> *sparse_matrix
2529  = dynamic_cast<SparseMatrix<number> *>(&global_matrix);
2530  if (use_dealii_matrix == false)
2531  {
2532  cols.resize (n_actual_dofs);
2533  vals.resize (n_actual_dofs);
2534  }
2535  else
2536  Assert (sparse_matrix != 0, ExcInternalError());
2537 
2538  // now do the actual job. go through all the global rows that we will touch
2539  // and call resolve_matrix_row for each of those.
2540  for (size_type i=0; i<n_actual_dofs; ++i)
2541  {
2542  const size_type row = global_rows.global_row(i);
2543 
2544  // calculate all the data that will be written into the matrix row.
2545  if (use_dealii_matrix == false)
2546  {
2547  size_type *col_ptr = &cols[0];
2548  // cast is uncritical here and only used to avoid compiler
2549  // warnings. We never access a non-double array
2550  number *val_ptr = &vals[0];
2551  internals::resolve_matrix_row (global_rows, global_rows, i, 0,
2552  n_actual_dofs,
2553  local_matrix, col_ptr, val_ptr);
2554  const size_type n_values = col_ptr - &cols[0];
2555  if (n_values > 0)
2556  global_matrix.add(row, n_values, &cols[0], &vals[0], false,
2557  true);
2558  }
2559  else
2560  internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs,
2561  local_matrix, sparse_matrix);
2562 
2563  // now to the vectors. besides doing the same job as we did above (i.e.,
2564  // distribute the content of the local vector into the global one), we need
2565  // to account for inhomogeneities here: this corresponds to eliminating
2566  // the respective column in the local matrix using the value on the right
2567  // hand side.
2568  if (use_vectors == true)
2569  {
2570  const double val = resolve_vector_entry (i, global_rows,
2571  local_vector,
2572  local_dof_indices,
2573  local_matrix);
2574 
2575  if (val != 0)
2576  global_vector(row) += static_cast<typename VectorType::value_type>(val);
2577  }
2578  }
2579 
2580  internals::set_matrix_diagonals (global_rows, local_dof_indices,
2581  local_matrix, *this,
2582  global_matrix, global_vector, use_inhomogeneities_for_rhs);
2583 }
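
For orientation, this implementation is reached from user code through the public distribute_local_to_global interface during assembly. A minimal sketch of such a call, assuming the usual objects from an assembly loop (cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs) already exist, might look like:

#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>
#include <vector>

// Copy one cell's contribution into the global objects, resolving constraints
// on the fly; this public call ends up in the implementation above.
void copy_local_to_global (const dealii::ConstraintMatrix &constraints,
                           const dealii::FullMatrix<double> &cell_matrix,
                           const dealii::Vector<double> &cell_rhs,
                           const std::vector<dealii::types::global_dof_index> &local_dof_indices,
                           dealii::SparseMatrix<double> &system_matrix,
                           dealii::Vector<double> &system_rhs)
{
  constraints.distribute_local_to_global (cell_matrix, cell_rhs,
                                          local_dof_indices,
                                          system_matrix, system_rhs);
}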
2584 
2585 
2586 
2587 template <typename MatrixType>
2588 void
2589 ConstraintMatrix::distribute_local_to_global (
2590  const FullMatrix<double> &local_matrix,
2591  const std::vector<size_type> &row_indices,
2592  const std::vector<size_type> &col_indices,
2593  MatrixType &global_matrix) const
2594 {
2595  typedef double number;
2596 
2597  AssertDimension (local_matrix.m(), row_indices.size());
2598  AssertDimension (local_matrix.n(), col_indices.size());
2599  //Assert (sorted == true, ExcMatrixNotClosed());
2600 
2601  const size_type n_local_row_dofs = row_indices.size();
2602  const size_type n_local_col_dofs = col_indices.size();
2603 
2604  internals::ConstraintMatrixData<number>::ScratchDataAccessor
2605  scratch_data;
2606  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
2607  global_rows.reinit(n_local_row_dofs);
2608  internals::GlobalRowsFromLocal &global_cols = scratch_data->global_columns;
2609  global_cols.reinit(n_local_col_dofs);
2610  make_sorted_row_list (row_indices, global_rows);
2611  make_sorted_row_list (col_indices, global_cols);
2612 
2613  const size_type n_actual_row_dofs = global_rows.size();
2614  const size_type n_actual_col_dofs = global_cols.size();
2615 
2616  // create arrays for the column data (indices and values) that will then be
2617  // written into the matrix. Shortcut for deal.II sparse matrix
2618  std::vector<size_type> &cols = scratch_data->columns;
2619  std::vector<number> &vals = scratch_data->values;
2620  cols.resize(n_actual_col_dofs);
2621  vals.resize(n_actual_col_dofs);
2622 
2623  // now do the actual job.
2624  for (size_type i=0; i<n_actual_row_dofs; ++i)
2625  {
2626  const size_type row = global_rows.global_row(i);
2627 
2628  // calculate all the data that will be written into the matrix row.
2629  size_type *col_ptr = &cols[0];
2630  number *val_ptr = &vals[0];
2631  internals::resolve_matrix_row (global_rows, global_cols, i, 0,
2632  n_actual_col_dofs,
2633  local_matrix, col_ptr, val_ptr);
2634  const size_type n_values = col_ptr - &cols[0];
2635  if (n_values > 0)
2636  global_matrix.add(row, n_values, &cols[0], &vals[0], false, true);
2637  }
2638 }
2639 
2640 
2641 // similar function as above, but now specialized for block matrices. See the
2642 // other function for additional comments.
2643 template <typename MatrixType, typename VectorType>
2644 void
2645 ConstraintMatrix::distribute_local_to_global (
2646  const FullMatrix<double> &local_matrix,
2647  const Vector<double> &local_vector,
2648  const std::vector<size_type> &local_dof_indices,
2649  MatrixType &global_matrix,
2650  VectorType &global_vector,
2651  bool use_inhomogeneities_for_rhs,
2652  internal::bool2type<true>) const
2653 {
2654  const bool use_vectors = (local_vector.size() == 0 &&
2655  global_vector.size() == 0) ? false : true;
2656  typedef typename MatrixType::value_type number;
2657  const bool use_dealii_matrix =
2658  types_are_equal<MatrixType,BlockSparseMatrix<number> >::value;
2659 
2660  AssertDimension (local_matrix.n(), local_dof_indices.size());
2661  AssertDimension (local_matrix.m(), local_dof_indices.size());
2662  Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic());
2663  Assert (global_matrix.n_block_rows() == global_matrix.n_block_cols(),
2664  ExcNotQuadratic());
2665  if (use_vectors == true)
2666  {
2667  AssertDimension (local_matrix.m(), local_vector.size());
2668  AssertDimension (global_matrix.m(), global_vector.size());
2669  }
2670  Assert (sorted == true, ExcMatrixNotClosed());
2671 
2672  typename internals::ConstraintMatrixData<number>::ScratchDataAccessor
2673  scratch_data;
2674 
2675  const size_type n_local_dofs = local_dof_indices.size();
2676  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
2677  global_rows.reinit(n_local_dofs);
2678 
2679  make_sorted_row_list (local_dof_indices, global_rows);
2680  const size_type n_actual_dofs = global_rows.size();
2681 
2682  std::vector<size_type> &global_indices = scratch_data->vector_indices;
2683  if (use_vectors == true)
2684  {
2685  global_indices.resize(n_actual_dofs);
2686  for (size_type i=0; i<n_actual_dofs; ++i)
2687  global_indices[i] = global_rows.global_row(i);
2688  }
2689 
2690  // additional construct that also takes care of block indices.
2691  const size_type num_blocks = global_matrix.n_block_rows();
2692  std::vector<size_type> &block_starts = scratch_data->block_starts;
2693  block_starts.resize(num_blocks+1);
2694  internals::make_block_starts (global_matrix, global_rows, block_starts);
2695 
2696  std::vector<size_type> &cols = scratch_data->columns;
2697  std::vector<number> &vals = scratch_data->values;
2698  if (use_dealii_matrix == false)
2699  {
2700  cols.resize (n_actual_dofs);
2701  vals.resize (n_actual_dofs);
2702  }
2703 
2704  // the basic difference to the non-block variant from now onwards is that we
2705  // go through the blocks of the matrix separately, which allows us to set
2706  // the block entries individually
2707  for (size_type block=0; block<num_blocks; ++block)
2708  {
2709  const size_type next_block = block_starts[block+1];
2710  for (size_type i=block_starts[block]; i<next_block; ++i)
2711  {
2712  const size_type row = global_rows.global_row(i);
2713 
2714  for (size_type block_col=0; block_col<num_blocks; ++block_col)
2715  {
2716  const size_type start_block = block_starts[block_col],
2717  end_block = block_starts[block_col+1];
2718  if (use_dealii_matrix == false)
2719  {
2720  size_type *col_ptr = &cols[0];
2721  number *val_ptr = &vals[0];
2722  internals::resolve_matrix_row (global_rows, global_rows, i,
2723  start_block, end_block,
2724  local_matrix, col_ptr, val_ptr);
2725  const size_type n_values = col_ptr - &cols[0];
2726  if (n_values > 0)
2727  global_matrix.block(block, block_col).add(row, n_values,
2728  &cols[0], &vals[0],
2729  false, true);
2730  }
2731  else
2732  {
2733  SparseMatrix<number> *sparse_matrix
2734  = dynamic_cast<SparseMatrix<number> *>(&global_matrix.block(block,
2735  block_col));
2736  Assert (sparse_matrix != 0, ExcInternalError());
2737  internals::resolve_matrix_row (global_rows, i, start_block,
2738  end_block, local_matrix, sparse_matrix);
2739  }
2740  }
2741 
2742  if (use_vectors == true)
2743  {
2744  const double val = resolve_vector_entry (i, global_rows,
2745  local_vector,
2746  local_dof_indices,
2747  local_matrix);
2748 
2749  if (val != 0)
2750  global_vector(global_indices[i]) +=
2751  static_cast<typename VectorType::value_type>(val);
2752  }
2753  }
2754  }
2755 
2756  internals::set_matrix_diagonals (global_rows, local_dof_indices,
2757  local_matrix, *this,
2758  global_matrix, global_vector, use_inhomogeneities_for_rhs);
2759 }
2760 
2761 
2762 
2763 template <typename SparsityType>
2764 void
2765 ConstraintMatrix::
2766 add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
2767  SparsityType &sparsity_pattern,
2768  const bool keep_constrained_entries,
2769  const Table<2,bool> &dof_mask,
2770  internal::bool2type<false>) const
2771 {
2772  Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic());
2773 
2774  const size_type n_local_dofs = local_dof_indices.size();
2775  bool dof_mask_is_active = false;
2776  if (dof_mask.n_rows() == n_local_dofs)
2777  {
2778  dof_mask_is_active = true;
2779  AssertDimension (dof_mask.n_cols(), n_local_dofs);
2780  }
2781 
2782  internals::ConstraintMatrixData<double>::ScratchDataAccessor scratch_data;
2783 
2784  // if the dof mask is not active, all we have to do is to add some indices
2785  // in a matrix format. To do this, we first create an array of all the
2786  // indices that are to be added. these indices are the local dof indices
2787  // plus some indices that come from constraints.
2788  if (dof_mask_is_active == false)
2789  {
2790  std::vector<size_type> &actual_dof_indices = scratch_data->columns;
2791  actual_dof_indices.resize(n_local_dofs);
2792  make_sorted_row_list (local_dof_indices, actual_dof_indices);
2793  const size_type n_actual_dofs = actual_dof_indices.size();
2794 
2795  // now add the indices we collected above to the sparsity pattern. Very
2796  // easy here - just add the same array to all the rows...
2797  for (size_type i=0; i<n_actual_dofs; ++i)
2798  sparsity_pattern.add_entries(actual_dof_indices[i],
2799  actual_dof_indices.begin(),
2800  actual_dof_indices.end(),
2801  true);
2802 
2803  // need to add the whole row and column structure in case we keep
2804  // constrained entries. Unfortunately, we can't use the nice matrix
2805  // structure we use elsewhere, so manually add those indices one by one.
2806  for (size_type i=0; i<n_local_dofs; i++)
2807  if (is_constrained(local_dof_indices[i]))
2808  {
2809  if (keep_constrained_entries == true)
2810  for (size_type j=0; j<n_local_dofs; j++)
2811  {
2812  sparsity_pattern.add (local_dof_indices[i], local_dof_indices[j]);
2813  sparsity_pattern.add (local_dof_indices[j], local_dof_indices[i]);
2814  }
2815  else
2816  sparsity_pattern.add (local_dof_indices[i], local_dof_indices[i]);
2817  }
2818 
2819  return;
2820  }
2821 
2822 
2823  // complicated case: we need to filter out some indices. then the function
2824  // gets similar to the function for distributing matrix entries, see there
2825  // for additional comments.
2826  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
2827  global_rows.reinit(n_local_dofs);
2828  make_sorted_row_list (local_dof_indices, global_rows);
2829  const size_type n_actual_dofs = global_rows.size();
2830 
2831  // create arrays for the column indices that will then be written into the
2832  // sparsity pattern.
2833  std::vector<size_type> &cols = scratch_data->columns;
2834  cols.resize(n_actual_dofs);
2835 
2836  for (size_type i=0; i<n_actual_dofs; ++i)
2837  {
2838  std::vector<size_type>::iterator col_ptr = cols.begin();
2839  const size_type row = global_rows.global_row(i);
2840  internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs,
2841  dof_mask, col_ptr);
2842 
2843  // finally, write all the information that accumulated under the given
2844  // process into the global matrix row and into the vector
2845  if (col_ptr != cols.begin())
2846  sparsity_pattern.add_entries(row, cols.begin(), col_ptr,
2847  true);
2848  }
2849  internals::set_sparsity_diagonals (global_rows, local_dof_indices,
2850  dof_mask, keep_constrained_entries,
2851  sparsity_pattern);
2852 }
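
This is the code path used when building a sparsity pattern in which constrained rows and columns are already condensed away. A typical way to trigger it from user code, sketched here under the assumption that dof_handler and constraints already exist in the surrounding program, is via DoFTools::make_sparsity_pattern with keep_constrained_dofs set to false:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/compressed_sparsity_pattern.h>
#include <deal.II/lac/constraint_matrix.h>

// Build a sparsity pattern whose constrained rows/columns only keep the
// diagonal entry; internally this calls add_entries_local_to_global above.
template <int dim>
void make_condensed_pattern (const dealii::DoFHandler<dim> &dof_handler,
                             const dealii::ConstraintMatrix &constraints,
                             dealii::CompressedSparsityPattern &csp)
{
  csp.reinit (dof_handler.n_dofs(), dof_handler.n_dofs());
  dealii::DoFTools::make_sparsity_pattern (dof_handler, csp, constraints,
                                           /*keep_constrained_dofs=*/false);
}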
2853 
2854 
2855 
2856 
2857 template <typename SparsityType>
2858 void
2859 ConstraintMatrix::
2860 add_entries_local_to_global (const std::vector<size_type> &row_indices,
2861  const std::vector<size_type> &col_indices,
2862  SparsityType &sparsity_pattern,
2863  const bool keep_constrained_entries,
2864  const Table<2,bool> &dof_mask) const
2865 {
2866  const size_type n_local_rows = row_indices.size();
2867  const size_type n_local_cols = col_indices.size();
2868  bool dof_mask_is_active = false;
2869  if (dof_mask.n_rows() == n_local_rows && dof_mask.n_cols() == n_local_cols)
2870  dof_mask_is_active = true;
2871 
2872  // if the dof mask is not active, all we have to do is to add some indices
2873  // in a matrix format. To do this, we first create an array of all the
2874  // indices that are to be added. these indices are the local dof indices
2875  // plus some indices that come from constraints.
2876  if (dof_mask_is_active == false)
2877  {
2878  std::vector<size_type> actual_row_indices (n_local_rows);
2879  std::vector<size_type> actual_col_indices (n_local_cols);
2880  make_sorted_row_list (row_indices, actual_row_indices);
2881  make_sorted_row_list (col_indices, actual_col_indices);
2882  const size_type n_actual_rows = actual_row_indices.size();
2883 
2884  // now add the indices we collected above to the sparsity pattern. Very
2885  // easy here - just add the same array to all the rows...
2886  for (size_type i=0; i<n_actual_rows; ++i)
2887  sparsity_pattern.add_entries(actual_row_indices[i],
2888  actual_col_indices.begin(),
2889  actual_col_indices.end(),
2890  true);
2891  return;
2892  }
2893 
2894  // if constrained entries should be kept, need to add rows and columns of
2895  // those to the sparsity pattern
2896  if (keep_constrained_entries == true)
2897  {
2898  for (size_type i=0; i<row_indices.size(); i++)
2899  if (is_constrained(row_indices[i]))
2900  for (size_type j=0; j<col_indices.size(); j++)
2901  sparsity_pattern.add (row_indices[i], col_indices[j]);
2902  for (size_type i=0; i<col_indices.size(); i++)
2903  if (is_constrained(col_indices[i]))
2904  for (size_type j=0; j<row_indices.size(); j++)
2905  sparsity_pattern.add (row_indices[j], col_indices[i]);
2906  }
2907 
2908 
2909  // TODO: implement this
2910  Assert (false, ExcNotImplemented());
2911 }
2912 
2913 
2914 
2915 
2916 template <typename SparsityType>
2917 void
2918 ConstraintMatrix::
2919 add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
2920  SparsityType &sparsity_pattern,
2921  const bool keep_constrained_entries,
2922  const Table<2,bool> &dof_mask,
2923  internal::bool2type<true>) const
2924 {
2925  // just as the other add_entries_local_to_global function, but now
2926  // specialized for block matrices.
2927  Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic());
2928  Assert (sparsity_pattern.n_block_rows() == sparsity_pattern.n_block_cols(),
2929  ExcNotQuadratic());
2930 
2931  const size_type n_local_dofs = local_dof_indices.size();
2932  const size_type num_blocks = sparsity_pattern.n_block_rows();
2933 
2934  internals::ConstraintMatrixData<double>::ScratchDataAccessor scratch_data;
2935 
2936  bool dof_mask_is_active = false;
2937  if (dof_mask.n_rows() == n_local_dofs)
2938  {
2939  dof_mask_is_active = true;
2940  AssertDimension (dof_mask.n_cols(), n_local_dofs);
2941  }
2942 
2943  if (dof_mask_is_active == false)
2944  {
2945  std::vector<size_type> &actual_dof_indices = scratch_data->columns;
2946  actual_dof_indices.resize(n_local_dofs);
2947  make_sorted_row_list (local_dof_indices, actual_dof_indices);
2948  const size_type n_actual_dofs = actual_dof_indices.size();
2949 
2950  // additional construct that also takes care of block indices.
2951  std::vector<size_type> &block_starts = scratch_data->block_starts;
2952  block_starts.resize(num_blocks+1);
2953  internals::make_block_starts (sparsity_pattern, actual_dof_indices,
2954  block_starts);
2955 
2956  for (size_type block=0; block<num_blocks; ++block)
2957  {
2958  const size_type next_block = block_starts[block+1];
2959  for (size_type i=block_starts[block]; i<next_block; ++i)
2960  {
2961  Assert (i<n_actual_dofs, ExcInternalError());
2962  const size_type row = actual_dof_indices[i];
2963  Assert (row < sparsity_pattern.block(block,0).n_rows(),
2964  ExcInternalError());
2965  std::vector<size_type>::iterator index_it = actual_dof_indices.begin();
2966  for (size_type block_col = 0; block_col<num_blocks; ++block_col)
2967  {
2968  const size_type next_block_col = block_starts[block_col+1];
2969  sparsity_pattern.block(block,block_col).
2970  add_entries(row,
2971  index_it,
2972  actual_dof_indices.begin() + next_block_col,
2973  true);
2974  index_it = actual_dof_indices.begin() + next_block_col;
2975  }
2976  }
2977  }
2978 
2979  for (size_type i=0; i<n_local_dofs; i++)
2980  if (is_constrained(local_dof_indices[i]))
2981  {
2982  if (keep_constrained_entries == true)
2983  for (size_type j=0; j<n_local_dofs; j++)
2984  {
2985  sparsity_pattern.add (local_dof_indices[i], local_dof_indices[j]);
2986  sparsity_pattern.add (local_dof_indices[j], local_dof_indices[i]);
2987  }
2988  else
2989  sparsity_pattern.add (local_dof_indices[i], local_dof_indices[i]);
2990  }
2991 
2992  return;
2993  }
2994 
2995  // difficult case with dof_mask, similar to the distribute_local_to_global
2996  // function for block matrices
2997  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
2998  global_rows.reinit(n_local_dofs);
2999  make_sorted_row_list (local_dof_indices, global_rows);
3000  const size_type n_actual_dofs = global_rows.size();
3001 
3002  // additional construct that also takes care of block indices.
3003  std::vector<size_type> &block_starts = scratch_data->block_starts;
3004  block_starts.resize(num_blocks+1);
3005  internals::make_block_starts(sparsity_pattern, global_rows, block_starts);
3006 
3007  std::vector<size_type> &cols = scratch_data->columns;
3008  cols.resize(n_actual_dofs);
3009 
3010  // the basic difference to the non-block variant from now onwards is that we
3011  // go through the blocks of the matrix separately.
3012  for (size_type block=0; block<num_blocks; ++block)
3013  {
3014  const size_type next_block = block_starts[block+1];
3015  for (size_type i=block_starts[block]; i<next_block; ++i)
3016  {
3017  const size_type row = global_rows.global_row(i);
3018  for (size_type block_col=0; block_col<num_blocks; ++block_col)
3019  {
3020  const size_type begin_block = block_starts[block_col],
3021  end_block = block_starts[block_col+1];
3022  std::vector<size_type>::iterator col_ptr = cols.begin();
3023  internals::resolve_matrix_row (global_rows, i, begin_block,
3024  end_block, dof_mask, col_ptr);
3025 
3026  sparsity_pattern.block(block, block_col).add_entries(row,
3027  cols.begin(),
3028  col_ptr,
3029  true);
3030  }
3031  }
3032  }
3033 
3034  internals::set_sparsity_diagonals (global_rows, local_dof_indices,
3035  dof_mask, keep_constrained_entries,
3036  sparsity_pattern);
3037 }
3038 
3039 
3040 DEAL_II_NAMESPACE_CLOSE
3041 
3042 #endif