Reference documentation for deal.II version 8.1.0
trilinos_sparse_matrix.h
1 // ---------------------------------------------------------------------
2 // $Id: trilinos_sparse_matrix.h 31684 2013-11-16 12:46:15Z bangerth $
3 //
4 // Copyright (C) 2008 - 2013 by the deal.II authors
5 //
6 // This file is part of the deal.II library.
7 //
8 // The deal.II library is free software; you can use it, redistribute
9 // it, and/or modify it under the terms of the GNU Lesser General
10 // Public License as published by the Free Software Foundation; either
11 // version 2.1 of the License, or (at your option) any later version.
12 // The full text of the license can be found in the file LICENSE at
13 // the top level of the deal.II distribution.
14 //
15 // ---------------------------------------------------------------------
16 
17 #ifndef __deal2__trilinos_sparse_matrix_h
18 #define __deal2__trilinos_sparse_matrix_h
19 
20 
21 #include <deal.II/base/config.h>
22 
23 #ifdef DEAL_II_WITH_TRILINOS
24 
25 # include <deal.II/base/std_cxx1x/shared_ptr.h>
26 # include <deal.II/base/subscriptor.h>
27 # include <deal.II/base/index_set.h>
28 # include <deal.II/lac/full_matrix.h>
29 # include <deal.II/lac/exceptions.h>
30 # include <deal.II/lac/trilinos_vector.h>
31 # include <deal.II/lac/vector_view.h>
32 
33 # include <vector>
34 # include <cmath>
35 # include <memory>
36 
37 # define TrilinosScalar double
38 # include <Epetra_FECrsMatrix.h>
39 # include <Epetra_Map.h>
40 # include <Epetra_CrsGraph.h>
41 # include <Epetra_MultiVector.h>
42 # ifdef DEAL_II_WITH_MPI
43 # include <Epetra_MpiComm.h>
44 # include "mpi.h"
45 # else
46 # include "Epetra_SerialComm.h"
47 # endif
48 
49 DEAL_II_NAMESPACE_OPEN
50 
51 // forward declarations
52 template <typename MatrixType> class BlockMatrixBase;
53 
54 template <typename number> class SparseMatrix;
55 class SparsityPattern;
56 
57 namespace TrilinosWrappers
58 {
59  // forward declarations
60  class SparseMatrix;
61  class SparsityPattern;
62 
 66  namespace SparseMatrixIterators
 67  {
68  // forward declaration
69  template <bool Constness> class Iterator;
70 
74  DeclException0 (ExcBeyondEndOfMatrix);
75 
79  DeclException3 (ExcAccessToNonlocalRow,
80  std::size_t, std::size_t, std::size_t,
81  << "You tried to access row " << arg1
82  << " of a distributed sparsity pattern, "
83  << " but only rows " << arg2 << " through " << arg3
84  << " are stored locally and can be accessed.");
85 
 102  class AccessorBase
 103  {
104  public:
108  typedef ::types::global_dof_index size_type;
109 
 113  AccessorBase (SparseMatrix *matrix,
 114  const size_type row,
115  const size_type index);
116 
121  size_type row() const;
122 
127  size_type index() const;
128 
133  size_type column() const;
134 
135  protected:
  SparseMatrix *matrix;
 
 151  size_type a_row;
152 
156  size_type a_index;
157 
166  void visit_present_row ();
167 
193  std_cxx1x::shared_ptr<std::vector<size_type> > colnum_cache;
194 
199  std_cxx1x::shared_ptr<std::vector<TrilinosScalar> > value_cache;
200  };
201 
 212  template <bool Constness>
213  class Accessor : public AccessorBase
214  {
218  TrilinosScalar value() const;
219 
223  TrilinosScalar &value();
224  };
225 
229  template<>
230  class Accessor<true> : public AccessorBase
231  {
232  public:
238  typedef const SparseMatrix MatrixType;
239 
246  Accessor (MatrixType *matrix,
247  const size_type row,
248  const size_type index);
249 
255  template <bool Other>
256  Accessor (const Accessor<Other> &a);
257 
261  TrilinosScalar value() const;
262 
263  private:
268  template <bool> friend class Iterator;
269  };
270 
274  template<>
275  class Accessor<false> : public AccessorBase
276  {
277  class Reference
278  {
279  public:
283  Reference (const Accessor<false> &accessor);
284 
289  operator TrilinosScalar () const;
290 
295  const Reference &operator = (const TrilinosScalar n) const;
296 
301  const Reference &operator += (const TrilinosScalar n) const;
302 
308  const Reference &operator -= (const TrilinosScalar n) const;
309 
315  const Reference &operator *= (const TrilinosScalar n) const;
316 
322  const Reference &operator /= (const TrilinosScalar n) const;
323 
324  private:
  Accessor &accessor;
 331  };
332 
333  public:
 339  typedef SparseMatrix MatrixType;
 340 
347  Accessor (MatrixType *matrix,
348  const size_type row,
349  const size_type index);
350 
354  Reference value() const;
355 
356  private:
361  template <bool> friend class Iterator;
366  friend class Reference;
367  };
368 
383  template <bool Constness>
384  class Iterator
385  {
386  public:
390  typedef ::types::global_dof_index size_type;
391 
 397  typedef typename Accessor<Constness>::MatrixType MatrixType;
 398 
405  Iterator (MatrixType *matrix,
406  const size_type row,
407  const size_type index);
408 
413  template <bool Other>
414  Iterator(const Iterator<Other> &other);
415 
 419  Iterator<Constness> &operator++ ();
 420 
 424  Iterator<Constness> operator++ (int);
 425 
429  const Accessor<Constness> &operator* () const;
430 
434  const Accessor<Constness> *operator-> () const;
435 
441  bool operator == (const Iterator<Constness> &) const;
442 
446  bool operator != (const Iterator<Constness> &) const;
447 
456  bool operator < (const Iterator<Constness> &) const;
457 
461  bool operator > (const Iterator<Constness> &) const;
462 
466  DeclException2 (ExcInvalidIndexWithinRow,
467  size_type, size_type,
468  << "Attempt to access element " << arg2
469  << " of row " << arg1
470  << " which doesn't have that many elements.");
471 
472  private:
 477  Accessor<Constness> accessor;
 478 
479  template <bool Other> friend class Iterator;
480  };
481 
482  }
483 
484 
515  class SparseMatrix : public Subscriptor
516  {
517  public:
521  typedef ::types::global_dof_index size_type;
522 
535  struct Traits
536  {
542  static const bool zero_addition_can_be_elided = true;
543  };
544 
 549  typedef SparseMatrixIterators::Iterator<false> iterator;
 550 
 555  typedef SparseMatrixIterators::Iterator<true> const_iterator;
 556 
562  typedef TrilinosScalar value_type;
563 
572  SparseMatrix ();
573 
583  SparseMatrix (const size_type m,
584  const size_type n,
585  const unsigned int n_max_entries_per_row);
586 
597  SparseMatrix (const size_type m,
598  const size_type n,
599  const std::vector<unsigned int> &n_entries_per_row);
600 
605  SparseMatrix (const SparsityPattern &InputSparsityPattern);
606 
614  SparseMatrix (const SparseMatrix &InputMatrix);
615 
621  virtual ~SparseMatrix ();
622 
651  template<typename SparsityType>
652  void reinit (const SparsityType &sparsity_pattern);
653 
665  void reinit (const SparsityPattern &sparsity_pattern);
666 
677  void reinit (const SparseMatrix &sparse_matrix);
678 
708  template <typename number>
709  void reinit (const ::SparseMatrix<number> &dealii_sparse_matrix,
710  const double drop_tolerance=1e-13,
711  const bool copy_values=true,
712  const ::SparsityPattern *use_this_sparsity=0);
713 
721  void reinit (const Epetra_CrsMatrix &input_matrix,
722  const bool copy_values = true);
724 
752  SparseMatrix (const Epetra_Map &parallel_partitioning,
753  const size_type n_max_entries_per_row = 0);
754 
768  SparseMatrix (const Epetra_Map &parallel_partitioning,
769  const std::vector<unsigned int> &n_entries_per_row);
770 
800  SparseMatrix (const Epetra_Map &row_parallel_partitioning,
801  const Epetra_Map &col_parallel_partitioning,
802  const size_type n_max_entries_per_row = 0);
803 
833  SparseMatrix (const Epetra_Map &row_parallel_partitioning,
834  const Epetra_Map &col_parallel_partitioning,
835  const std::vector<unsigned int> &n_entries_per_row);
836 
874  template<typename SparsityType>
875  void reinit (const Epetra_Map &parallel_partitioning,
876  const SparsityType &sparsity_pattern,
877  const bool exchange_data = false);
878 
900  template<typename SparsityType>
901  void reinit (const Epetra_Map &row_parallel_partitioning,
902  const Epetra_Map &col_parallel_partitioning,
903  const SparsityType &sparsity_pattern,
904  const bool exchange_data = false);
905 
934  template <typename number>
935  void reinit (const Epetra_Map &parallel_partitioning,
936  const ::SparseMatrix<number> &dealii_sparse_matrix,
937  const double drop_tolerance=1e-13,
938  const bool copy_values=true,
939  const ::SparsityPattern *use_this_sparsity=0);
940 
962  template <typename number>
963  void reinit (const Epetra_Map &row_parallel_partitioning,
964  const Epetra_Map &col_parallel_partitioning,
965  const ::SparseMatrix<number> &dealii_sparse_matrix,
966  const double drop_tolerance=1e-13,
967  const bool copy_values=true,
968  const ::SparsityPattern *use_this_sparsity=0);
970 
998  SparseMatrix (const IndexSet &parallel_partitioning,
999  const MPI_Comm &communicator = MPI_COMM_WORLD,
1000  const unsigned int n_max_entries_per_row = 0);
1001 
1016  SparseMatrix (const IndexSet &parallel_partitioning,
1017  const MPI_Comm &communicator,
1018  const std::vector<unsigned int> &n_entries_per_row);
1019 
1047  SparseMatrix (const IndexSet &row_parallel_partitioning,
1048  const IndexSet &col_parallel_partitioning,
1049  const MPI_Comm &communicator = MPI_COMM_WORLD,
1050  const size_type n_max_entries_per_row = 0);
1051 
1081  SparseMatrix (const IndexSet &row_parallel_partitioning,
1082  const IndexSet &col_parallel_partitioning,
1083  const MPI_Comm &communicator,
1084  const std::vector<unsigned int> &n_entries_per_row);
1085 
1125  template<typename SparsityType>
1126  void reinit (const IndexSet &parallel_partitioning,
1127  const SparsityType &sparsity_pattern,
1128  const MPI_Comm &communicator = MPI_COMM_WORLD,
1129  const bool exchange_data = false);
1130 
1152  template<typename SparsityType>
1153  void reinit (const IndexSet &row_parallel_partitioning,
1154  const IndexSet &col_parallel_partitioning,
1155  const SparsityType &sparsity_pattern,
1156  const MPI_Comm &communicator = MPI_COMM_WORLD,
1157  const bool exchange_data = false);
1158 
1187  template <typename number>
1188  void reinit (const IndexSet &parallel_partitioning,
1189  const ::SparseMatrix<number> &dealii_sparse_matrix,
1190  const MPI_Comm &communicator = MPI_COMM_WORLD,
1191  const double drop_tolerance=1e-13,
1192  const bool copy_values=true,
1193  const ::SparsityPattern *use_this_sparsity=0);
1194 
1216  template <typename number>
1217  void reinit (const IndexSet &row_parallel_partitioning,
1218  const IndexSet &col_parallel_partitioning,
1219  const ::SparseMatrix<number> &dealii_sparse_matrix,
1220  const MPI_Comm &communicator = MPI_COMM_WORLD,
1221  const double drop_tolerance=1e-13,
1222  const bool copy_values=true,
1223  const ::SparsityPattern *use_this_sparsity=0);
1225 
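 // Illustrative usage sketch (not part of the original documentation): one
 // common way to set up a matrix distributed across MPI processes is to
 // describe the locally owned rows by an IndexSet and hand it, together with
 // a sparsity pattern built for those rows, to reinit(). The names
 // locally_owned_rows, sparsity_pattern and mpi_communicator are
 // placeholders for objects created elsewhere.
 //
 //   IndexSet locally_owned_rows (n_global_rows);
 //   locally_owned_rows.add_range (my_first_row, my_one_past_last_row);
 //
 //   TrilinosWrappers::SparseMatrix system_matrix;
 //   system_matrix.reinit (locally_owned_rows,
 //                         sparsity_pattern,
 //                         mpi_communicator);
 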
1229 
1234  size_type m () const;
1235 
1240  size_type n () const;
1241 
1256  unsigned int local_size () const;
1257 
1274  std::pair<size_type, size_type>
1275  local_range () const;
1276 
1282  bool in_local_range (const size_type index) const;
1283 
1288  size_type n_nonzero_elements () const;
1289 
1294  unsigned int row_length (const size_type row) const;
1295 
1305  bool is_compressed () const;
1306 
1315  size_type memory_consumption () const;
1316 
1318 
1322 
1339  SparseMatrix &
1340  operator = (const double d);
1341 
1352  void clear ();
1353 
1397  void compress (::VectorOperation::values operation);
1398 
1402  void compress () DEAL_II_DEPRECATED;
1403 
1430  void set (const size_type i,
1431  const size_type j,
1432  const TrilinosScalar value);
1433 
1470  void set (const std::vector<size_type> &indices,
1471  const FullMatrix<TrilinosScalar> &full_matrix,
1472  const bool elide_zero_values = false);
1473 
1481  void set (const std::vector<size_type> &row_indices,
1482  const std::vector<size_type> &col_indices,
1483  const FullMatrix<TrilinosScalar> &full_matrix,
1484  const bool elide_zero_values = false);
1485 
1513  void set (const size_type row,
1514  const std::vector<size_type> &col_indices,
1515  const std::vector<TrilinosScalar> &values,
1516  const bool elide_zero_values = false);
1517 
1545  void set (const size_type row,
1546  const size_type n_cols,
1547  const size_type *col_indices,
1548  const TrilinosScalar *values,
1549  const bool elide_zero_values = false);
1550 
1566  void add (const size_type i,
1567  const size_type j,
1568  const TrilinosScalar value);
1569 
1606  void add (const std::vector<size_type> &indices,
1607  const FullMatrix<TrilinosScalar> &full_matrix,
1608  const bool elide_zero_values = true);
1609 
1617  void add (const std::vector<size_type> &row_indices,
1618  const std::vector<size_type> &col_indices,
1619  const FullMatrix<TrilinosScalar> &full_matrix,
1620  const bool elide_zero_values = true);
1621 
1648  void add (const size_type row,
1649  const std::vector<size_type> &col_indices,
1650  const std::vector<TrilinosScalar> &values,
1651  const bool elide_zero_values = true);
1652 
1678  void add (const size_type row,
1679  const size_type n_cols,
1680  const size_type *col_indices,
1681  const TrilinosScalar *values,
1682  const bool elide_zero_values = true,
1683  const bool col_indices_are_sorted = false);
1684 
1689  SparseMatrix &operator *= (const TrilinosScalar factor);
1690 
1695  SparseMatrix &operator /= (const TrilinosScalar factor);
1696 
1701  void copy_from (const SparseMatrix &source);
1702 
1715  void add (const TrilinosScalar factor,
1716  const SparseMatrix &matrix);
1717 
1757  void clear_row (const size_type row,
1758  const TrilinosScalar new_diag_value = 0);
1759 
1776  void clear_rows (const std::vector<size_type> &rows,
1777  const TrilinosScalar new_diag_value = 0);
1778 
1783  void transpose ();
1784 
1786 
1790 
1810  TrilinosScalar operator () (const size_type i,
1811  const size_type j) const;
1812 
1850  TrilinosScalar el (const size_type i,
1851  const size_type j) const;
1852 
1864  TrilinosScalar diag_element (const size_type i) const;
1865 
1867 
1871 
1893  template<typename VectorType>
1894  void vmult (VectorType &dst,
1895  const VectorType &src) const;
1896 
1919  template <typename VectorType>
1920  void Tvmult (VectorType &dst,
1921  const VectorType &src) const;
1922 
1946  template<typename VectorType>
1947  void vmult_add (VectorType &dst,
1948  const VectorType &src) const;
1949 
1973  template <typename VectorType>
1974  void Tvmult_add (VectorType &dst,
1975  const VectorType &src) const;
1976 
2026  TrilinosScalar matrix_norm_square (const VectorBase &v) const;
2027 
2063  TrilinosScalar matrix_scalar_product (const VectorBase &u,
2064  const VectorBase &v) const;
2065 
2100  TrilinosScalar residual (VectorBase &dst,
2101  const VectorBase &x,
2102  const VectorBase &b) const;
2103 
2128  void mmult (SparseMatrix &C,
2129  const SparseMatrix &B,
2130  const VectorBase &V = VectorBase()) const;
2131 
2132 
2159  void Tmmult (SparseMatrix &C,
2160  const SparseMatrix &B,
2161  const VectorBase &V = VectorBase()) const;
2162 
2164 
2168 
2185  TrilinosScalar l1_norm () const;
2186 
2202  TrilinosScalar linfty_norm () const;
2203 
2211  TrilinosScalar frobenius_norm () const;
2212 
2214 
2218 
2224  const Epetra_CrsMatrix &trilinos_matrix () const;
2225 
2233  const Epetra_CrsGraph &trilinos_sparsity_pattern () const;
2234 
2244  const Epetra_Map &domain_partitioner () const;
2245 
2255  const Epetra_Map &range_partitioner () const;
2256 
2264  const Epetra_Map &row_partitioner () const;
2265 
2275  const Epetra_Map &col_partitioner () const;
2277 
2281 
2286  const_iterator begin () const;
2287 
2291  const_iterator end () const;
2292 
2306  const_iterator begin (const size_type r) const;
2307 
2323  const_iterator end (const size_type r) const;
2324 
2329  iterator begin ();
2330 
2334  iterator end ();
2335 
2349  iterator begin (const size_type r);
2350 
2366  iterator end (const size_type r);
2367 
2369 
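 // Illustrative sketch (not from the original documentation): the iterators
 // declared above allow looping over all locally stored entries of a matrix
 // A; row(), column() and value() are the accessor functions.
 //
 //   for (TrilinosWrappers::SparseMatrix::const_iterator p = A.begin();
 //        p != A.end(); ++p)
 //     std::cout << p->row() << ' ' << p->column()
 //               << ' ' << p->value() << std::endl;
 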
2373 
2383  void write_ascii ();
2384 
2397  void print (std::ostream &out,
2398  const bool write_extended_trilinos_info = false) const;
2399 
2401 
2408  DeclException1 (ExcTrilinosError,
2409  int,
2410  << "An error with error number " << arg1
2411  << " occurred while calling a Trilinos function");
2412 
2416  DeclException2 (ExcInvalidIndex,
2417  size_type, size_type,
2418  << "The entry with index <" << arg1 << ',' << arg2
2419  << "> does not exist.");
2420 
2424  DeclException0 (ExcSourceEqualsDestination);
2425 
2429  DeclException0 (ExcMatrixNotCompressed);
2430 
2434  DeclException4 (ExcAccessToNonLocalElement,
2435  size_type, size_type, size_type, size_type,
2436  << "You tried to access element (" << arg1
2437  << "/" << arg2 << ")"
2438  << " of a distributed matrix, but only rows "
2439  << arg3 << " through " << arg4
2440  << " are stored locally and can be accessed.");
2441 
2445  DeclException2 (ExcAccessToNonPresentElement,
2446  size_type, size_type,
2447  << "You tried to access element (" << arg1
2448  << "/" << arg2 << ")"
2449  << " of a sparse matrix, but it appears to not"
2450  << " exist in the Trilinos sparsity pattern.");
2452 
2453 
2454 
2455  protected:
2456 
2479  void prepare_add();
2480 
2488  void prepare_set();
2489 
2490 
2491 
2492  private:
2493 
2501  std_cxx1x::shared_ptr<Epetra_Map> column_space_map;
2502 
2513  std_cxx1x::shared_ptr<Epetra_FECrsMatrix> matrix;
2514 
2536  Epetra_CombineMode last_action;
2537 
 2543  bool compressed;
 2544 
2551  };
2552 
2553 
2554 
2555 // -------------------------- inline and template functions ----------------------
2556 
2557 
2558 #ifndef DOXYGEN
2559 
2560  namespace SparseMatrixIterators
2561  {
2562  inline
2563  AccessorBase::AccessorBase(SparseMatrix *matrix, size_type row, size_type index)
2564  :
2565  matrix(matrix),
2566  a_row(row),
2567  a_index(index)
2568  {
2569  visit_present_row ();
2570  }
2571 
2572 
2573  inline
2574  AccessorBase::size_type
2575  AccessorBase::row() const
2576  {
2577  Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
2578  return a_row;
2579  }
2580 
2581 
2582  inline
2583  AccessorBase::size_type
2584  AccessorBase::column() const
2585  {
2586  Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
2587  return (*colnum_cache)[a_index];
2588  }
2589 
2590 
2591  inline
2592  AccessorBase::size_type
2593  AccessorBase::index() const
2594  {
2595  Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
2596  return a_index;
2597  }
2598 
2599 
2600  inline
2601  Accessor<true>::Accessor (MatrixType *matrix,
2602  const size_type row,
2603  const size_type index)
2604  :
2605  AccessorBase(const_cast<SparseMatrix *>(matrix), row, index)
2606  {}
2607 
2608 
2609  template <bool Other>
2610  inline
2611  Accessor<true>::Accessor(const Accessor<Other> &other)
2612  :
2613  AccessorBase(other)
2614  {}
2615 
2616 
2617  inline
2618  TrilinosScalar
2619  Accessor<true>::value() const
2620  {
2621  Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
2622  return (*value_cache)[a_index];
2623  }
2624 
2625 
2626  inline
2627  Accessor<false>::Reference::Reference (
2628  const Accessor<false> &acc)
2629  :
2630  accessor(const_cast<Accessor<false>&>(acc))
2631  {}
2632 
2633 
2634  inline
2635  Accessor<false>::Reference::operator TrilinosScalar () const
2636  {
2637  return (*accessor.value_cache)[accessor.a_index];
2638  }
2639 
2640  inline
2641  const Accessor<false>::Reference &
2642  Accessor<false>::Reference::operator = (const TrilinosScalar n) const
2643  {
2644  (*accessor.value_cache)[accessor.a_index] = n;
2645  accessor.matrix->set(accessor.row(), accessor.column(),
2646  static_cast<TrilinosScalar>(*this));
2647  return *this;
2648  }
2649 
2650 
2651  inline
2652  const Accessor<false>::Reference &
2653  Accessor<false>::Reference::operator += (const TrilinosScalar n) const
2654  {
2655  (*accessor.value_cache)[accessor.a_index] += n;
2656  accessor.matrix->set(accessor.row(), accessor.column(),
2657  static_cast<TrilinosScalar>(*this));
2658  return *this;
2659  }
2660 
2661 
2662  inline
2663  const Accessor<false>::Reference &
2664  Accessor<false>::Reference::operator -= (const TrilinosScalar n) const
2665  {
2666  (*accessor.value_cache)[accessor.a_index] -= n;
2667  accessor.matrix->set(accessor.row(), accessor.column(),
2668  static_cast<TrilinosScalar>(*this));
2669  return *this;
2670  }
2671 
2672 
2673  inline
2674  const Accessor<false>::Reference &
2675  Accessor<false>::Reference::operator *= (const TrilinosScalar n) const
2676  {
2677  (*accessor.value_cache)[accessor.a_index] *= n;
2678  accessor.matrix->set(accessor.row(), accessor.column(),
2679  static_cast<TrilinosScalar>(*this));
2680  return *this;
2681  }
2682 
2683 
2684  inline
2685  const Accessor<false>::Reference &
2686  Accessor<false>::Reference::operator /= (const TrilinosScalar n) const
2687  {
2688  (*accessor.value_cache)[accessor.a_index] /= n;
2689  accessor.matrix->set(accessor.row(), accessor.column(),
2690  static_cast<TrilinosScalar>(*this));
2691  return *this;
2692  }
2693 
2694 
2695  inline
2696  Accessor<false>::Accessor (MatrixType *matrix,
2697  const size_type row,
2698  const size_type index)
2699  :
2700  AccessorBase(matrix, row, index)
2701  {}
2702 
2703 
2704  inline
2705  Accessor<false>::Reference
2706  Accessor<false>::value() const
2707  {
2708  Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
2709  return Reference(*this);
2710  }
2711 
2712 
2713 
2714  template <bool Constness>
2715  inline
2716  Iterator<Constness>::Iterator(MatrixType *matrix,
2717  const size_type row,
2718  const size_type index)
2719  :
2720  accessor(matrix, row, index)
2721  {}
2722 
2723 
2724  template <bool Constness>
2725  template <bool Other>
2726  inline
2727  Iterator<Constness>::Iterator(const Iterator<Other> &other)
2728  :
2729  accessor(other.accessor)
2730  {}
2731 
2732 
2733  template <bool Constness>
2734  inline
2735  Iterator<Constness> &
2736  Iterator<Constness>::operator++ ()
2737  {
2738  Assert (accessor.a_row < accessor.matrix->m(), ExcIteratorPastEnd());
2739 
2740  ++accessor.a_index;
2741 
2742  // If at end of line: do one
2743  // step, then cycle until we
2744  // find a row with a nonzero
2745  // number of entries.
2746  if (accessor.a_index >= accessor.colnum_cache->size())
2747  {
2748  accessor.a_index = 0;
2749  ++accessor.a_row;
2750 
2751  while ((accessor.a_row < accessor.matrix->m())
2752  &&
2753  (accessor.matrix->row_length(accessor.a_row) == 0))
2754  ++accessor.a_row;
2755 
2756  accessor.visit_present_row();
2757  }
2758  return *this;
2759  }
2760 
2761 
2762  template <bool Constness>
2763  inline
2764  Iterator<Constness>
2765  Iterator<Constness>::operator++ (int)
2766  {
2767  const Iterator<Constness> old_state = *this;
2768  ++(*this);
2769  return old_state;
2770  }
2771 
2772 
2773 
2774  template <bool Constness>
2775  inline
2776  const Accessor<Constness> &
2777  Iterator<Constness>::operator* () const
2778  {
2779  return accessor;
2780  }
2781 
2782 
2783 
2784  template <bool Constness>
2785  inline
2786  const Accessor<Constness> *
2787  Iterator<Constness>::operator-> () const
2788  {
2789  return &accessor;
2790  }
2791 
2792 
2793 
2794  template <bool Constness>
2795  inline
2796  bool
2797  Iterator<Constness>::operator == (const Iterator<Constness> &other) const
2798  {
2799  return (accessor.a_row == other.accessor.a_row &&
2800  accessor.a_index == other.accessor.a_index);
2801  }
2802 
2803 
2804 
2805  template <bool Constness>
2806  inline
2807  bool
2808  Iterator<Constness>::operator != (const Iterator<Constness> &other) const
2809  {
2810  return ! (*this == other);
2811  }
2812 
2813 
2814 
2815  template <bool Constness>
2816  inline
2817  bool
2818  Iterator<Constness>::operator < (const Iterator<Constness> &other) const
2819  {
2820  return (accessor.row() < other.accessor.row() ||
2821  (accessor.row() == other.accessor.row() &&
2822  accessor.index() < other.accessor.index()));
2823  }
2824 
2825 
2826  template <bool Constness>
2827  inline
2828  bool
2829  Iterator<Constness>::operator > (const Iterator<Constness> &other) const
2830  {
2831  return (other < *this);
2832  }
2833 
2834  }
2835 
2836 
2837 
2838  inline
 2839  SparseMatrix::const_iterator
 2840  SparseMatrix::begin() const
2841  {
2842  return const_iterator(this, 0, 0);
2843  }
2844 
2845 
2846 
2847  inline
 2848  SparseMatrix::const_iterator
 2849  SparseMatrix::end() const
2850  {
2851  return const_iterator(this, m(), 0);
2852  }
2853 
2854 
2855 
2856  inline
 2857  SparseMatrix::const_iterator
 2858  SparseMatrix::begin(const size_type r) const
2859  {
2860  Assert (r < m(), ExcIndexRange(r, 0, m()));
2861  if (row_length(r) > 0)
2862  return const_iterator(this, r, 0);
2863  else
2864  return end (r);
2865  }
2866 
2867 
2868 
2869  inline
 2870  SparseMatrix::const_iterator
 2871  SparseMatrix::end(const size_type r) const
2872  {
2873  Assert (r < m(), ExcIndexRange(r, 0, m()));
2874 
2875  // place the iterator on the first entry
2876  // past this line, or at the end of the
2877  // matrix
2878  for (size_type i=r+1; i<m(); ++i)
2879  if (row_length(i) > 0)
2880  return const_iterator(this, i, 0);
2881 
2882  // if there is no such line, then take the
2883  // end iterator of the matrix
2884  return end();
2885  }
2886 
2887 
2888 
2889  inline
 2890  SparseMatrix::iterator
 2891  SparseMatrix::begin()
 2892  {
2893  return iterator(this, 0, 0);
2894  }
2895 
2896 
2897 
2898  inline
 2899  SparseMatrix::iterator
 2900  SparseMatrix::end()
 2901  {
2902  return iterator(this, m(), 0);
2903  }
2904 
2905 
2906 
2907  inline
 2908  SparseMatrix::iterator
 2909  SparseMatrix::begin(const size_type r)
2910  {
2911  Assert (r < m(), ExcIndexRange(r, 0, m()));
2912  if (row_length(r) > 0)
2913  return iterator(this, r, 0);
2914  else
2915  return end (r);
2916  }
2917 
2918 
2919 
2920  inline
 2921  SparseMatrix::iterator
 2922  SparseMatrix::end(const size_type r)
2923  {
2924  Assert (r < m(), ExcIndexRange(r, 0, m()));
2925 
2926  // place the iterator on the first entry
2927  // past this line, or at the end of the
2928  // matrix
2929  for (size_type i=r+1; i<m(); ++i)
2930  if (row_length(i) > 0)
2931  return iterator(this, i, 0);
2932 
2933  // if there is no such line, then take the
2934  // end iterator of the matrix
2935  return end();
2936  }
2937 
2938 
2939 
2940  inline
2941  bool
2942  SparseMatrix::in_local_range (const size_type index) const
 2943  {
 2944  TrilinosWrappers::types::int_type begin, end;
 2945 #ifndef DEAL_II_USE_LARGE_INDEX_TYPE
 2946  begin = matrix->RowMap().MinMyGID();
 2947  end = matrix->RowMap().MaxMyGID()+1;
 2948 #else
 2949  begin = matrix->RowMap().MinMyGID64();
 2950  end = matrix->RowMap().MaxMyGID64()+1;
2951 #endif
2952 
2953  return ((index >= static_cast<size_type>(begin)) &&
2954  (index < static_cast<size_type>(end)));
2955  }
2956 
2957 
2958 
2959  inline
2960  bool
 2961  SparseMatrix::is_compressed () const
 2962  {
2963  return compressed;
2964  }
2965 
2966 
2967 
2968  inline
2969  void
2970  SparseMatrix::compress (::VectorOperation::values operation)
2971  {
2972 
2973  Epetra_CombineMode mode = last_action;
2974  if (last_action == Zero)
2975  {
2976  if ((operation==::VectorOperation::add) ||
2977  (operation==::VectorOperation::unknown))
2978  mode = Add;
2979  else if (operation==::VectorOperation::insert)
2980  mode = Insert;
2981  }
2982  else
2983  {
2984  Assert(
2985  ((last_action == Add) && (operation!=::VectorOperation::insert))
2986  ||
2987  ((last_action == Insert) && (operation!=::VectorOperation::add)),
2988  ExcMessage("operation and argument to compress() do not match"));
2989  }
2990 
2991  // flush buffers
2992  int ierr;
2993  ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(),
2994  true, mode);
2995 
2996  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2997 
2998  ierr = matrix->OptimizeStorage ();
2999  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3000 
3001  last_action = Zero;
3002 
3003  compressed = true;
3004  }
3005 
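  // Illustrative sketch (not part of the original code): the usual assembly
  // cycle accumulates local contributions with add() and finishes with a
  // compress() call that exchanges off-processor data; all MPI processes
  // have to take part in it with the same argument. The matrix 'A' and the
  // element loop are placeholders.
  //
  //   for (/* each locally owned element */)
  //     A.add (i, j, local_contribution);
  //   A.compress (VectorOperation::add);
 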
3006 
3007 
3008  inline
3009  void
 3010  SparseMatrix::compress ()
 3011  {
3012  compress(::VectorOperation::unknown);
3013  }
3014 
3015 
3016 
3017  inline
3018  SparseMatrix &
3019  SparseMatrix::operator = (const double d)
3020  {
 3021  Assert (d==0, ExcScalarAssignmentOnlyForZeroValue());
 3022  compress (::VectorOperation::unknown); // TODO: why do we do this? Should we not check for is_compressed?
3023 
3024  const int ierr = matrix->PutScalar(d);
3025  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3026 
3027  return *this;
3028  }
3029 
3030 
3031 
3032  // Inline the set() and add()
3033  // functions, since they will be
3034  // called frequently, and the
3035  // compiler can optimize away
3036  // some unnecessary loops when
3037  // the sizes are given at
3038  // compile time.
3039  inline
3040  void
3041  SparseMatrix::set (const size_type i,
3042  const size_type j,
3043  const TrilinosScalar value)
3044  {
3045 
 3046  Assert (numbers::is_finite(value), ExcNumberNotFinite());
 3047 
3048  set (i, 1, &j, &value, false);
3049  }
3050 
3051 
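  // Illustrative sketch (not part of the original code): write a full 2x2
  // block into global rows and columns 4 and 7 of a matrix 'A' via the
  // FullMatrix overload of set(), then make the insertion visible on all
  // processes. The matrix 'A' is a placeholder.
  //
  //   std::vector<TrilinosWrappers::SparseMatrix::size_type> rows (2);
  //   rows[0] = 4;  rows[1] = 7;
  //   FullMatrix<TrilinosScalar> block (2, 2);
  //   block(0,0) = 1.;  block(1,1) = 1.;
  //   A.set (rows, rows, block, /*elide_zero_values=*/ false);
  //   A.compress (VectorOperation::insert);
 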
3052 
3053  inline
3054  void
3055  SparseMatrix::set (const std::vector<size_type> &indices,
3056  const FullMatrix<TrilinosScalar> &values,
3057  const bool elide_zero_values)
3058  {
3059  Assert (indices.size() == values.m(),
3060  ExcDimensionMismatch(indices.size(), values.m()));
3061  Assert (values.m() == values.n(), ExcNotQuadratic());
3062 
3063  for (size_type i=0; i<indices.size(); ++i)
3064  set (indices[i], indices.size(), &indices[0], &values(i,0),
3065  elide_zero_values);
3066  }
3067 
3068 
3069 
3070  inline
3071  void
3072  SparseMatrix::set (const std::vector<size_type> &row_indices,
3073  const std::vector<size_type> &col_indices,
3074  const FullMatrix<TrilinosScalar> &values,
3075  const bool elide_zero_values)
3076  {
3077  Assert (row_indices.size() == values.m(),
3078  ExcDimensionMismatch(row_indices.size(), values.m()));
3079  Assert (col_indices.size() == values.n(),
3080  ExcDimensionMismatch(col_indices.size(), values.n()));
3081 
3082  for (size_type i=0; i<row_indices.size(); ++i)
3083  set (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
3084  elide_zero_values);
3085  }
3086 
3087 
3088 
3089  inline
3090  void
3091  SparseMatrix::set (const size_type row,
3092  const std::vector<size_type> &col_indices,
3093  const std::vector<TrilinosScalar> &values,
3094  const bool elide_zero_values)
3095  {
3096  Assert (col_indices.size() == values.size(),
3097  ExcDimensionMismatch(col_indices.size(), values.size()));
3098 
3099  set (row, col_indices.size(), &col_indices[0], &values[0],
3100  elide_zero_values);
3101  }
3102 
3103 
3104 
3105  inline
3106  void
3107  SparseMatrix::set (const size_type row,
3108  const size_type n_cols,
3109  const size_type *col_indices,
3110  const TrilinosScalar *values,
3111  const bool elide_zero_values)
3112  {
3113  int ierr;
3114  if (last_action == Add)
3115  {
3116  ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(),
3117  true);
3118 
3119  Assert (ierr == 0, ExcTrilinosError(ierr));
3120  }
3121 
3122  last_action = Insert;
3123 
3124  TrilinosWrappers::types::int_type *col_index_ptr;
3125  TrilinosScalar *col_value_ptr;
 3126  TrilinosWrappers::types::int_type n_columns;
 3127 
3128  TrilinosScalar short_val_array[100];
3129  TrilinosWrappers::types::int_type short_index_array[100];
3130  std::vector<TrilinosScalar> long_val_array;
3131  std::vector<TrilinosWrappers::types::int_type> long_index_array;
3132 
3133 
3134  // If we don't elide zeros, the pointers are already available... need to
3135  // cast to non-const pointers as that is the format taken by Trilinos (but
3136  // we will not modify const data)
3137  if (elide_zero_values == false)
3138  {
3139  col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
3140  col_value_ptr = const_cast<TrilinosScalar *>(values);
3141  n_columns = n_cols;
3142  }
3143  else
3144  {
3145  // Otherwise, extract nonzero values in each row and get the
3146  // respective indices.
3147  if (n_cols > 100)
3148  {
3149  long_val_array.resize(n_cols);
3150  long_index_array.resize(n_cols);
3151  col_index_ptr = &long_index_array[0];
3152  col_value_ptr = &long_val_array[0];
3153  }
3154  else
3155  {
3156  col_index_ptr = &short_index_array[0];
3157  col_value_ptr = &short_val_array[0];
3158  }
3159 
3160  n_columns = 0;
3161  for (size_type j=0; j<n_cols; ++j)
3162  {
3163  const double value = values[j];
3165  if (value != 0)
3166  {
3167  col_index_ptr[n_columns] = col_indices[j];
3168  col_value_ptr[n_columns] = value;
3169  n_columns++;
3170  }
3171  }
3172 
3174  }
3175 
3176 
3177  // If the calling matrix owns the row to which we want to insert values,
3178  // we can directly call the Epetra_CrsMatrix input function, which is much
3179  // faster than the Epetra_FECrsMatrix function. We distinguish between two
3180  // cases: the first one is when the matrix is not filled (i.e., it is
3181  // possible to add new elements to the sparsity pattern), and the second
3182  // one is when the pattern is already fixed. In the former case, we add
3183  // the possibility to insert new values, and in the second we just replace
3184  // data.
3185  if (row_partitioner().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == true)
3186  {
3187  if (matrix->Filled() == false)
3188  {
3189  ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
3190  static_cast<TrilinosWrappers::types::int_type>(row),
3191  static_cast<int>(n_columns),const_cast<double *>(col_value_ptr),
3192  col_index_ptr);
3193 
3194  // When inserting elements, we do not want to create exceptions in
3195  // the case when inserting non-local data (since that's what we
3196  // want to do right now).
3197  if (ierr > 0)
3198  ierr = 0;
3199  }
3200  else
3201  ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row, n_columns,
3202  col_value_ptr,
3203  col_index_ptr);
3204  }
3205  else
3206  {
3207  // When we're at off-processor data, we have to stick with the
 3208  // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
 3209  // we call it is the fastest one (any other will lead to repeated
 3210  // allocation and deallocation of memory in order to call the function
 3211  // we already use, which is very inefficient if writing one element at
 3212  // a time).
3213  compressed = false;
3214 
3215  if (matrix->Filled() == false)
3216  {
3217  ierr = matrix->InsertGlobalValues (1,
 3218  (TrilinosWrappers::types::int_type *)&row,
 3219  n_columns, col_index_ptr,
3220  &col_value_ptr,
3221  Epetra_FECrsMatrix::ROW_MAJOR);
3222  if (ierr > 0)
3223  ierr = 0;
3224  }
3225  else
3226  ierr = matrix->ReplaceGlobalValues (1,
 3227  (TrilinosWrappers::types::int_type *)&row,
 3228  n_columns, col_index_ptr,
3229  &col_value_ptr,
3230  Epetra_FECrsMatrix::ROW_MAJOR);
3231  }
3232 
3233  Assert (ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
3234  AssertThrow (ierr >= 0, ExcTrilinosError(ierr));
3235  }
3236 
3237 
3238 
3239  inline
3240  void
3241  SparseMatrix::add (const size_type i,
3242  const size_type j,
3243  const TrilinosScalar value)
3244  {
 3245  Assert (numbers::is_finite(value), ExcNumberNotFinite());
 3246 
3247  if (value == 0)
3248  {
 3249  // we have to do checks on Insert/Add in any case to be consistent
 3250  // with the MPI communication model (see the comments in the
 3251  // documentation of TrilinosWrappers::Vector), but we can save some
 3252  // work if the addend is zero. However, these actions are still done
 3253  // in case we later pass on to the other function.
3254 
3255  // TODO: fix this (do not run compress here, but fail)
3256  if (last_action == Insert)
3257  {
3258  int ierr;
3259  ierr = matrix->GlobalAssemble(*column_space_map,
3260  row_partitioner(), false);
3261 
3262  Assert (ierr == 0, ExcTrilinosError(ierr));
3263  (void)ierr; // removes -Wunused-but-set-variable in optimized mode
3264  }
3265 
3266  last_action = Add;
3267 
3268  return;
3269  }
3270  else
3271  add (i, 1, &j, &value, false);
3272  }
3273 
3274 
3275 
3276  inline
3277  void
3278  SparseMatrix::add (const std::vector<size_type> &indices,
3279  const FullMatrix<TrilinosScalar> &values,
3280  const bool elide_zero_values)
3281  {
3282  Assert (indices.size() == values.m(),
3283  ExcDimensionMismatch(indices.size(), values.m()));
3284  Assert (values.m() == values.n(), ExcNotQuadratic());
3285 
3286  for (size_type i=0; i<indices.size(); ++i)
3287  add (indices[i], indices.size(), &indices[0], &values(i,0),
3288  elide_zero_values);
3289  }
3290 
3291 
3292 
3293  inline
3294  void
3295  SparseMatrix::add (const std::vector<size_type> &row_indices,
3296  const std::vector<size_type> &col_indices,
3297  const FullMatrix<TrilinosScalar> &values,
3298  const bool elide_zero_values)
3299  {
3300  Assert (row_indices.size() == values.m(),
3301  ExcDimensionMismatch(row_indices.size(), values.m()));
3302  Assert (col_indices.size() == values.n(),
3303  ExcDimensionMismatch(col_indices.size(), values.n()));
3304 
3305  for (size_type i=0; i<row_indices.size(); ++i)
3306  add (row_indices[i], col_indices.size(), &col_indices[0],
3307  &values(i,0), elide_zero_values);
3308  }
3309 
3310 
3311 
3312  inline
3313  void
3314  SparseMatrix::add (const size_type row,
3315  const std::vector<size_type> &col_indices,
3316  const std::vector<TrilinosScalar> &values,
3317  const bool elide_zero_values)
3318  {
3319  Assert (col_indices.size() == values.size(),
3320  ExcDimensionMismatch(col_indices.size(), values.size()));
3321 
3322  add (row, col_indices.size(), &col_indices[0], &values[0],
3323  elide_zero_values);
3324  }
3325 
3326 
3327 
3328  inline
3329  void
3330  SparseMatrix::add (const size_type row,
3331  const size_type n_cols,
3332  const size_type *col_indices,
3333  const TrilinosScalar *values,
3334  const bool elide_zero_values,
3335  const bool /*col_indices_are_sorted*/)
3336  {
3337  int ierr;
3338  if (last_action == Insert)
3339  {
3340  // TODO: this could lead to a dead lock when only one processor
3341  // calls GlobalAssemble.
3342  ierr = matrix->GlobalAssemble(*column_space_map,
3343  row_partitioner(), false);
3344 
3345  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3346  }
3347 
3348  last_action = Add;
3349 
3350  TrilinosWrappers::types::int_type *col_index_ptr;
3351  TrilinosScalar *col_value_ptr;
 3352  TrilinosWrappers::types::int_type n_columns;
 3353 
3354  double short_val_array[100];
3355  TrilinosWrappers::types::int_type short_index_array[100];
3356  std::vector<TrilinosScalar> long_val_array;
3357  std::vector<TrilinosWrappers::types::int_type> long_index_array;
3358 
3359  // If we don't elide zeros, the pointers are already available... need to
3360  // cast to non-const pointers as that is the format taken by Trilinos (but
3361  // we will not modify const data)
3362  if (elide_zero_values == false)
3363  {
3364  col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
3365  col_value_ptr = const_cast<TrilinosScalar *>(values);
3366  n_columns = n_cols;
3367 #ifdef DEBUG
3368  for (size_type j=0; j<n_cols; ++j)
 3369  Assert (numbers::is_finite(values[j]), ExcNumberNotFinite());
 3370 #endif
3371  }
3372  else
3373  {
3374  // Otherwise, extract nonzero values in each row and the corresponding
3375  // index.
3376  if (n_cols > 100)
3377  {
3378  long_val_array.resize(n_cols);
3379  long_index_array.resize(n_cols);
3380  col_index_ptr = &long_index_array[0];
3381  col_value_ptr = &long_val_array[0];
3382  }
3383  else
3384  {
3385  col_index_ptr = &short_index_array[0];
3386  col_value_ptr = &short_val_array[0];
3387  }
3388 
3389  n_columns = 0;
3390  for (size_type j=0; j<n_cols; ++j)
3391  {
3392  const double value = values[j];
3393 
3395  if (value != 0)
3396  {
3397  col_index_ptr[n_columns] = col_indices[j];
3398  col_value_ptr[n_columns] = value;
3399  n_columns++;
3400  }
3401  }
3402 
3404 
3405  }
3406 
3407  // If the calling processor owns the row to which we want to add values, we
3408  // can directly call the Epetra_CrsMatrix input function, which is much
3409  // faster than the Epetra_FECrsMatrix function.
3410  if (row_partitioner().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == true)
3411  {
3412  ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row, n_columns,
3413  col_value_ptr,
3414  col_index_ptr);
3415  }
3416  else
3417  {
3418  // When we're at off-processor data, we have to stick with the
3419  // standard SumIntoGlobalValues function. Nevertheless, the way we
3420  // call it is the fastest one (any other will lead to repeated
3421  // allocation and deallocation of memory in order to call the function
3422  // we already use, which is very inefficient if writing one element at
3423  // a time).
3424  compressed = false;
3425 
3426  ierr = matrix->SumIntoGlobalValues (1,
3427  (TrilinosWrappers::types::int_type *)&row, n_columns,
3428  col_index_ptr,
3429  &col_value_ptr,
3430  Epetra_FECrsMatrix::ROW_MAJOR);
3431  }
3432 
3433 #ifdef DEBUG
3434  if (ierr > 0)
3435  {
3436  std::cout << "------------------------------------------"
3437  << std::endl;
3438  std::cout << "Got error " << ierr << " in row " << row
3439  << " of proc " << row_partitioner().Comm().MyPID()
3440  << " when trying to add the columns:" << std::endl;
3441  for (TrilinosWrappers::types::int_type i=0; i<n_columns; ++i)
3442  std::cout << col_index_ptr[i] << " ";
3443  std::cout << std::endl << std::endl;
3444  std::cout << "Matrix row has the following indices:" << std::endl;
3445  int n_indices, *indices;
3446  trilinos_sparsity_pattern().ExtractMyRowView(row_partitioner().LID(static_cast<TrilinosWrappers::types::int_type>(row)),
3447  n_indices,
3448  indices);
3449  for (TrilinosWrappers::types::int_type i=0; i<n_indices; ++i)
3450  std::cout << indices[i] << " ";
3451  std::cout << std::endl << std::endl;
3452  Assert (ierr <= 0,
3453  ExcAccessToNonPresentElement(row, col_index_ptr[0]));
3454  }
3455 #endif
3456  Assert (ierr >= 0, ExcTrilinosError(ierr));
3457  }
3458 
3459 
3460 
3461  // inline "simple" functions that are
3462  // called frequently and do only involve
3463  // a call to some Trilinos function.
3464  inline
 3465  SparseMatrix::size_type
 3466  SparseMatrix::m () const
3467  {
3468 #ifndef DEAL_II_USE_LARGE_INDEX_TYPE
3469  return matrix->NumGlobalRows();
3470 #else
3471  return matrix->NumGlobalRows64();
3472 #endif
3473  }
3474 
3475 
3476 
3477  inline
 3478  SparseMatrix::size_type
 3479  SparseMatrix::n () const
3480  {
3481 #ifndef DEAL_II_USE_LARGE_INDEX_TYPE
3482  return matrix->NumGlobalCols();
3483 #else
3484  return matrix->NumGlobalCols64();
3485 #endif
3486  }
3487 
3488 
3489 
3490  inline
3491  unsigned int
3492  SparseMatrix::local_size () const
3493  {
3494  return matrix -> NumMyRows();
3495  }
3496 
3497 
3498 
3499  inline
3500  std::pair<SparseMatrix::size_type, SparseMatrix::size_type>
3501  SparseMatrix::local_range () const
3502  {
3503  size_type begin, end;
3504 #ifndef DEAL_II_USE_LARGE_INDEX_TYPE
3505  begin = matrix->RowMap().MinMyGID();
3506  end = matrix->RowMap().MaxMyGID()+1;
3507 #else
3508  begin = matrix->RowMap().MinMyGID64();
3509  end = matrix->RowMap().MaxMyGID64()+1;
3510 #endif
3511 
3512  return std::make_pair (begin, end);
3513  }
3514 
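  // Illustrative sketch (not part of the original code): local_range() and
  // in_local_range() describe which rows of a matrix 'A' are stored on the
  // current process.
  //
  //   const std::pair<TrilinosWrappers::SparseMatrix::size_type,
  //                   TrilinosWrappers::SparseMatrix::size_type>
  //     range = A.local_range ();
  //   for (TrilinosWrappers::SparseMatrix::size_type i = range.first;
  //        i < range.second; ++i)
  //     Assert (A.in_local_range (i), ExcInternalError());
 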
3515 
3516 
3517  inline
 3518  SparseMatrix::size_type
 3519  SparseMatrix::n_nonzero_elements () const
 3520  {
3521 #ifndef DEAL_II_USE_LARGE_INDEX_TYPE
3522  return matrix->NumGlobalNonzeros();
3523 #else
3524  return matrix->NumGlobalNonzeros64();
3525 #endif
3526  }
3527 
3528 
3529 
3530  template <typename SparsityType>
3531  inline
3532  void SparseMatrix::reinit (const IndexSet &parallel_partitioning,
3533  const SparsityType &sparsity_pattern,
3534  const MPI_Comm &communicator,
3535  const bool exchange_data)
3536  {
3537  Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, false);
3538  reinit (map, map, sparsity_pattern, exchange_data);
3539  }
3540 
3541 
3542 
3543  template <typename SparsityType>
3544  inline
3545  void SparseMatrix::reinit (const IndexSet &row_parallel_partitioning,
3546  const IndexSet &col_parallel_partitioning,
3547  const SparsityType &sparsity_pattern,
3548  const MPI_Comm &communicator,
3549  const bool exchange_data)
3550  {
3551  Epetra_Map row_map =
3552  row_parallel_partitioning.make_trilinos_map (communicator, false);
3553  Epetra_Map col_map =
3554  col_parallel_partitioning.make_trilinos_map (communicator, false);
3555  reinit (row_map, col_map, sparsity_pattern, exchange_data);
3556  }
3557 
3558 
3559 
3560  template <typename number>
3561  inline
3562  void SparseMatrix::reinit (const IndexSet &parallel_partitioning,
3563  const ::SparseMatrix<number> &sparse_matrix,
3564  const MPI_Comm &communicator,
3565  const double drop_tolerance,
3566  const bool copy_values,
3567  const ::SparsityPattern *use_this_sparsity)
3568  {
3569  Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, false);
3570  reinit (map, map, sparse_matrix, drop_tolerance, copy_values,
3571  use_this_sparsity);
3572  }
3573 
3574 
3575 
3576  template <typename number>
3577  inline
3578  void SparseMatrix::reinit (const IndexSet &row_parallel_partitioning,
3579  const IndexSet &col_parallel_partitioning,
3580  const ::SparseMatrix<number> &sparse_matrix,
3581  const MPI_Comm &communicator,
3582  const double drop_tolerance,
3583  const bool copy_values,
3584  const ::SparsityPattern *use_this_sparsity)
3585  {
3586  Epetra_Map row_map =
3587  row_parallel_partitioning.make_trilinos_map (communicator, false);
3588  Epetra_Map col_map =
3589  col_parallel_partitioning.make_trilinos_map (communicator, false);
3590  reinit (row_map, col_map, sparse_matrix, drop_tolerance, copy_values,
3591  use_this_sparsity);
3592  }
3593 
3594 
3595 
3596  inline
3597  TrilinosScalar
3598  SparseMatrix::l1_norm () const
3599  {
3600  Assert (matrix->Filled(), ExcMatrixNotCompressed());
3601  return matrix->NormOne();
3602  }
3603 
3604 
3605 
3606  inline
3607  TrilinosScalar
3608  SparseMatrix::linfty_norm () const
3609  {
3610  Assert (matrix->Filled(), ExcMatrixNotCompressed());
3611  return matrix->NormInf();
3612  }
3613 
3614 
3615 
3616  inline
3617  TrilinosScalar
 3618  SparseMatrix::frobenius_norm () const
 3619  {
3620  Assert (matrix->Filled(), ExcMatrixNotCompressed());
3621  return matrix->NormFrobenius();
3622  }
3623 
3624 
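  // Illustrative sketch (not part of the original code): all three norms
  // require an assembled (compressed) matrix, so compress() must have been
  // called on 'A' before.
  //
  //   const TrilinosScalar n_one = A.l1_norm ();
  //   const TrilinosScalar n_inf = A.linfty_norm ();
  //   const TrilinosScalar n_fro = A.frobenius_norm ();
 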
3625 
3626  inline
3627  SparseMatrix &
3628  SparseMatrix::operator *= (const TrilinosScalar a)
3629  {
3630  const int ierr = matrix->Scale (a);
3631  Assert (ierr == 0, ExcTrilinosError(ierr));
3632  (void)ierr; // removes -Wunused-variable in optimized mode
3633 
3634  return *this;
3635  }
3636 
3637 
3638 
3639  inline
3640  SparseMatrix &
3641  SparseMatrix::operator /= (const TrilinosScalar a)
3642  {
3643  Assert (a !=0, ExcDivideByZero());
3644 
3645  const TrilinosScalar factor = 1./a;
3646 
3647  const int ierr = matrix->Scale (factor);
3648  Assert (ierr == 0, ExcTrilinosError(ierr));
3649  (void)ierr; // removes -Wunused-variable in optimized mode
3650 
3651  return *this;
3652  }
3653 
3654 
3655 
3656  namespace internal
3657  {
3658  namespace SparseMatrix
3659  {
3660  template <typename VectorType>
3661  inline
3662  void check_vector_map_equality(const Epetra_CrsMatrix &,
3663  const VectorType &,
3664  const VectorType &)
3665  {
3666  }
3667 
3668  inline
3669  void check_vector_map_equality(const Epetra_CrsMatrix &m,
 3670  const TrilinosWrappers::MPI::Vector &in,
 3671  const TrilinosWrappers::MPI::Vector &out)
3672  {
3673  Assert (in.vector_partitioner().SameAs(m.DomainMap()) == true,
3674  ExcMessage ("Column map of matrix does not fit with vector map!"));
3675  Assert (out.vector_partitioner().SameAs(m.RangeMap()) == true,
3676  ExcMessage ("Row map of matrix does not fit with vector map!"));
3677  (void)m;
3678  (void)in;
3679  (void)out;
3680  }
3681  }
3682  }
3683 
3684 
3685  template <typename VectorType>
3686  inline
3687  void
3688  SparseMatrix::vmult (VectorType &dst,
3689  const VectorType &src) const
3690  {
3691  Assert (&src != &dst, ExcSourceEqualsDestination());
3692  Assert (matrix->Filled(), ExcMatrixNotCompressed());
3693  (void)src;
3694  (void)dst;
3695 
3696  internal::SparseMatrix::check_vector_map_equality(*matrix, src, dst);
3697  const size_type dst_local_size = dst.end() - dst.begin();
3698  AssertDimension (dst_local_size, static_cast<size_type>(matrix->RangeMap().NumMyElements()));
3699  (void)dst_local_size;
3700  const size_type src_local_size = src.end() - src.begin();
3701  AssertDimension (src_local_size, static_cast<size_type>(matrix->DomainMap().NumMyElements()));
3702  (void)src_local_size;
3703 
3704  Epetra_MultiVector tril_dst (View, matrix->RangeMap(), dst.begin(),
3705  matrix->DomainMap().NumMyPoints(), 1);
3706  Epetra_MultiVector tril_src (View, matrix->DomainMap(),
3707  const_cast<TrilinosScalar *>(src.begin()),
3708  matrix->DomainMap().NumMyPoints(), 1);
3709 
3710  const int ierr = matrix->Multiply (false, tril_src, tril_dst);
3711  Assert (ierr == 0, ExcTrilinosError(ierr));
3712  (void)ierr; // removes -Wunused-variable in optimized mode
3713  }
3714 
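  // Illustrative sketch (not part of the original code): multiply a matrix
  // 'A' by a vector 'x' partitioned like the matrix columns and store the
  // result in 'y', partitioned like the matrix rows. Constructing the
  // vectors from an IndexSet and a communicator is an assumption about
  // TrilinosWrappers::MPI::Vector, not something defined in this file.
  //
  //   TrilinosWrappers::MPI::Vector x (locally_owned_columns, mpi_communicator);
  //   TrilinosWrappers::MPI::Vector y (locally_owned_rows, mpi_communicator);
  //   A.vmult (y, x);
 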
3715 
3716 
3717  template <typename VectorType>
3718  inline
3719  void
3720  SparseMatrix::Tvmult (VectorType &dst,
3721  const VectorType &src) const
3722  {
3723  Assert (&src != &dst, ExcSourceEqualsDestination());
3724  Assert (matrix->Filled(), ExcMatrixNotCompressed());
3725 
3726  internal::SparseMatrix::check_vector_map_equality(*matrix, dst, src);
3727  const size_type dst_local_size = dst.end() - dst.begin();
3728  AssertDimension (dst_local_size, static_cast<size_type>(matrix->DomainMap().NumMyElements()));
3729  const size_type src_local_size = src.end() - src.begin();
3730  AssertDimension (src_local_size, static_cast<size_type>(matrix->RangeMap().NumMyElements()));
3731 
3732  Epetra_MultiVector tril_dst (View, matrix->DomainMap(), dst.begin(),
3733  matrix->DomainMap().NumMyPoints(), 1);
3734  Epetra_MultiVector tril_src (View, matrix->RangeMap(),
3735  const_cast<double *>(src.begin()),
3736  matrix->DomainMap().NumMyPoints(), 1);
3737 
3738  const int ierr = matrix->Multiply (true, tril_src, tril_dst);
3739  Assert (ierr == 0, ExcTrilinosError(ierr));
3740  (void)ierr; // removes -Wunused-variable in optimized mode
3741  }
3742 
3743 
3744 
3745  template <typename VectorType>
3746  inline
3747  void
3748  SparseMatrix::vmult_add (VectorType &dst,
3749  const VectorType &src) const
3750  {
3751  Assert (&src != &dst, ExcSourceEqualsDestination());
3752 
3753  // Reinit a temporary vector with fast argument set, which does not
3754  // overwrite the content (to save time). However, the
3755  // TrilinosWrappers::Vector classes do not support this, so create a
3756  // deal.II local vector that has this fast setting. It will be accepted in
3757  // vmult because it only checks the local size.
3758  ::Vector<TrilinosScalar> temp_vector;
3759  temp_vector.reinit(dst.end()-dst.begin(), true);
3760  ::VectorView<TrilinosScalar> src_view(src.end()-src.begin(), src.begin());
3761  ::VectorView<TrilinosScalar> dst_view(dst.end()-dst.begin(), dst.begin());
3762  vmult (temp_vector, static_cast<const ::Vector<TrilinosScalar>&>(src_view));
3763  if (dst_view.size() > 0)
3764  dst_view += temp_vector;
3765  }
3766 
3767 
3768 
3769  template <typename VectorType>
3770  inline
3771  void
3772  SparseMatrix::Tvmult_add (VectorType &dst,
3773  const VectorType &src) const
3774  {
3775  Assert (&src != &dst, ExcSourceEqualsDestination());
3776 
3777  // Reinit a temporary vector with fast argument set, which does not
3778  // overwrite the content (to save time). However, the
3779  // TrilinosWrappers::Vector classes do not support this, so create a
3780  // deal.II local vector that has this fast setting. It will be accepted in
3781  // vmult because it only checks the local size.
3782  ::Vector<TrilinosScalar> temp_vector;
3783  temp_vector.reinit(dst.end()-dst.begin(), true);
3784  ::VectorView<TrilinosScalar> src_view(src.end()-src.begin(), src.begin());
3785  ::VectorView<TrilinosScalar> dst_view(dst.end()-dst.begin(), dst.begin());
3786  Tvmult (temp_vector, static_cast<const ::Vector<TrilinosScalar>&>(src_view));
3787  if (dst_view.size() > 0)
3788  dst_view += temp_vector;
3789  }
3790 
3791 
3792 
3793  inline
3794  TrilinosScalar
3795  SparseMatrix::matrix_norm_square (const VectorBase &v) const
3796  {
 3797  Assert (row_partitioner().SameAs(domain_partitioner()),
 3798  ExcNotQuadratic());
3799 
3800  VectorBase temp_vector;
3801  temp_vector.reinit(v, true);
3802 
3803  vmult (temp_vector, v);
3804  return temp_vector*v;
3805  }
3806 
3807 
3808 
3809  inline
3810  TrilinosScalar
3811  SparseMatrix::matrix_scalar_product (const VectorBase &u,
3812  const VectorBase &v) const
3813  {
 3814  Assert (row_partitioner().SameAs(domain_partitioner()),
 3815  ExcNotQuadratic());
3816 
3817  VectorBase temp_vector;
3818  temp_vector.reinit(v, true);
3819 
3820  vmult (temp_vector, v);
3821  return u*temp_vector;
3822  }
3823 
3824 
3825 
3826  inline
3827  TrilinosScalar
3828  SparseMatrix::residual (VectorBase &dst,
3829  const VectorBase &x,
3830  const VectorBase &b) const
3831  {
3832  vmult (dst, x);
3833  dst -= b;
3834  dst *= -1.;
3835 
3836  return dst.l2_norm();
3837  }
3838 
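  // Illustrative sketch (not part of the original code): residual() computes
  // dst = b - A*x and returns its l2 norm, so a single call yields the
  // residual norm of a linear system. 'A', 'r', 'x' and 'b' are placeholders.
  //
  //   const TrilinosScalar res_norm = A.residual (r, x, b);   // r = b - A*x
 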
3839 
3840  inline
3841  const Epetra_CrsMatrix &
 3842  SparseMatrix::trilinos_matrix () const
 3843  {
3844  return static_cast<const Epetra_CrsMatrix &>(*matrix);
3845  }
3846 
3847 
3848 
3849  inline
3850  const Epetra_CrsGraph &
 3851  SparseMatrix::trilinos_sparsity_pattern () const
 3852  {
3853  return matrix->Graph();
3854  }
3855 
3856 
3857 
3858  inline
3859  const Epetra_Map &
 3860  SparseMatrix::domain_partitioner () const
 3861  {
3862  return matrix->DomainMap();
3863  }
3864 
3865 
3866 
3867  inline
3868  const Epetra_Map &
 3869  SparseMatrix::range_partitioner () const
 3870  {
3871  return matrix->RangeMap();
3872  }
3873 
3874 
3875 
3876  inline
3877  const Epetra_Map &
 3878  SparseMatrix::row_partitioner () const
 3879  {
3880  return matrix->RowMap();
3881  }
3882 
3883 
3884 
3885  inline
3886  const Epetra_Map &
 3887  SparseMatrix::col_partitioner () const
 3888  {
3889  return matrix->ColMap();
3890  }
3891 
3892 
3893 
3894  inline
3895  void
 3896  SparseMatrix::prepare_add ()
 3897  {
3898  //nothing to do here
3899  }
3900 
3901 
3902 
3903  inline
3904  void
 3905  SparseMatrix::prepare_set ()
 3906  {
3907  //nothing to do here
3908  }
3909 
3910 
3911 
3912 #endif // DOXYGEN
3913 
3914 }
3915 
3916 
3917 DEAL_II_NAMESPACE_CLOSE
3918 
3919 
3920 #endif // DEAL_II_WITH_TRILINOS
3921 
3922 
3923 /*----------------------- trilinos_sparse_matrix.h --------------------*/
3924 
3925 #endif
3926 /*----------------------- trilinos_sparse_matrix.h --------------------*/