sparse.h
// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//
// The following are very good references for terminology, data structures,
// and algorithms:
//
// I.S. Duff, A.M. Erisman and J.K. Reid, "Direct Methods for Sparse Matrices",
// Clarendon, Oxford, UK, 1987, ISBN 0-19-853421-3,
// http://www.amazon.com/dp/0198534213.
//
// T.A. Davis, "Direct Methods for Sparse Linear Systems", SIAM, Philadelphia,
// 2006, ISBN-13: 978-0-898716-13, http://www.amazon.com/dp/0898716136.
//
// Both books also contain a wealth of references.

#ifndef OR_TOOLS_LP_DATA_SPARSE_H_
#define OR_TOOLS_LP_DATA_SPARSE_H_

#include <string>

namespace operations_research {
namespace glop {

class CompactSparseMatrixView;

// --------------------------------------------------------
// SparseMatrix
// --------------------------------------------------------
// SparseMatrix is a class for sparse matrices suitable for computation.
// Data is represented using the so-called compressed-column storage scheme.
// Entries (row, col, value) are stored by column using a SparseColumn.
//
// Citing [Duff et al, 1987], a matrix is sparse if many of its coefficients
// are zero and if there is an advantage in exploiting its zeros.
// For practical reasons, not all zeros are exploited (for example those that
// result from calculations). The term entry refers to those coefficients that
// are handled explicitly. All non-zeros are entries while some zero
// coefficients may also be entries.
//
// Note that no special ordering of entries is assumed.
class SparseMatrix {
 public:
  SparseMatrix();

  // Useful for testing. This makes it possible to write:
  //   SparseMatrix matrix {
  //      {1, 2, 3},
  //      {4, 5, 6},
  //      {7, 8, 9}};
#if (!defined(_MSC_VER) || _MSC_VER >= 1800)
  SparseMatrix(
      std::initializer_list<std::initializer_list<Fractional>> init_list);
#endif

  // Clears internal data structure, i.e. erases all the columns and sets
  // the number of rows to zero.
  void Clear();

  // Returns true if the matrix is empty.
  // That is, if num_rows() or num_cols() is zero.
  bool IsEmpty() const;

  // Cleans the columns, i.e. removes zero-value entries, removes duplicate
  // entries and sorts the remaining entries in increasing row order.
  // Call with care: runs in O(num_cols * column_cleanup), with each column
  // cleanup running in O(num_entries * log(num_entries)).
  void CleanUp();

  // Calls CheckNoDuplicates() on all columns, useful for doing a DCHECK.
  bool CheckNoDuplicates() const;

  // Calls IsCleanedUp() on all columns, useful for doing a DCHECK.
  bool IsCleanedUp() const;

  // Changes the number of rows of this matrix.
  void SetNumRows(RowIndex num_rows);

  // Appends an empty column and returns its index.
  ColIndex AppendEmptyColumn();

  // Appends a unit vector defined by the single entry (row, value).
  // Note that the row should be smaller than the number of rows of the matrix.
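  // Example (a sketch, not part of this header): appending the unit column
  // with value 4.0 at row 2 to a SparseMatrix named 'matrix' that has at
  // least 3 rows:
  //   matrix.AppendUnitVector(RowIndex(2), 4.0);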
  void AppendUnitVector(RowIndex row, Fractional value);

  // Swaps the content of this SparseMatrix with the one passed as argument.
  // Works in O(1).
  void Swap(SparseMatrix* matrix);

  // Populates the matrix with num_cols columns of zeros. As the number of rows
  // is specified by num_rows, the matrix is not necessarily square.
  // Previous columns/values are deleted.
  void PopulateFromZero(RowIndex num_rows, ColIndex num_cols);

  // Populates the matrix from the identity matrix of size num_cols.
  // Previous columns/values are deleted.
  void PopulateFromIdentity(ColIndex num_cols);

  // Populates the matrix from the transpose of the given matrix.
  // Note that this preserves the property of a lower/upper triangular matrix
  // having its diagonal coefficients first/last in each column. It actually
  // sorts the entries in each column by their row indices.
  template <typename Matrix>
  void PopulateFromTranspose(const Matrix& input);

  // Populates a SparseMatrix from another one (copy). Note that this runs in
  // O(number of entries in the matrix).
  void PopulateFromSparseMatrix(const SparseMatrix& matrix);

  // Populates a SparseMatrix from the image of a matrix A through the given
  // row_perm and inverse_col_perm. See permutation.h for more details.
  template <typename Matrix>
  void PopulateFromPermutedMatrix(const Matrix& a,
                                  const RowPermutation& row_perm,
                                  const ColumnPermutation& inverse_col_perm);

  // Populates a SparseMatrix from the result of alpha * A + beta * B,
  // where alpha and beta are Fractionals and A and B are sparse matrices.
  void PopulateFromLinearCombination(Fractional alpha, const SparseMatrix& a,
                                     Fractional beta, const SparseMatrix& b);

  // Multiplies SparseMatrix a by SparseMatrix b.
  void PopulateFromProduct(const SparseMatrix& a, const SparseMatrix& b);
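
  // Example (a sketch; 'a' and 'b' are assumed to be SparseMatrix objects of
  // compatible dimensions):
  //   SparseMatrix c;
  //   c.PopulateFromLinearCombination(2.0, a, -1.0, b);  // c = 2*a - b.
  //   SparseMatrix d;
  //   d.PopulateFromProduct(a, b);                        // d = a*b.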

  // Removes the marked columns from the matrix and adjusts its size.
  // This runs in O(num_cols).
  void DeleteColumns(const DenseBooleanRow& columns_to_delete);

  // Applies the given row permutation and deletes the rows for which
  // permutation[row] is kInvalidRow. Sets the new number of rows to num_rows.
  // This runs in O(num_entries).
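  // Example (a sketch, assuming RowPermutation -- a Permutation<RowIndex> from
  // permutation.h -- can be sized and assigned element by element as below):
  // deleting row 1 of a 3-row matrix while remapping rows 0 and 2 to rows 0
  // and 1:
  //   RowPermutation perm(RowIndex(3));
  //   perm[RowIndex(0)] = RowIndex(0);
  //   perm[RowIndex(1)] = kInvalidRow;
  //   perm[RowIndex(2)] = RowIndex(1);
  //   matrix.DeleteRows(RowIndex(2), perm);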
  void DeleteRows(RowIndex num_rows, const RowPermutation& permutation);

  // Appends all rows from the given matrix to the calling object after the
  // last row of the calling object. Both matrices must have the same number
  // of columns. The method returns true if the rows were added successfully
  // and false if it can't add the rows because the two matrices have a
  // different number of columns.
  bool AppendRowsFromSparseMatrix(const SparseMatrix& matrix);

  // Applies the row permutation.
  void ApplyRowPermutation(const RowPermutation& row_perm);

  // Returns the coefficient at position row in column col.
  // Call with care: runs in O(num_entries_in_col) as entries may not be sorted.
  Fractional LookUpValue(RowIndex row, ColIndex col) const;

  // Returns true if the matrix equals a (with a maximum error smaller than
  // the given tolerance).
  bool Equals(const SparseMatrix& a, Fractional tolerance) const;

  // Returns, in min_magnitude and max_magnitude, the minimum and maximum
  // magnitudes of the non-zero coefficients of the calling object.
  void ComputeMinAndMaxMagnitudes(Fractional* min_magnitude,
                                  Fractional* max_magnitude) const;

  // Returns the matrix dimensions.
  RowIndex num_rows() const { return num_rows_; }
  ColIndex num_cols() const { return ColIndex(columns_.size()); }

  // Accesses the underlying sparse columns.
  const SparseColumn& column(ColIndex col) const { return columns_[col]; }
  SparseColumn* mutable_column(ColIndex col) { return &(columns_[col]); }

  // Returns the total number of entries in the matrix.
  // Runs in O(num_cols).
  EntryIndex num_entries() const;

  // Computes the 1-norm of the matrix.
  // The 1-norm |A| is defined as max_j sum_i |a_ij| or
  // max_col sum_row |a(row,col)|.
  Fractional ComputeOneNorm() const;

  // Computes the oo-norm (infinity-norm) of the matrix.
  // The oo-norm |A| is defined as max_i sum_j |a_ij| or
  // max_row sum_col |a(row,col)|.
  Fractional ComputeInfinityNorm() const;
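
  // Example: for the 2-by-2 matrix [[1, -2], [3, 4]], ComputeOneNorm() returns
  // max(|1| + |3|, |-2| + |4|) = 6 and ComputeInfinityNorm() returns
  // max(|1| + |-2|, |3| + |4|) = 7.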

  // Returns a dense representation of the matrix.
  std::string Dump() const;

 private:
  // Resets the internal data structure and creates an empty rectangular
  // matrix of size num_rows x num_cols.
  void Reset(ColIndex num_cols, RowIndex num_rows);

  // Vector of sparse columns.
  StrictITIVector<ColIndex, SparseColumn> columns_;

  // Number of rows. This is needed as sparse columns don't have a maximum
  // number of rows.
  RowIndex num_rows_;

  DISALLOW_COPY_AND_ASSIGN(SparseMatrix);
};

// A matrix constructed from a list of already existing SparseColumn. This
// class does not take ownership of the underlying columns, and thus they must
// outlive this class (and keep the same address in memory).
class MatrixView {
 public:
  MatrixView() {}
  explicit MatrixView(const SparseMatrix& matrix) {
    PopulateFromMatrix(matrix);
  }

  // Takes all the columns of the given matrix.
  void PopulateFromMatrix(const SparseMatrix& matrix) {
    const ColIndex num_cols = matrix.num_cols();
    columns_.resize(num_cols, nullptr);
    for (ColIndex col(0); col < num_cols; ++col) {
      columns_[col] = &matrix.column(col);
    }
    num_rows_ = matrix.num_rows();
  }

  // Takes all the columns of the first matrix followed by the columns of the
  // second matrix.
  void PopulateFromMatrixPair(const SparseMatrix& matrix_a,
                              const SparseMatrix& matrix_b) {
    const ColIndex num_cols = matrix_a.num_cols() + matrix_b.num_cols();
    columns_.resize(num_cols, nullptr);
    for (ColIndex col(0); col < matrix_a.num_cols(); ++col) {
      columns_[col] = &matrix_a.column(col);
    }
    for (ColIndex col(0); col < matrix_b.num_cols(); ++col) {
      columns_[matrix_a.num_cols() + col] = &matrix_b.column(col);
    }
    num_rows_ = std::max(matrix_a.num_rows(), matrix_b.num_rows());
  }

  // Takes only the columns of the given matrix that belong to the given basis.
  void PopulateFromBasis(const MatrixView& matrix,
                         const RowToColMapping& basis) {
    columns_.resize(RowToColIndex(basis.size()), nullptr);
    for (RowIndex row(0); row < basis.size(); ++row) {
      columns_[RowToColIndex(row)] = &matrix.column(basis[row]);
    }
    num_rows_ = matrix.num_rows();
  }

  // Same behavior as the SparseMatrix functions above.
  bool IsEmpty() const { return columns_.empty(); }
  RowIndex num_rows() const { return num_rows_; }
  ColIndex num_cols() const { return columns_.size(); }
  const SparseColumn& column(ColIndex col) const { return *columns_[col]; }
  EntryIndex num_entries() const;
  Fractional ComputeOneNorm() const;
  Fractional ComputeInfinityNorm() const;

 private:
  RowIndex num_rows_;
  StrictITIVector<ColIndex, const SparseColumn*> columns_;
};
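
// Example (a sketch; 'a' and 'b' are assumed to be SparseMatrix objects):
// viewing the columns of two matrices side by side without copying them. The
// viewed matrices must outlive the view.
//   MatrixView view(a);                 // Columns of a only.
//   view.PopulateFromMatrixPair(a, b);  // Columns of a, then columns of b.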

extern template void SparseMatrix::PopulateFromTranspose<SparseMatrix>(
    const SparseMatrix& input);
extern template void SparseMatrix::PopulateFromPermutedMatrix<SparseMatrix>(
    const SparseMatrix& a, const RowPermutation& row_perm,
    const ColumnPermutation& inverse_col_perm);
extern template void
SparseMatrix::PopulateFromPermutedMatrix<CompactSparseMatrixView>(
    const CompactSparseMatrixView& a, const RowPermutation& row_perm,
    const ColumnPermutation& inverse_col_perm);

// Another matrix representation which is more efficient than a SparseMatrix
// but doesn't allow matrix modification. It is faster to construct, uses less
// memory and provides better cache locality when iterating over the non-zeros
// of the matrix columns.
class CompactSparseMatrix {
 public:
  CompactSparseMatrix() {}

  // Convenient constructors for tests.
  // TODO(user): If this is needed in production code, it can be done faster.
  explicit CompactSparseMatrix(const SparseMatrix& matrix) {
    PopulateFromMatrixView(MatrixView(matrix));
  }

  // Creates a CompactSparseMatrix from the given MatrixView. The matrices are
  // the same, only the representation differs. Note that the entry order in
  // each column is preserved.
  void PopulateFromMatrixView(const MatrixView& input);

  // Creates a CompactSparseMatrix from the transpose of the given
  // CompactSparseMatrix. Note that the entries in each column will be ordered
  // by row indices.
  void PopulateFromTranspose(const CompactSparseMatrix& input);

  // Clears the matrix and sets its number of rows. If none of the Populate()
  // functions has been called, Reset() must be called before calling any of
  // the Add*() functions below.
  void Reset(RowIndex num_rows);

  // Adds a dense column to the CompactSparseMatrix (only the non-zeros will
  // actually be stored). This works in O(input.size()) and returns the index
  // of the added column.
  ColIndex AddDenseColumn(const DenseColumn& dense_column);

  // Same as AddDenseColumn(), but only adds the non-zeros from the given
  // start.
  ColIndex AddDenseColumnPrefix(const DenseColumn& dense_column,
                                RowIndex start);

  // Same as AddDenseColumn(), but uses the given non_zeros pattern of the
  // input. If non_zeros is empty, this actually calls AddDenseColumn().
  ColIndex AddDenseColumnWithNonZeros(const DenseColumn& dense_column,
                                      const std::vector<RowIndex>& non_zeros);

  // Adds a dense column for which we know the non-zero positions and clears
  // it. Note that this function supports duplicate indices in non_zeros. The
  // complexity is in O(non_zeros.size()). Only the indices present in
  // non_zeros will be cleared. Returns the index of the added column.
  ColIndex AddAndClearColumnWithNonZeros(DenseColumn* column,
                                         std::vector<RowIndex>* non_zeros);
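
  // Example (a sketch, not part of this header): building a 3-row matrix
  // column by column.
  //   CompactSparseMatrix compact;
  //   compact.Reset(RowIndex(3));
  //   DenseColumn dense;
  //   dense.resize(RowIndex(3), 0.0);
  //   dense[RowIndex(1)] = 5.0;
  //   compact.AddDenseColumn(dense);  // Only the non-zero (row 1, 5.0) is kept.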

  // Returns the number of entries (i.e. degree) of the given column.
  EntryIndex ColumnNumEntries(ColIndex col) const {
    return starts_[col + 1] - starts_[col];
  }

  // Returns the matrix dimensions. See the same functions in SparseMatrix.
  EntryIndex num_entries() const {
    DCHECK_EQ(coefficients_.size(), rows_.size());
    return coefficients_.size();
  }
  RowIndex num_rows() const { return num_rows_; }
  ColIndex num_cols() const { return num_cols_; }

  // Returns whether or not this matrix contains any non-zero entries.
  bool IsEmpty() const {
    DCHECK_EQ(coefficients_.size(), rows_.size());
    return coefficients_.empty();
  }

  // Functions to iterate over the entries of a given column:
  //   for (const EntryIndex i : compact_matrix_.Column(col)) {
  //     const RowIndex row = compact_matrix_.EntryRow(i);
  //     const Fractional coefficient = compact_matrix_.EntryCoefficient(i);
  //   }
  ::util::IntegerRange<EntryIndex> Column(ColIndex col) const {
    return ::util::IntegerRange<EntryIndex>(starts_[col], starts_[col + 1]);
  }
  Fractional EntryCoefficient(EntryIndex i) const { return coefficients_[i]; }
  RowIndex EntryRow(EntryIndex i) const { return rows_[i]; }

  ColumnView column(ColIndex col) const {
    DCHECK_LT(col, num_cols_);

    // Note that the start may be equal to rows_.size() if the last columns
    // are empty, which is why we don't use &rows_[start].
    const EntryIndex start = starts_[col];
    return ColumnView(starts_[col + 1] - start, rows_.data() + start.value(),
                      coefficients_.data() + start.value());
  }

  // Returns true if the given column is empty. Note that for a triangular
  // matrix this does not include the diagonal coefficient (see below).
  bool ColumnIsEmpty(ColIndex col) const {
    return starts_[col + 1] == starts_[col];
  }

  // Returns the scalar product of the given row vector with the column of
  // index col of this matrix. This function is declared in the .h for
  // efficiency.
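  // For example, if the column has entries {(row 0, 1.0), (row 2, 2.0)} and
  // vector is [10, 20, 30], the result is 1.0 * 10 + 2.0 * 30 = 70.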
  Fractional ColumnScalarProduct(ColIndex col, const DenseRow& vector) const {
    Fractional result = 0.0;
    for (const EntryIndex i : Column(col)) {
      result += EntryCoefficient(i) * vector[RowToColIndex(EntryRow(i))];
    }
    return result;
  }

  // Adds a multiple of the given column of this matrix to the given
  // dense_column. If multiplier is 0.0, this function does nothing. This
  // function is declared in the .h for efficiency.
  void ColumnAddMultipleToDenseColumn(ColIndex col, Fractional multiplier,
                                      DenseColumn* dense_column) const {
    if (multiplier == 0.0) return;
    RETURN_IF_NULL(dense_column);
    for (const EntryIndex i : Column(col)) {
      (*dense_column)[EntryRow(i)] += multiplier * EntryCoefficient(i);
    }
  }

  // Same as ColumnAddMultipleToDenseColumn() but also adds the new non-zeros
  // to the non_zeros vector. A non-zero is "new" if is_non_zero[row] was
  // false, and we update dense_column[row]. This function also updates
  // is_non_zero.
  void ColumnAddMultipleToSparseScatteredColumn(ColIndex col,
                                                Fractional multiplier,
                                                ScatteredColumn* column) const {
    if (multiplier == 0.0) return;
    RETURN_IF_NULL(column);
    for (const EntryIndex i : Column(col)) {
      const RowIndex row = EntryRow(i);
      column->Add(row, multiplier * EntryCoefficient(i));
    }
  }

  // Copies the given column of this matrix into the given dense_column.
  // This function is declared in the .h for efficiency.
  void ColumnCopyToDenseColumn(ColIndex col, DenseColumn* dense_column) const {
    RETURN_IF_NULL(dense_column);
    dense_column->AssignToZero(num_rows_);
    ColumnCopyToClearedDenseColumn(col, dense_column);
  }

  // Same as ColumnCopyToDenseColumn() but assumes the column to be initially
  // all zero.
  void ColumnCopyToClearedDenseColumn(ColIndex col,
                                      DenseColumn* dense_column) const {
    RETURN_IF_NULL(dense_column);
    dense_column->resize(num_rows_, 0.0);
    for (const EntryIndex i : Column(col)) {
      (*dense_column)[EntryRow(i)] = EntryCoefficient(i);
    }
  }

  // Same as ColumnCopyToClearedDenseColumn() but also fills non_zeros.
  void ColumnCopyToClearedDenseColumnWithNonZeros(
      ColIndex col, DenseColumn* dense_column,
      RowIndexVector* non_zeros) const {
    RETURN_IF_NULL(dense_column);
    dense_column->resize(num_rows_, 0.0);
    non_zeros->clear();
    for (const EntryIndex i : Column(col)) {
      const RowIndex row = EntryRow(i);
      (*dense_column)[row] = EntryCoefficient(i);
      non_zeros->push_back(row);
    }
  }

  void Swap(CompactSparseMatrix* other);

 protected:
  // The matrix dimensions, properly updated by full and incremental builders.
  RowIndex num_rows_;
  ColIndex num_cols_;

  // Holds the columns' non-zero coefficients and row positions.
  // The entries for the column of index col are stored in the entries
  // [starts_[col], starts_[col + 1]).
  StrictITIVector<EntryIndex, Fractional> coefficients_;
  StrictITIVector<EntryIndex, RowIndex> rows_;
  StrictITIVector<ColIndex, EntryIndex> starts_;

 private:
  DISALLOW_COPY_AND_ASSIGN(CompactSparseMatrix);
};

// A matrix view of the basis columns of a CompactSparseMatrix, with the basis
// specified as a RowToColMapping. This class does not take ownership of the
// underlying matrix or basis, and thus they must outlive this class (and keep
// the same address in memory).
class CompactSparseMatrixView {
 public:
  CompactSparseMatrixView(const CompactSparseMatrix* compact_matrix,
                          const RowToColMapping* basis)
      : compact_matrix_(*compact_matrix), basis_(*basis) {}

  // Same behavior as the SparseMatrix functions above.
  bool IsEmpty() const { return compact_matrix_.IsEmpty(); }
  RowIndex num_rows() const { return compact_matrix_.num_rows(); }
  ColIndex num_cols() const { return RowToColIndex(basis_.size()); }
  const ColumnView column(ColIndex col) const {
    return compact_matrix_.column(basis_[ColToRowIndex(col)]);
  }
  EntryIndex num_entries() const;
  Fractional ComputeOneNorm() const;
  Fractional ComputeInfinityNorm() const;

 private:
  // We require that the underlying CompactSparseMatrix and RowToColMapping
  // continue to own the (potentially large) data accessed via this view.
  const CompactSparseMatrix& compact_matrix_;
  const RowToColMapping& basis_;
};
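
// Example (a sketch; 'compact_matrix' and 'basis' are assumed to be an
// existing CompactSparseMatrix and RowToColMapping): viewing only the basis
// columns without copying them.
//   CompactSparseMatrixView basis_view(&compact_matrix, &basis);
//   const Fractional norm = basis_view.ComputeOneNorm();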

// Specialization of a CompactSparseMatrix used for triangular matrices.
// To be able to solve triangular systems as efficiently as possible, the
// diagonal entries are stored in a separate vector and not in the underlying
// CompactSparseMatrix.
//
// Advanced usage: this class also supports matrices that can be permuted into
// a triangular matrix, and some functions work directly on such matrices.
class TriangularMatrix : private CompactSparseMatrix {
 public:
  TriangularMatrix() : all_diagonal_coefficients_are_one_(true) {}

  // Only a subset of the functions from CompactSparseMatrix are exposed (note
  // the private inheritance). They are extended to deal with diagonal
  // coefficients properly.
  void PopulateFromTranspose(const TriangularMatrix& input);
  void Swap(TriangularMatrix* other);
  bool IsEmpty() const { return diagonal_coefficients_.empty(); }
  RowIndex num_rows() const { return num_rows_; }
  ColIndex num_cols() const { return num_cols_; }
  EntryIndex num_entries() const {
    return EntryIndex(num_cols_.value()) + coefficients_.size();
  }

  // On top of the CompactSparseMatrix functionality, TriangularMatrix::Reset()
  // also pre-allocates space of size col_capacity for a number of internal
  // vectors. This helps reduce costly push_back operations for large problems.
  //
  // WARNING: Reset() must be called with a sufficiently large col_capacity
  // prior to any Add* calls (e.g., AddTriangularColumn).
  void Reset(RowIndex num_rows, ColIndex col_capacity);

  // Constructs a triangular matrix from the given SparseMatrix. The input is
  // assumed to be lower or upper triangular without any permutations. This is
  // checked in debug mode.
  void PopulateFromTriangularSparseMatrix(const SparseMatrix& input);

  // Functions to create a triangular matrix incrementally, column by column.
  // A client needs to call Reset() first, and then each column must be added
  // by calling one of the three functions below; see the example sketch after
  // their declarations.
  //
  // Note that the row indices of the columns are allowed to be permuted: the
  // diagonal entry of column #col is not necessarily on row #col. This is why
  // these functions require the 'diagonal_row' parameter. The permutation can
  // be fixed at the end by a call to ApplyRowPermutationToNonDiagonalEntries()
  // or accounted for directly in the case of PermutedLowerSparseSolve().
  void AddTriangularColumn(const ColumnView& column, RowIndex diagonal_row);
  void AddTriangularColumnWithGivenDiagonalEntry(const SparseColumn& column,
                                                 RowIndex diagonal_row,
                                                 Fractional diagonal_value);
  void AddDiagonalOnlyColumn(Fractional diagonal_value);
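
  // Example (a sketch, assuming SparseColumn::SetCoefficient() from
  // sparse_column.h): building the 2x2 lower-triangular matrix
  //   [1 0]
  //   [3 2]
  // column by column. The column passed to
  // AddTriangularColumnWithGivenDiagonalEntry() holds only the non-diagonal
  // entries; the diagonal value is given separately.
  //   TriangularMatrix triangular;
  //   triangular.Reset(RowIndex(2), ColIndex(2));
  //   SparseColumn first;
  //   first.SetCoefficient(RowIndex(1), 3.0);
  //   triangular.AddTriangularColumnWithGivenDiagonalEntry(
  //       first, RowIndex(0), 1.0);
  //   triangular.AddDiagonalOnlyColumn(2.0);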

  // Adds the given sparse column divided by diagonal_coefficient.
  // The diagonal_row is assumed to be present and its value should be the
  // same as the one given in diagonal_coefficient. Note that this function
  // tests for zero coefficients in the input column and removes them.
  void AddAndNormalizeTriangularColumn(const SparseColumn& column,
                                       RowIndex diagonal_row,
                                       Fractional diagonal_coefficient);

  // Applies the given row permutation to all entries except the diagonal ones.
  void ApplyRowPermutationToNonDiagonalEntries(const RowPermutation& row_perm);

  // Copies a triangular column with its diagonal entry to the given
  // SparseColumn.
  void CopyColumnToSparseColumn(ColIndex col, SparseColumn* output) const;

  // Copies a triangular matrix to the given SparseMatrix.
  void CopyToSparseMatrix(SparseMatrix* output) const;

  // Returns the index of the first column which is not an identity column
  // (i.e. a column j with only one entry of value 1 at the j-th row). This is
  // always zero if the matrix is not triangular.
  ColIndex GetFirstNonIdentityColumn() const {
    return first_non_identity_column_;
  }

  // Returns the diagonal coefficient of the given column.
  Fractional GetDiagonalCoefficient(ColIndex col) const {
    return diagonal_coefficients_[col];
  }

  // Returns true iff the column contains no non-diagonal entries.
  bool ColumnIsDiagonalOnly(ColIndex col) const {
    return ColumnIsEmpty(col);
  }

  // --------------------------------------------------------------------------
  // Triangular solve functions.
  //
  // All the functions containing the word Lower (resp. Upper) require the
  // matrix to be lower (resp. upper) triangular without any permutation.
  // --------------------------------------------------------------------------

  // Solves the system L.x = rhs for a lower triangular matrix.
  // The result overwrites rhs.
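  // For example (a sketch), for the 2x2 lower-triangular matrix [[1, 0], [3, 2]]
  // built in the construction sketch above and rhs = [1, 7]:
  //   DenseColumn rhs;
  //   rhs.resize(RowIndex(2), 0.0);
  //   rhs[RowIndex(0)] = 1.0;
  //   rhs[RowIndex(1)] = 7.0;
  //   triangular.LowerSolve(&rhs);  // Now rhs holds x = [1, 2].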
  void LowerSolve(DenseColumn* rhs) const;

  // Solves the system U.x = rhs for an upper triangular matrix.
  void UpperSolve(DenseColumn* rhs) const;

  // Solves the system Transpose(U).x = rhs where U is upper triangular.
  // This can be used to do a left-solve for a row vector (i.e., y.U = rhs).
  void TransposeUpperSolve(DenseColumn* rhs) const;

  // This assumes that the rhs is all zero before the given position.
  void LowerSolveStartingAt(ColIndex start, DenseColumn* rhs) const;

  // Solves the system Transpose(L).x = rhs, where L is lower triangular.
  // This can be used to do a left-solve for a row vector (i.e., y.L = rhs).
  void TransposeLowerSolve(DenseColumn* rhs) const;

  // Hyper-sparse version of the triangular solve functions. The passed
  // non_zero_rows should contain the positions of the symbolic non-zeros of
  // the result in the order in which they need to be accessed (or in the
  // reverse order for the Reverse*() versions).
  //
  // The non-zero vector is mutable so that the symbolic non-zeros that are
  // actually zero because of numerical cancellations can be removed.
  //
  // The non-zeros can be computed by one of these two methods:
  // - ComputeRowsToConsiderWithDfs(), which will give them in the reverse
  //   order of the one in which they need to be accessed. This is only a
  //   topological order, and it will not necessarily be "sorted".
  // - ComputeRowsToConsiderInSortedOrder(), which will always give them in
  //   increasing order.
  //
  // Note that if the non-zeros are given in a sorted order, then the
  // hyper-sparse functions will return EXACTLY the same results as the
  // non-hyper-sparse versions above.
  //
  // For a given solve, here is the required order:
  // - For a lower solve, increasing non-zeros order.
  // - For an upper solve, decreasing non-zeros order.
  // - For a transpose lower solve, decreasing non-zeros order.
  // - For a transpose upper solve, increasing non-zeros order.
  // (A usage sketch follows the declarations below.)
  //
  // For a general discussion of hyper-sparsity in LP, see:
  // J.A.J. Hall, K.I.M. McKinnon, "Exploiting hyper-sparsity in the revised
  // simplex method", December 1999, MS 99-014.
  // http://www.maths.ed.ac.uk/hall/MS-99/MS9914.pdf
  void HyperSparseSolve(DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
  void HyperSparseSolveWithReversedNonZeros(
      DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
  void TransposeHyperSparseSolve(DenseColumn* rhs,
                                 RowIndexVector* non_zero_rows) const;
  void TransposeHyperSparseSolveWithReversedNonZeros(
      DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
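
  // Example (a sketch): a hyper-sparse lower solve where the symbolic
  // non-zeros of the result are computed first from the non-zeros of the rhs.
  //   RowIndexVector non_zeros = {RowIndex(0)};  // Non-zero positions of rhs.
  //   triangular.ComputeRowsToConsiderInSortedOrder(&non_zeros);
  //   // Increasing (sorted) order is the required order for a lower solve.
  //   triangular.HyperSparseSolve(&rhs, &non_zeros);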

  // Given the positions of the non-zeros of a vector, computes the non-zero
  // positions of the vector after a solve by this triangular matrix. The order
  // of the returned non-zero positions will be in the REVERSE elimination
  // order. If the function detects that there are too many non-zeros, then it
  // aborts early and non_zero_rows is cleared.
  void ComputeRowsToConsiderWithDfs(RowIndexVector* non_zero_rows) const;

  // Same as ComputeRowsToConsiderWithDfs() but always returns the non-zeros
  // sorted by rows. It is up to the client to call the direct or reverse
  // hyper-sparse solve function depending on whether the matrix is upper or
  // lower triangular.
  void ComputeRowsToConsiderInSortedOrder(RowIndexVector* non_zero_rows,
                                          Fractional sparsity_ratio,
                                          Fractional num_ops_ratio) const;
  void ComputeRowsToConsiderInSortedOrder(RowIndexVector* non_zero_rows) const;

  // This is currently only used for testing. It achieves the same result as
  // PermutedLowerSparseSolve() below, but the latter exploits the sparsity of
  // rhs and is thus faster for our use case.
  //
  // Note that partial_inverse_row_perm only permutes the first k rows, where k
  // is the same as partial_inverse_row_perm.size(). It is the inverse
  // permutation of row_perm, which only permutes k rows into [0, k), the
  // other row images being kInvalidRow. The other arguments are the same as
  // for PermutedLowerSparseSolve() and described there.
  //
  // IMPORTANT: lower will contain all the "symbolic" non-zero entries.
  // A "symbolic" zero entry is one that will be zero whatever the coefficients
  // of the rhs entries. That is, it only depends on the position of its
  // entries, not on their values. Thus, some of its coefficients may be zero.
  // This fact is exploited by the LU factorization code. The zero coefficients
  // of upper will be cleaned, however.
  void PermutedLowerSolve(const SparseColumn& rhs,
                          const RowPermutation& row_perm,
                          const RowMapping& partial_inverse_row_perm,
                          SparseColumn* lower, SparseColumn* upper) const;

  // This solves a lower triangular system with only ones on the diagonal where
  // the matrix and the input rhs are permuted by the inverse of row_perm. Note
  // that the output will also be permuted by the inverse of row_perm. The
  // function also supports partial permutation. That is, if row_perm[i] < 0
  // then column row_perm[i] is assumed to be an identity column.
  //
  // The output is given as follows:
  // - lower is cleared, and receives the rows for which row_perm[row] < 0,
  //   meaning not yet examined as a pivot (see markowitz.cc).
  // - upper is NOT cleared, and the other rows (row_perm[row] >= 0) are
  //   appended to it.
  // - Note that lower and upper can point to the same SparseColumn.
  //
  // Note: This function is non-const because ComputeRowsToConsider() also
  // prunes the underlying dependency graph of the lower matrix while doing a
  // solve. See marked_ and pruned_ends_ below.
  void PermutedLowerSparseSolve(const ColumnView& rhs,
                                const RowPermutation& row_perm,
                                SparseColumn* lower, SparseColumn* upper);

  // To be used in DEBUG mode by the client code. This checks that the matrix
  // is lower- (resp. upper-) triangular without any permutation and that there
  // is no zero on the diagonal. We can't do that in each Solve() that requires
  // it, otherwise it would be too slow in debug mode.
  bool IsLowerTriangular() const;
  bool IsUpperTriangular() const;

  // Visible for testing. This is used by PermutedLowerSparseSolve() to compute
  // the non-zero indices of the result. The output is as follows:
  // - lower_column_rows will contain the rows for which row_perm[row] < 0.
  // - upper_column_rows will contain the other rows in the reverse topological
  //   order in which they should be considered in PermutedLowerSparseSolve().
  //
  // This function is non-const because it prunes the underlying dependency
  // graph of the lower matrix while doing a solve. See marked_ and
  // pruned_ends_ below.
  //
  // Pruning the graph at the same time is slower but not by too much (< 2x)
  // and seems worth doing. Note that when the lower matrix is dense, most of
  // the graph will likely be pruned. As a result, the symbolic phase will be
  // negligible compared to the numerical phase so we don't really need a dense
  // version of PermutedLowerSparseSolve().
  void PermutedComputeRowsToConsider(const ColumnView& rhs,
                                     const RowPermutation& row_perm,
                                     RowIndexVector* lower_column_rows,
                                     RowIndexVector* upper_column_rows);

  // The upper bound is computed using one of the algorithms presented in
  // "A Survey of Condition Number Estimation for Triangular Matrices",
  // https://epubs.siam.org/doi/pdf/10.1137/1029112.
  Fractional ComputeInverseInfinityNormUpperBound() const;
  Fractional ComputeInverseInfinityNorm() const;

 private:
  // Internal versions of some Solve() functions to avoid code duplication.
  template <bool diagonal_of_ones>
  void LowerSolveStartingAtInternal(ColIndex start, DenseColumn* rhs) const;
  template <bool diagonal_of_ones>
  void UpperSolveInternal(DenseColumn* rhs) const;
  template <bool diagonal_of_ones>
  void TransposeLowerSolveInternal(DenseColumn* rhs) const;
  template <bool diagonal_of_ones>
  void TransposeUpperSolveInternal(DenseColumn* rhs) const;
  template <bool diagonal_of_ones>
  void HyperSparseSolveInternal(DenseColumn* rhs,
                                RowIndexVector* non_zero_rows) const;
  template <bool diagonal_of_ones>
  void HyperSparseSolveWithReversedNonZerosInternal(
      DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
  template <bool diagonal_of_ones>
  void TransposeHyperSparseSolveInternal(DenseColumn* rhs,
                                         RowIndexVector* non_zero_rows) const;
  template <bool diagonal_of_ones>
  void TransposeHyperSparseSolveWithReversedNonZerosInternal(
      DenseColumn* rhs, RowIndexVector* non_zero_rows) const;

  // Internal function used by the Add*() functions to finish adding
  // a new column to a triangular matrix.
  void CloseCurrentColumn(Fractional diagonal_value);

  // Extra data for "triangular" matrices. The diagonal coefficients are
  // stored in a separate vector instead of being stored in each column.
  StrictITIVector<ColIndex, Fractional> diagonal_coefficients_;

  // Index of the first column which is not a diagonal-only column with a
  // coefficient of 1. This is used to optimize the solves.
  ColIndex first_non_identity_column_;

  // This common case allows for more efficient Solve() functions.
  // TODO(user): Do not even construct diagonal_coefficients_ in this case?
  bool all_diagonal_coefficients_are_one_;

  // For the hyper-sparse version. These are used to implement a DFS, see
  // ComputeRowsToConsiderWithDfs() for more details.
  mutable DenseBooleanColumn stored_;
  mutable std::vector<RowIndex> nodes_to_explore_;

  // For PermutedLowerSparseSolve().
  mutable std::vector<RowIndex> lower_column_rows_;
  mutable std::vector<RowIndex> upper_column_rows_;
  mutable DenseColumn initially_all_zero_scratchpad_;

  // This boolean vector is used to detect entries that can be pruned during
  // the DFS used for the symbolic phase of ComputeRowsToConsider().
  //
  // Problem: We have a DAG where each node has outgoing arcs towards other
  // nodes (this adjacency list is NOT sorted by any order). We want to compute
  // the reachability of a set of nodes S and its topological order. While
  // doing this, we also want to prune the adjacency lists to exploit the
  // simple fact that if a -> (b, c) and b -> (c), then c can be removed from
  // the adjacency list of a since it will be implied through b. Note that this
  // doesn't change the reachability of any set nor a valid topological
  // ordering of such a set.
  //
  // The concept is known as the transitive reduction of a DAG, see
  // http://en.wikipedia.org/wiki/Transitive_reduction.
  //
  // Heuristic algorithm: While doing the DFS to compute Reach(S) and its
  // topological order, each time we process a node, we mark all its adjacent
  // nodes while going down in the DFS, and then we unmark all of them when we
  // go back up. During the un-marking, if a node is already un-marked, it
  // means that it was implied by some other path starting at the current node
  // and we can prune it and remove it from the adjacency list of the current
  // node.
  //
  // Note(user): I couldn't find any reference for this algorithm, even though
  // I suspect I am not the first one to need something similar.
  mutable DenseBooleanColumn marked_;

  // This is used to represent a pruned sub-matrix of the current matrix that
  // corresponds to the pruned DAG as described in the comment above for
  // marked_. This vector is used to encode the sub-matrix as follows:
  // - Both the rows and the coefficients of the pruned matrix are still stored
  //   in rows_ and coefficients_.
  // - The data of column 'col' is still stored starting at starts_[col].
  // - But its end is given by pruned_ends_[col] instead of starts_[col + 1].
  //
  // The idea of using a smaller graph for the symbolic phase is well known in
  // sparse linear algebra. See:
  // - John R. Gilbert and Joseph W. H. Liu, "Elimination structures for
  //   unsymmetric sparse LU factors", Tech. Report CS-90-11, Department of
  //   Computer Science, York University, North York, Ontario, Canada, 1990.
  // - Stanley C. Eisenstat and Joseph W. H. Liu, "Exploiting structural
  //   symmetry in a sparse partial pivoting code", SIAM J. Sci. Comput., Vol.
  //   14, No. 1, pp. 253-257, January 1993.
  //
  // Note that we use an original algorithm and prune the graph while
  // performing the symbolic phase. Hence the pruning will only benefit the
  // next symbolic phase. This is different from Eisenstat-Liu's symmetric
  // pruning. It is still a heuristic and will not necessarily find the minimal
  // graph that has the same result for the symbolic phase though.
  //
  // TODO(user): Use this during the "normal" hyper-sparse solves so that
  // we can benefit from the pruned lower matrix there?
  mutable StrictITIVector<ColIndex, EntryIndex> pruned_ends_;

  DISALLOW_COPY_AND_ASSIGN(TriangularMatrix);
};

}  // namespace glop
}  // namespace operations_research

#endif  // OR_TOOLS_LP_DATA_SPARSE_H_