OR-Tools 8.2
update_row.cc
// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/glop/update_row.h"

#include "ortools/lp_data/lp_utils.h"

namespace operations_research {
namespace glop {

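// Helper that computes the "update row" associated with the leaving row of a
// simplex iteration: first the unit-row left inverse (the leaving row of the
// basis inverse), then its product with the relevant non-basic columns of the
// matrix.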
UpdateRow::UpdateRow(const CompactSparseMatrix& matrix,
                     const CompactSparseMatrix& transposed_matrix,
                     const VariablesInfo& variables_info,
                     const RowToColMapping& basis,
                     const BasisFactorization& basis_factorization)
    : matrix_(matrix),
      transposed_matrix_(transposed_matrix),
      variables_info_(variables_info),
      basis_(basis),
      basis_factorization_(basis_factorization),
      unit_row_left_inverse_(),
      non_zero_position_list_(),
      non_zero_position_set_(),
      coefficient_(),
      compute_update_row_(true),
      num_operations_(0),
      parameters_(),
      stats_() {}

void UpdateRow::Invalidate() {
  SCOPED_TIME_STAT(&stats_);
  compute_update_row_ = true;
}

void UpdateRow::IgnoreUpdatePosition(ColIndex col) {
  SCOPED_TIME_STAT(&stats_);
  if (col >= coefficient_.size()) return;
  coefficient_[col] = 0.0;
}

const ScatteredRow& UpdateRow::GetUnitRowLeftInverse() const {
  DCHECK(!compute_update_row_);
  return unit_row_left_inverse_;
}

const ScatteredRow& UpdateRow::ComputeAndGetUnitRowLeftInverse(
    RowIndex leaving_row) {
  Invalidate();
  basis_factorization_.TemporaryLeftSolveForUnitRow(RowToColIndex(leaving_row),
                                                    &unit_row_left_inverse_);
  return unit_row_left_inverse_;
}

void UpdateRow::ComputeUnitRowLeftInverse(RowIndex leaving_row) {
  SCOPED_TIME_STAT(&stats_);
  basis_factorization_.LeftSolveForUnitRow(RowToColIndex(leaving_row),
                                           &unit_row_left_inverse_);

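  // Sanity check on the solve accuracy: the scalar product of the leaving
  // basic column with the unit-row left inverse should be exactly 1.0, so the
  // deviation from 1.0 recorded below is a direct measure of the numerical
  // error.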
  // TODO(user): Refactorize if the estimated accuracy is above a threshold.
  IF_STATS_ENABLED(stats_.unit_row_left_inverse_accuracy.Add(
      matrix_.ColumnScalarProduct(basis_[leaving_row],
                                  unit_row_left_inverse_.values) -
      1.0));
  IF_STATS_ENABLED(stats_.unit_row_left_inverse_density.Add(
      Density(unit_row_left_inverse_.values)));
}

void UpdateRow::ComputeUpdateRow(RowIndex leaving_row) {
  if (!compute_update_row_ && update_row_computed_for_ == leaving_row) return;
  compute_update_row_ = false;
  update_row_computed_for_ = leaving_row;
  ComputeUnitRowLeftInverse(leaving_row);
  SCOPED_TIME_STAT(&stats_);

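  // The update row is the product of the unit-row left inverse with the
  // non-basic part of the matrix. It can be accumulated either row-wise
  // (scanning, through transposed_matrix_, the matrix rows that correspond to
  // the non-zeros of the left inverse) or column-wise (one scalar product per
  // relevant column); the cheaper of the two is picked below from entry-count
  // estimates.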
  if (parameters_.use_transposed_matrix()) {
    // Number of entries that ComputeUpdatesRowWise() will need to look at.
    EntryIndex num_row_wise_entries(0);

    // Because we are about to do an expensive matrix-vector product, we make
    // sure we drop small entries in the vector for the row-wise algorithm. We
    // also compute its non-zeros to simplify the code below.
    //
    // TODO(user): So far we didn't generalize the use of drop tolerances
    // everywhere in the solver, so we make sure to not modify
    // unit_row_left_inverse_ that is also used elsewhere. However, because of
    // that, we will not get the exact same result depending on the algorithm
    // used below because ComputeUpdatesColumnWise() will still use these
    // small entries (no complexity changes).
    const Fractional drop_tolerance = parameters_.drop_tolerance();
    unit_row_left_inverse_filtered_non_zeros_.clear();
    if (unit_row_left_inverse_.non_zeros.empty()) {
      const ColIndex size = unit_row_left_inverse_.values.size();
      for (ColIndex col(0); col < size; ++col) {
        if (std::abs(unit_row_left_inverse_.values[col]) > drop_tolerance) {
          unit_row_left_inverse_filtered_non_zeros_.push_back(col);
          num_row_wise_entries += transposed_matrix_.ColumnNumEntries(col);
        }
      }
    } else {
      for (const auto e : unit_row_left_inverse_) {
        if (std::abs(e.coefficient()) > drop_tolerance) {
          unit_row_left_inverse_filtered_non_zeros_.push_back(e.column());
          num_row_wise_entries +=
              transposed_matrix_.ColumnNumEntries(e.column());
        }
      }
    }

    // Number of entries that ComputeUpdatesColumnWise() will need to look at.
    const EntryIndex num_col_wise_entries =
        variables_info_.GetNumEntriesInRelevantColumns();

    // Note that the thresholds were chosen (more or less) from the result of
    // the microbenchmark tests of this file in September 2013.
    // TODO(user): automate the computation of these constants at run-time?
    const double row_wise = static_cast<double>(num_row_wise_entries.value());
    if (row_wise < 0.5 * static_cast<double>(num_col_wise_entries.value())) {
      if (row_wise < 1.1 * static_cast<double>(matrix_.num_cols().value())) {
        ComputeUpdatesRowWiseHypersparse();
        num_operations_ += num_row_wise_entries.value();
      } else {
        ComputeUpdatesRowWise();
        num_operations_ +=
            num_row_wise_entries.value() + matrix_.num_rows().value();
      }
    } else {
      ComputeUpdatesColumnWise();
      num_operations_ +=
          num_col_wise_entries.value() + matrix_.num_cols().value();
    }
  } else {
    ComputeUpdatesColumnWise();
    num_operations_ +=
        variables_info_.GetNumEntriesInRelevantColumns().value() +
        matrix_.num_cols().value();
  }
  IF_STATS_ENABLED(stats_.update_row_density.Add(
      static_cast<double>(non_zero_position_list_.size()) /
      static_cast<double>(matrix_.num_cols().value())));
}

void UpdateRow::ComputeUpdateRowForBenchmark(const DenseRow& lhs,
                                             const std::string& algorithm) {
  unit_row_left_inverse_.values = lhs;
  ComputeNonZeros(lhs, &unit_row_left_inverse_filtered_non_zeros_);
  if (algorithm == "column") {
    ComputeUpdatesColumnWise();
  } else if (algorithm == "row") {
    ComputeUpdatesRowWise();
  } else if (algorithm == "row_hypersparse") {
    ComputeUpdatesRowWiseHypersparse();
  } else {
    LOG(DFATAL) << "Unknown algorithm in ComputeUpdateRowForBenchmark(): '"
                << algorithm << "'";
  }
}

const DenseRow& UpdateRow::GetCoefficients() const { return coefficient_; }

const ColIndexVector& UpdateRow::GetNonZeroPositions() const {
  return non_zero_position_list_;
}

void UpdateRow::SetParameters(const GlopParameters& parameters) {
  parameters_ = parameters;
}

// This is optimized for the case when the total number of entries is about
// the same as, or greater than, the number of columns.
void UpdateRow::ComputeUpdatesRowWise() {
  SCOPED_TIME_STAT(&stats_);
  const ColIndex num_cols = matrix_.num_cols();
  coefficient_.AssignToZero(num_cols);
  for (ColIndex col : unit_row_left_inverse_filtered_non_zeros_) {
    const Fractional multiplier = unit_row_left_inverse_[col];
    for (const EntryIndex i : transposed_matrix_.Column(col)) {
      const ColIndex pos = RowToColIndex(transposed_matrix_.EntryRow(i));
      coefficient_[pos] += multiplier * transposed_matrix_.EntryCoefficient(i);
    }
  }

  non_zero_position_list_.clear();
  const Fractional drop_tolerance = parameters_.drop_tolerance();
  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
    if (std::abs(coefficient_[col]) > drop_tolerance) {
      non_zero_position_list_.push_back(col);
    }
  }
}

// This is optimized for the case when the total number of entries is smaller
// than the number of columns.
void UpdateRow::ComputeUpdatesRowWiseHypersparse() {
  SCOPED_TIME_STAT(&stats_);
  const ColIndex num_cols = matrix_.num_cols();
  non_zero_position_set_.ClearAndResize(num_cols);
  coefficient_.resize(num_cols, 0.0);
  for (ColIndex col : unit_row_left_inverse_filtered_non_zeros_) {
    const Fractional multiplier = unit_row_left_inverse_[col];
    for (const EntryIndex i : transposed_matrix_.Column(col)) {
      const ColIndex pos = RowToColIndex(transposed_matrix_.EntryRow(i));
      const Fractional v = multiplier * transposed_matrix_.EntryCoefficient(i);
      if (!non_zero_position_set_.IsSet(pos)) {
        // Note that we could create the non_zero_position_list_ here, but we
        // prefer to keep the non-zero positions sorted, so using the bitset is
        // a good alternative. Of course if the solution is really sparse, then
        // sorting non_zero_position_list_ will be faster.
        coefficient_[pos] = v;
        non_zero_position_set_.Set(pos);
      } else {
        coefficient_[pos] += v;
      }
    }
  }

  // Only keep in non_zero_position_set_ the relevant positions.
  non_zero_position_set_.Intersection(variables_info_.GetIsRelevantBitRow());
  non_zero_position_list_.clear();
  const Fractional drop_tolerance = parameters_.drop_tolerance();
  for (const ColIndex col : non_zero_position_set_) {
    // TODO(user): Since the solution is really sparse, maybe storing the
    // non-zero coefficients contiguously in a vector is better than keeping
    // them as they are. Note however that we will iterate only twice on the
    // update row coefficients during an iteration.
    if (std::abs(coefficient_[col]) > drop_tolerance) {
      non_zero_position_list_.push_back(col);
    }
  }
}

// Note that we use the same algorithm as ComputeUpdatesColumnWise() here. The
// other versions might be faster, but this is called only once per solve, so
// it shouldn't be too bad.
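// Unlike ComputeUpdatesColumnWise(), this fills the coefficients of all
// non-basic columns (not only the "relevant" ones) and also sets the
// coefficient of the leaving basic variable to 1.0.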
void UpdateRow::RecomputeFullUpdateRow(RowIndex leaving_row) {
  CHECK(!compute_update_row_);
  const ColIndex num_cols = matrix_.num_cols();
  const Fractional drop_tolerance = parameters_.drop_tolerance();
  coefficient_.resize(num_cols, 0.0);
  non_zero_position_list_.clear();

  // Fills the only position at one in the basic columns.
  coefficient_[basis_[leaving_row]] = 1.0;
  non_zero_position_list_.push_back(basis_[leaving_row]);

  // Fills the non-basic columns.
  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
    const Fractional coeff =
        matrix_.ColumnScalarProduct(col, unit_row_left_inverse_.values);
    if (std::abs(coeff) > drop_tolerance) {
      non_zero_position_list_.push_back(col);
      coefficient_[col] = coeff;
    }
  }
}

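// This performs one sparse column/vector scalar product per relevant column,
// whatever the sparsity of the unit-row left inverse.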
void UpdateRow::ComputeUpdatesColumnWise() {
  SCOPED_TIME_STAT(&stats_);

  const ColIndex num_cols = matrix_.num_cols();
  const Fractional drop_tolerance = parameters_.drop_tolerance();
  coefficient_.resize(num_cols, 0.0);
  non_zero_position_list_.clear();
  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
    // Coefficient of the column right inverse on the 'leaving_row'.
    const Fractional coeff =
        matrix_.ColumnScalarProduct(col, unit_row_left_inverse_.values);
    // Nothing to do if 'coeff' is (almost) zero, which does happen due to
    // sparsity. Note that it shouldn't be too bad to use a non-zero drop
    // tolerance here because even if we introduce some precision issues, the
    // quantities updated by this update row will eventually be recomputed.
    if (std::abs(coeff) > drop_tolerance) {
      non_zero_position_list_.push_back(col);
      coefficient_[col] = coeff;
    }
  }
}

}  // namespace glop
}  // namespace operations_research
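
// Illustrative call sequence (not part of this file): a client owning an
// UpdateRow instance 'update_row' with a leaving row 'r' would do something
// like
//
//   update_row.SetParameters(parameters);   // once, when parameters change
//   update_row.ComputeUpdateRow(r);
//   const DenseRow& coeffs = update_row.GetCoefficients();
//   for (const ColIndex col : update_row.GetNonZeroPositions()) {
//     // ... use coeffs[col] for the current simplex iteration ...
//   }
//
// calling Invalidate() whenever the basis changes so that the row is
// recomputed on the next ComputeUpdateRow() call.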