OR-Tools 8.2
revised_simplex.cc
1// Copyright 2010-2018 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cmath>
18#include <functional>
19#include <map>
20#include <string>
21#include <utility>
22#include <vector>
23
24#include "absl/strings/str_cat.h"
25#include "absl/strings/str_format.h"
31#include "ortools/glop/parameters.pb.h"
38
39ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false,
40 "Display numbers as fractions.");
41ABSL_FLAG(bool, simplex_stop_after_first_basis, false,
42 "Stop after first basis has been computed.");
43ABSL_FLAG(bool, simplex_stop_after_feasibility, false,
44 "Stop after first phase has been completed.");
45ABSL_FLAG(bool, simplex_display_stats, false, "Display algorithm statistics.");
46
47namespace operations_research {
48namespace glop {
49namespace {
50
51// Calls the given closure upon destruction. It can be used to ensure that a
52// closure is executed whenever a function returns.
53class Cleanup {
54 public:
55 explicit Cleanup(std::function<void()> closure)
56 : closure_(std::move(closure)) {}
57 ~Cleanup() { closure_(); }
58
59 private:
60 std::function<void()> closure_;
61};
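// Illustrative usage sketch (not part of the original file): the stored
// closure runs on every return path of the enclosing function, e.g.:
//
//   Status Example() {
//     Cleanup update_on_return([]() { /* update timers, counters, ... */ });
//     if (/* early exit */ false) return Status::OK();  // closure still runs.
//     return Status::OK();                              // closure runs here too.
//   }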
62} // namespace
63
64#define DCHECK_COL_BOUNDS(col) \
65 { \
66 DCHECK_LE(0, col); \
67 DCHECK_GT(num_cols_, col); \
68 }
69
70#define DCHECK_ROW_BOUNDS(row) \
71 { \
72 DCHECK_LE(0, row); \
73 DCHECK_GT(num_rows_, row); \
74 }
75
76constexpr const uint64 kDeterministicSeed = 42;
77
78RevisedSimplex::RevisedSimplex()
79 : problem_status_(ProblemStatus::INIT),
80 num_rows_(0),
81 num_cols_(0),
82 first_slack_col_(0),
83 objective_(),
84 lower_bound_(),
85 upper_bound_(),
86 basis_(),
87 variable_name_(),
88 direction_(),
89 error_(),
90 basis_factorization_(&compact_matrix_, &basis_),
91 variables_info_(compact_matrix_, lower_bound_, upper_bound_),
92 variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
93 basis_factorization_),
94 dual_edge_norms_(basis_factorization_),
95 primal_edge_norms_(compact_matrix_, variables_info_,
96 basis_factorization_),
97 update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
98 basis_factorization_),
99 reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
100 basis_factorization_, &random_),
101 entering_variable_(variables_info_, &random_, &reduced_costs_,
102 &primal_edge_norms_),
103 num_iterations_(0),
104 num_feasibility_iterations_(0),
105 num_optimization_iterations_(0),
106 total_time_(0.0),
107 feasibility_time_(0.0),
108 optimization_time_(0.0),
109 last_deterministic_time_update_(0.0),
110 iteration_stats_(),
111 ratio_test_stats_(),
112 function_stats_("SimplexFunctionStats"),
113 parameters_(),
114 test_lu_(),
115 feasibility_phase_(true),
116 random_(kDeterministicSeed) {
117 SetParameters(parameters_);
118}
119
120void RevisedSimplex::ClearStateForNextSolve() {
121 SCOPED_TIME_STAT(&function_stats_);
122 solution_state_.statuses.clear();
123}
124
125void RevisedSimplex::LoadStateForNextSolve(const BasisState& state) {
126 SCOPED_TIME_STAT(&function_stats_);
127 solution_state_ = state;
128 solution_state_has_been_set_externally_ = true;
129}
130
131void RevisedSimplex::NotifyThatMatrixIsUnchangedForNextSolve() {
132 notify_that_matrix_is_unchanged_ = true;
133}
134
135Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
136 SCOPED_TIME_STAT(&function_stats_);
137 DCHECK(lp.IsCleanedUp());
139 if (!lp.IsInEquationForm()) {
140 return Status(Status::ERROR_INVALID_PROBLEM,
141 "The problem is not in the equation form.");
142 }
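// Note: "equation form" means that all constraints are equalities and that the
// last columns of the matrix (one per constraint) are the slack variables, so
// the right-most square part of the matrix is the identity. This is DCHECKed
// again in InitializeMatrixAndTestIfUnchanged() below.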
143 Cleanup update_deterministic_time_on_return(
144 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
145
146 // Initialization. Note that Initialize() must be called first since it
147 // analyzes the current solver state.
148 const double start_time = time_limit->GetElapsedTime();
149 GLOP_RETURN_IF_ERROR(Initialize(lp));
150
151 dual_infeasibility_improvement_direction_.clear();
152 update_row_.Invalidate();
153 test_lu_.Clear();
154 problem_status_ = ProblemStatus::INIT;
155 feasibility_phase_ = true;
156 num_iterations_ = 0;
157 num_feasibility_iterations_ = 0;
158 num_optimization_iterations_ = 0;
159 feasibility_time_ = 0.0;
160 optimization_time_ = 0.0;
161 total_time_ = 0.0;
162
163 // In case we abort because of an error, we cannot assume that the current
164 // solution state will be in sync with all our internal data structures. If
165 // we abort without resetting it, setting this allows us to still use the
166 // previous state info, but we will double-check everything.
167 solution_state_has_been_set_externally_ = true;
168
169 if (VLOG_IS_ON(1)) {
170 ComputeNumberOfEmptyRows();
171 ComputeNumberOfEmptyColumns();
172 DisplayBasicVariableStatistics();
173 DisplayProblem();
174 }
175 if (absl::GetFlag(FLAGS_simplex_stop_after_first_basis)) {
176 DisplayAllStats();
177 return Status::OK();
178 }
179
180 const bool use_dual = parameters_.use_dual_simplex();
181 const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
182 if (log_info) {
183 LOG(INFO) << "------ " << (use_dual ? "Dual simplex." : "Primal simplex.");
184 LOG(INFO) << "The matrix has " << compact_matrix_.num_rows() << " rows, "
185 << compact_matrix_.num_cols() << " columns, "
186 << compact_matrix_.num_entries() << " entries.";
187 }
188
189 // TODO(user): Avoid doing the first phase checks when we know from the
190 // incremental solve that the solution is already dual or primal feasible.
191 if (log_info) LOG(INFO) << "------ First phase: feasibility.";
192 entering_variable_.SetPricingRule(parameters_.feasibility_rule());
193 if (use_dual) {
194 if (parameters_.perturb_costs_in_dual_simplex()) {
195 reduced_costs_.PerturbCosts();
196 }
197
198 variables_info_.MakeBoxedVariableRelevant(false);
199 GLOP_RETURN_IF_ERROR(DualMinimize(time_limit));
200 DisplayIterationInfo();
201
202 if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
203 // Note(user): In most cases, the matrix will already be refactorized and
204 // both Refactorize() and PermuteBasis() will do nothing. However, if the
205 // time limit is reached during the first phase, this might not be the
206 // case and RecomputeBasicVariableValues() below DCHECKs that the matrix
207 // is refactorized. This is not required, but we currently only want to
208 // recompute values from scratch when the matrix was just refactorized to
209 // maximize precision.
210 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
211 PermuteBasis();
212
213 variables_info_.MakeBoxedVariableRelevant(true);
214 reduced_costs_.MakeReducedCostsPrecise();
215
216 // This is needed to display errors properly.
217 MakeBoxedVariableDualFeasible(variables_info_.GetNonBasicBoxedVariables(),
218 /*update_basic_values=*/false);
219 variable_values_.RecomputeBasicVariableValues();
220 variable_values_.ResetPrimalInfeasibilityInformation();
221 }
222 } else {
223 reduced_costs_.MaintainDualInfeasiblePositions(true);
224 GLOP_RETURN_IF_ERROR(Minimize(time_limit));
225 DisplayIterationInfo();
226
227 // After the primal phase I, we need to restore the objective.
228 if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
229 InitializeObjectiveAndTestIfUnchanged(lp);
230 reduced_costs_.ResetForNewObjective();
231 }
232 }
233
234 // Reduced costs must be explicitly recomputed because DisplayErrors() is
235 // const.
236 // TODO(user): This API is not really nice.
237 reduced_costs_.GetReducedCosts();
238 DisplayErrors();
239
240 feasibility_phase_ = false;
241 feasibility_time_ = time_limit->GetElapsedTime() - start_time;
242 entering_variable_.SetPricingRule(parameters_.optimization_rule());
243 num_feasibility_iterations_ = num_iterations_;
244
245 if (log_info) LOG(INFO) << "------ Second phase: optimization.";
246
247 // Because of shifts or perturbations, we may need to re-run a dual simplex
248 // after the primal simplex finished, or the opposite.
249 //
250 // We alternate between solving with the primal and dual Phase II algorithms as long as
251 // time limit permits *and* we did not yet achieve the desired precision.
252 // I.e., we run iteration i if the solution from iteration i-1 was not precise
253 // after we removed the bound and cost shifts and perturbations.
254 //
255 // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
256 // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
257 // (i.e., these statuses are not necessarily a consequence of hitting a time
258 // limit).
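// For example, if a primal Phase II pass is left with residual primal
// infeasibility once the shifts/perturbations are removed, the status is
// downgraded to DUAL_FEASIBLE below and the next pass re-optimizes with the
// dual simplex (and, symmetrically, residual dual infeasibility triggers
// another primal pass).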
259 for (int num_optims = 0;
260 // We want to enter the loop when both num_optims and num_iterations_ are
261 // *equal* to the corresponding limits (to return a meaningful status
262 // when the limits are set to 0).
263 num_optims <= parameters_.max_number_of_reoptimizations() &&
264 !objective_limit_reached_ &&
265 (num_iterations_ == 0 ||
266 num_iterations_ < parameters_.max_number_of_iterations()) &&
267 !time_limit->LimitReached() &&
268 !absl::GetFlag(FLAGS_simplex_stop_after_feasibility) &&
269 (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
270 problem_status_ == ProblemStatus::DUAL_FEASIBLE);
271 ++num_optims) {
272 if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
273 // Run the primal simplex.
274 reduced_costs_.MaintainDualInfeasiblePositions(true);
275 GLOP_RETURN_IF_ERROR(Minimize(time_limit));
276 } else {
277 // Run the dual simplex.
278 reduced_costs_.MaintainDualInfeasiblePositions(false);
279 GLOP_RETURN_IF_ERROR(DualMinimize(time_limit));
280 }
281
282 // Minimize() or DualMinimize() always double check the result with maximum
283 // precision by refactoring the basis before exiting (except if an
284 // iteration or time limit was reached).
285 DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
286 problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
287 basis_factorization_.IsRefactorized());
288
289 // If SetIntegralityScale() was called, we perform a polish operation.
290 if (!integrality_scale_.empty() &&
291 problem_status_ == ProblemStatus::OPTIMAL) {
292 reduced_costs_.MaintainDualInfeasiblePositions(true);
294 }
295
296 // Remove the bound and cost shifts (or perturbations).
297 //
298 // Note(user): Currently, we never do both at the same time, so we could
299 // be a bit faster here, but then this is quick anyway.
300 variable_values_.ResetAllNonBasicVariableValues();
301 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
302 PermuteBasis();
303 variable_values_.RecomputeBasicVariableValues();
304 reduced_costs_.ClearAndRemoveCostShifts();
305
306 // Reduced costs must be explicitly recomputed because DisplayErrors() is
307 // const.
308 // TODO(user): This API is not really nice.
309 reduced_costs_.GetReducedCosts();
310 DisplayIterationInfo();
311 DisplayErrors();
312
313 // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
314 // status by checking with the other phase I that the problem is really
315 // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
316 // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
317 // OPTIMAL and the dual does not have issues on this problem.
318 if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
319 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
320 if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
321 variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
322 reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
323 if (log_info) {
324 LOG(INFO) << "DUAL_UNBOUNDED was reported, but the residual and/or "
325 << "dual infeasibility is above the tolerance";
326 }
327 }
328 break;
329 }
330
331 // Change the status if, after the shift and perturbation removal, the
332 // problem is not OPTIMAL anymore.
333 if (problem_status_ == ProblemStatus::OPTIMAL) {
334 const Fractional solution_tolerance =
335 parameters_.solution_feasibility_tolerance();
336 if (variable_values_.ComputeMaximumPrimalResidual() >
337 solution_tolerance ||
338 reduced_costs_.ComputeMaximumDualResidual() > solution_tolerance) {
339 if (log_info) {
340 LOG(INFO) << "OPTIMAL was reported, yet one of the residuals is "
341 "above the solution feasibility tolerance after the "
342 "shift/perturbation are removed.";
343 }
344 if (parameters_.change_status_to_imprecise()) {
345 problem_status_ = ProblemStatus::IMPRECISE;
346 }
347 } else {
348 // We use the "precise" tolerances here to try to report the best
349 // possible solution.
350 const Fractional primal_tolerance =
351 parameters_.primal_feasibility_tolerance();
352 const Fractional dual_tolerance =
353 parameters_.dual_feasibility_tolerance();
354 const Fractional primal_infeasibility =
355 variable_values_.ComputeMaximumPrimalInfeasibility();
356 const Fractional dual_infeasibility =
357 reduced_costs_.ComputeMaximumDualInfeasibility();
358 if (primal_infeasibility > primal_tolerance &&
359 dual_infeasibility > dual_tolerance) {
360 if (log_info) {
361 LOG(INFO) << "OPTIMAL was reported, yet both of the infeasibility "
362 "are above the tolerance after the "
363 "shift/perturbation are removed.";
364 }
365 if (parameters_.change_status_to_imprecise()) {
366 problem_status_ = ProblemStatus::IMPRECISE;
367 }
368 } else if (primal_infeasibility > primal_tolerance) {
369 if (log_info) LOG(INFO) << "Re-optimizing with dual simplex ... ";
370 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
371 } else if (dual_infeasibility > dual_tolerance) {
372 if (log_info) LOG(INFO) << "Re-optimizing with primal simplex ... ";
373 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
374 }
375 }
376 }
377 }
378
379 // Check that the return status is "precise".
380 //
381 // TODO(user): we currently skip the DUAL_INFEASIBLE status because the
382 // quantities are not up to date in this case.
383 if (parameters_.change_status_to_imprecise() &&
384 problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
385 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
386 if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
387 reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
388 problem_status_ = ProblemStatus::IMPRECISE;
389 } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
390 problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
391 problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
392 if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
393 problem_status_ = ProblemStatus::IMPRECISE;
394 }
395 } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
396 problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
397 problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
398 if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
399 problem_status_ = ProblemStatus::IMPRECISE;
400 }
401 }
402 }
403
404 // Store the result for the solution getters.
405 SaveState();
406 solution_objective_value_ = ComputeInitialProblemObjectiveValue();
407 solution_dual_values_ = reduced_costs_.GetDualValues();
408 solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
409 if (lp.IsMaximizationProblem()) {
410 ChangeSign(&solution_dual_values_);
411 ChangeSign(&solution_reduced_costs_);
412 }
413
414 // If the problem is unbounded, set the objective value to +/- infinity.
415 if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
416 problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
417 solution_objective_value_ =
418 (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
419 : -kInfinity;
420 if (lp.IsMaximizationProblem()) {
421 solution_objective_value_ = -solution_objective_value_;
422 }
423 }
424
425 total_time_ = time_limit->GetElapsedTime() - start_time;
426 optimization_time_ = total_time_ - feasibility_time_;
427 num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
428
429 DisplayAllStats();
430 return Status::OK();
431}
432
434 return problem_status_;
435}
436
438 return solution_objective_value_;
439}
440
441int64 RevisedSimplex::GetNumberOfIterations() const { return num_iterations_; }
442
443RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
444
445ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
446
448 return variable_values_.Get(col);
449}
450
452 return solution_reduced_costs_[col];
453}
454
456 return solution_reduced_costs_;
457}
458
460 return solution_dual_values_[row];
461}
462
464 return variables_info_.GetStatusRow()[col];
465}
466
467const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
468
470 // Note the negative sign since the slack variable is such that
471 // constraint_activity + slack_value = 0.
472 return -variable_values_.Get(SlackColIndex(row));
473}
474
475ConstraintStatus RevisedSimplex::GetConstraintStatus(RowIndex row) const {
476 // The status of the given constraint is the same as the status of the
477 // associated slack variable with a change of sign.
478 const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
479 if (s == VariableStatus::AT_LOWER_BOUND) {
480 return ConstraintStatus::AT_UPPER_BOUND;
481 }
482 if (s == VariableStatus::AT_UPPER_BOUND) {
483 return ConstraintStatus::AT_LOWER_BOUND;
484 }
485 return VariableStatusToConstraintStatus(s);
486}
487
490 return solution_primal_ray_;
491}
494 return solution_dual_ray_;
495}
496
499 return solution_dual_ray_row_combination_;
500}
501
502ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
503
505 DCHECK(basis_factorization_.GetColumnPermutation().empty());
506 return basis_factorization_;
507}
508
509std::string RevisedSimplex::GetPrettySolverStats() const {
510 return absl::StrFormat(
511 "Problem status : %s\n"
512 "Solving time : %-6.4g\n"
513 "Number of iterations : %u\n"
514 "Time for solvability (first phase) : %-6.4g\n"
515 "Number of iterations for solvability : %u\n"
516 "Time for optimization : %-6.4g\n"
517 "Number of iterations for optimization : %u\n"
518 "Stop after first basis : %d\n",
519 GetProblemStatusString(problem_status_), total_time_, num_iterations_,
520 feasibility_time_, num_feasibility_iterations_, optimization_time_,
521 num_optimization_iterations_,
522 absl::GetFlag(FLAGS_simplex_stop_after_first_basis));
523}
524
526 // TODO(user): Also take into account the dual edge norms and the reduced cost
527 // updates.
528 return basis_factorization_.DeterministicTime() +
529 update_row_.DeterministicTime() +
530 primal_edge_norms_.DeterministicTime();
531}
532
533void RevisedSimplex::SetVariableNames() {
534 variable_name_.resize(num_cols_, "");
535 for (ColIndex col(0); col < first_slack_col_; ++col) {
536 const ColIndex var_index = col + 1;
537 variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
538 }
539 for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
540 const ColIndex var_index = col - first_slack_col_ + 1;
541 variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
542 }
543}
544
545VariableStatus RevisedSimplex::ComputeDefaultVariableStatus(
546 ColIndex col) const {
548 if (lower_bound_[col] == upper_bound_[col]) {
549 return VariableStatus::FIXED_VALUE;
550 }
551 if (lower_bound_[col] == -kInfinity && upper_bound_[col] == kInfinity) {
552 return VariableStatus::FREE;
553 }
554
555 // Returns the bound with the lowest magnitude. Note that it must be finite
556 // because the VariableStatus::FREE case was tested earlier.
557 DCHECK(IsFinite(lower_bound_[col]) || IsFinite(upper_bound_[col]));
558 return std::abs(lower_bound_[col]) <= std::abs(upper_bound_[col])
559 ? VariableStatus::AT_LOWER_BOUND
560 : VariableStatus::AT_UPPER_BOUND;
561}
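// Example of the default status rule above: bounds [0, 10] give
// VariableStatus::AT_LOWER_BOUND (|0| <= |10|), bounds [-20, 5] give
// VariableStatus::AT_UPPER_BOUND, equal bounds give
// VariableStatus::FIXED_VALUE, and bounds (-inf, +inf) give
// VariableStatus::FREE.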
562
563void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
564 ColIndex col, VariableStatus status) {
565 variables_info_.UpdateToNonBasicStatus(col, status);
566 variable_values_.SetNonBasicVariableValueFromStatus(col);
567}
568
569bool RevisedSimplex::BasisIsConsistent() const {
570 const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
571 const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
572 for (RowIndex row(0); row < num_rows_; ++row) {
573 const ColIndex col = basis_[row];
574 if (!is_basic.IsSet(col)) return false;
575 if (variable_statuses[col] != VariableStatus::BASIC) return false;
576 }
577 ColIndex cols_in_basis(0);
578 ColIndex cols_not_in_basis(0);
579 for (ColIndex col(0); col < num_cols_; ++col) {
580 cols_in_basis += is_basic.IsSet(col);
581 cols_not_in_basis += !is_basic.IsSet(col);
582 if (is_basic.IsSet(col) !=
583 (variable_statuses[col] == VariableStatus::BASIC)) {
584 return false;
585 }
586 }
587 if (cols_in_basis != RowToColIndex(num_rows_)) return false;
588 if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
589 return true;
590}
591
592// Note(user): The basis factorization is not updated by this function but by
593// UpdateAndPivot().
594void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
595 VariableStatus leaving_variable_status) {
596 SCOPED_TIME_STAT(&function_stats_);
597 DCHECK_COL_BOUNDS(entering_col);
598 DCHECK_ROW_BOUNDS(basis_row);
599
600 // Check that this is not called with an entering_col already in the basis
601 // and that the leaving col is indeed in the basis.
602 DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
603 DCHECK_NE(basis_[basis_row], entering_col);
604 DCHECK_NE(basis_[basis_row], kInvalidCol);
605
606 const ColIndex leaving_col = basis_[basis_row];
607 DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));
608
609 // Make leaving_col leave the basis and update relevant data.
610 // Note that the leaving variable value is not necessarily at its exact
611 // bound, which is like a bound shift.
612 variables_info_.Update(leaving_col, leaving_variable_status);
613 DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
614 leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
615 leaving_variable_status == VariableStatus::FIXED_VALUE);
616
617 basis_[basis_row] = entering_col;
618 variables_info_.Update(entering_col, VariableStatus::BASIC);
619 update_row_.Invalidate();
620}
621
622namespace {
623
624// Comparator used to sort column indices according to a given value vector.
625class ColumnComparator {
626 public:
627 explicit ColumnComparator(const DenseRow& value) : value_(value) {}
628 bool operator()(ColIndex col_a, ColIndex col_b) const {
629 return value_[col_a] < value_[col_b];
630 }
631
632 private:
633 const DenseRow& value_;
634};
635
636} // namespace
637
638 // To better understand what is going on in this function, note that this
639// algorithm will produce the optimal solution to a problem containing only
640// singleton columns (provided that the variables start at the minimum possible
641// cost, see ComputeDefaultVariableStatus()). This is unit tested.
642//
643// The error_ must be equal to the constraint activity for the current variable
644 // values before this function is called. If error_[row] is 0.0, that means this
645// constraint is currently feasible.
646void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
647 SCOPED_TIME_STAT(&function_stats_);
648 // Computes the singleton columns and the cost variation of the corresponding
649 // variables (in the only possible direction, i.e., away from their current bound)
650 // for a unit change in the infeasibility of the corresponding row.
651 //
652 // Note that the slack columns will be treated as normal singleton columns.
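 // For example, a variable currently at its lower bound with objective
 // coefficient 4.0 and a single column coefficient of -2.0 gets
 // cost_variation = 4.0 / 2.0 = 2.0: moving it away from its bound by enough
 // to change the row activity by one unit costs 2.0 in the objective.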
653 std::vector<ColIndex> singleton_column;
654 DenseRow cost_variation(num_cols_, 0.0);
655 for (ColIndex col(0); col < num_cols_; ++col) {
656 if (compact_matrix_.column(col).num_entries() != 1) continue;
657 if (lower_bound_[col] == upper_bound_[col]) continue;
658 const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
659 if (variable_values_.Get(col) == lower_bound_[col]) {
660 cost_variation[col] = objective_[col] / std::abs(slope);
661 } else {
662 cost_variation[col] = -objective_[col] / std::abs(slope);
663 }
664 singleton_column.push_back(col);
665 }
666 if (singleton_column.empty()) return;
667
668 // Sort the singleton columns for the case where many of them correspond to
669 // the same row (equivalent to a piecewise-linear objective on this variable).
670 // Negative cost_variation first since moving the singleton variable away from
671 // its current bound means the least decrease in the objective function for
672 // the same "error" variation.
673 ColumnComparator comparator(cost_variation);
674 std::sort(singleton_column.begin(), singleton_column.end(), comparator);
675 DCHECK_LE(cost_variation[singleton_column.front()],
676 cost_variation[singleton_column.back()]);
677
678 // Use a singleton column to "absorb" the error when possible to avoid
679 // introducing unneeded artificial variables. Note that with scaling on, the
680 // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
681 // them) and that the SingletonColumnSignPreprocessor makes them all positive.
682 // However, this code works for any coefficient value.
683 const DenseRow& variable_values = variable_values_.GetDenseRow();
684 for (const ColIndex col : singleton_column) {
685 const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
686
687 // If no singleton columns have entered the basis for this row, choose the
688 // first one. It will be the one with the least decrease in the objective
689 // function when it leaves the basis.
690 if ((*basis)[row] == kInvalidCol) {
691 (*basis)[row] = col;
692 }
693
694 // If there is already no error in this row (i.e. it is primal-feasible),
695 // there is nothing to do.
696 if (error_[row] == 0.0) continue;
697
698 // In this case, all the infeasibility can be "absorbed" and this variable
699 // may not be at one of its bounds anymore, so we have to use it in the
700 // basis.
701 const Fractional coeff =
702 compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
703 const Fractional new_value = variable_values[col] + error_[row] / coeff;
704 if (new_value >= lower_bound_[col] && new_value <= upper_bound_[col]) {
705 error_[row] = 0.0;
706
707 // Use this variable in the initial basis.
708 (*basis)[row] = col;
709 continue;
710 }
711
712 // The idea here is that even if the singleton column cannot be used to
713 // "absorb" all of error_[row], it can still be used to make the
714 // infeasibility smaller (with a bound flip), provided it is boxed.
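 // For instance, with error_[row] = 5.0, coeff = 1.0 and a box width of 3.0,
 // flipping the variable from its lower to its upper bound below reduces
 // error_[row] to 2.0.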
715 const Fractional box_width = variables_info_.GetBoundDifference(col);
716 DCHECK_NE(box_width, 0.0);
717 DCHECK_NE(error_[row], 0.0);
718 const Fractional error_sign = error_[row] / coeff;
719 if (variable_values[col] == lower_bound_[col] && error_sign > 0.0) {
720 DCHECK(IsFinite(box_width));
721 error_[row] -= coeff * box_width;
722 SetNonBasicVariableStatusAndDeriveValue(col,
723 VariableStatus::AT_UPPER_BOUND);
724 continue;
725 }
726 if (variable_values[col] == upper_bound_[col] && error_sign < 0.0) {
727 DCHECK(IsFinite(box_width));
728 error_[row] += coeff * box_width;
729 SetNonBasicVariableStatusAndDeriveValue(col,
730 VariableStatus::AT_LOWER_BOUND);
731 continue;
732 }
733 }
734}
735
736bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
737 const LinearProgram& lp, bool* only_change_is_new_rows,
738 bool* only_change_is_new_cols, ColIndex* num_new_cols) {
739 SCOPED_TIME_STAT(&function_stats_);
740 DCHECK(only_change_is_new_rows != nullptr);
741 DCHECK(only_change_is_new_cols != nullptr);
742 DCHECK(num_new_cols != nullptr);
743 DCHECK_NE(kInvalidCol, lp.GetFirstSlackVariable());
744 DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
745 DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
746
747 DCHECK_EQ(lp.num_variables(),
748 lp.GetFirstSlackVariable() + RowToColIndex(lp.num_constraints()));
749 DCHECK(IsRightMostSquareMatrixIdentity(lp.GetSparseMatrix()));
750 const bool old_part_of_matrix_is_unchanged =
752 num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
753
754 // Test if the matrix is unchanged, and if yes, just return true. Note that
755 // this doesn't check the columns corresponding to the slack variables,
756 // because they were checked by lp.IsInEquationForm() when Solve() was called.
757 if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
758 lp.num_variables() == num_cols_) {
759 return true;
760 }
761
762 // Check if the new matrix can be derived from the old one just by adding
763 // new rows (i.e., new constraints).
764 *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
765 lp.num_constraints() > num_rows_ &&
766 lp.GetFirstSlackVariable() == first_slack_col_;
767
768 // Check if the new matrix can be derived from the old one just by adding
769 // new columns (i.e., new variables).
770 *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
771 lp.num_constraints() == num_rows_ &&
772 lp.GetFirstSlackVariable() > first_slack_col_;
773 *num_new_cols =
774 *only_change_is_new_cols ? lp.num_variables() - num_cols_ : ColIndex(0);
775
776 // Initialize first_slack_col_.
777 first_slack_col_ = lp.GetFirstSlackVariable();
778
779 // Initialize the new dimensions.
780 num_rows_ = lp.num_constraints();
781 num_cols_ = lp.num_variables();
782
783 // Populate compact_matrix_ and transposed_matrix_ if needed. Note that we
784 // already added all the slack variables at this point, so matrix_ will not
785 // change anymore.
786 // TODO(user): This can be sped up by removing the MatrixView.
787 compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
788 if (parameters_.use_transposed_matrix()) {
789 transposed_matrix_.PopulateFromTranspose(compact_matrix_);
790 }
791 return false;
792}
793
794bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
795 const LinearProgram& lp, ColIndex num_new_cols) {
796 SCOPED_TIME_STAT(&function_stats_);
797 DCHECK_EQ(lp.num_variables(), num_cols_);
798 DCHECK_LE(num_new_cols, first_slack_col_);
799 const ColIndex first_new_col(first_slack_col_ - num_new_cols);
800
801 // Check the original variable bounds.
802 for (ColIndex col(0); col < first_new_col; ++col) {
803 if (lower_bound_[col] != lp.variable_lower_bounds()[col] ||
804 upper_bound_[col] != lp.variable_upper_bounds()[col]) {
805 return false;
806 }
807 }
808 // Check that each new variable has a bound of zero.
809 for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
810 if (lp.variable_lower_bounds()[col] != 0.0 &&
811 lp.variable_upper_bounds()[col] != 0.0) {
812 return false;
813 }
814 }
815 // Check that the slack bounds are unchanged.
816 for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
817 if (lower_bound_[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
818 upper_bound_[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
819 return false;
820 }
821 }
822 return true;
823}
824
825bool RevisedSimplex::InitializeBoundsAndTestIfUnchanged(
826 const LinearProgram& lp) {
827 SCOPED_TIME_STAT(&function_stats_);
828 lower_bound_.resize(num_cols_, 0.0);
829 upper_bound_.resize(num_cols_, 0.0);
830
831 // Variable bounds, for both non-slack and slack variables.
832 bool bounds_are_unchanged = true;
833 DCHECK_EQ(lp.num_variables(), num_cols_);
834 for (ColIndex col(0); col < lp.num_variables(); ++col) {
835 if (lower_bound_[col] != lp.variable_lower_bounds()[col] ||
836 upper_bound_[col] != lp.variable_upper_bounds()[col]) {
837 bounds_are_unchanged = false;
838 break;
839 }
840 }
841 if (!bounds_are_unchanged) {
842 lower_bound_ = lp.variable_lower_bounds();
843 upper_bound_ = lp.variable_upper_bounds();
844 }
845 return bounds_are_unchanged;
846}
847
848bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
849 const LinearProgram& lp) {
850 SCOPED_TIME_STAT(&function_stats_);
851
852 bool objective_is_unchanged = true;
853 objective_.resize(num_cols_, 0.0);
854 DCHECK_EQ(num_cols_, lp.num_variables());
855 if (lp.IsMaximizationProblem()) {
856 // Note that we use the minimization version of the objective internally.
857 for (ColIndex col(0); col < lp.num_variables(); ++col) {
858 const Fractional coeff = -lp.objective_coefficients()[col];
859 if (objective_[col] != coeff) {
860 objective_is_unchanged = false;
861 }
862 objective_[col] = coeff;
863 }
864 objective_offset_ = -lp.objective_offset();
865 objective_scaling_factor_ = -lp.objective_scaling_factor();
866 } else {
867 for (ColIndex col(0); col < lp.num_variables(); ++col) {
868 if (objective_[col] != lp.objective_coefficients()[col]) {
869 objective_is_unchanged = false;
870 break;
871 }
872 }
873 if (!objective_is_unchanged) {
874 objective_ = lp.objective_coefficients();
875 }
876 objective_offset_ = lp.objective_offset();
877 objective_scaling_factor_ = lp.objective_scaling_factor();
878 }
879 return objective_is_unchanged;
880}
881
882void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
883 objective_limit_reached_ = false;
884 DCHECK(std::isfinite(objective_offset_));
885 DCHECK(std::isfinite(objective_scaling_factor_));
886 DCHECK_NE(0.0, objective_scaling_factor_);
887
888 // This sets dual_objective_limit_ and then primal_objective_limit_.
889 for (const bool set_dual : {true, false}) {
890 // NOTE(user): If objective_scaling_factor_ is negative, the optimization
891 // direction was reversed (during preprocessing or inside revised simplex),
892 // i.e., the original problem is maximization. In such case the _meaning_ of
893 // the lower and upper limits is swapped. To this end we must change the
894 // signs of limits, which happens automatically when calculating shifted
895 // limits. We must also use upper (resp. lower) limit in place of lower
896 // (resp. upper) limit when calculating the final objective_limit_.
897 //
898 // Choose lower limit if using the dual simplex and scaling factor is
899 // negative or if using the primal simplex and scaling is nonnegative, upper
900 // limit otherwise.
901 const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
902 ? parameters_.objective_lower_limit()
903 : parameters_.objective_upper_limit();
904 const Fractional shifted_limit =
905 limit / objective_scaling_factor_ - objective_offset_;
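 // For example, assuming (as elsewhere in glop) that the reported objective
 // equals objective_scaling_factor_ * (internal objective + objective_offset_),
 // a user-level limit of 100.0 with scaling factor 1.0 and offset 10.0 becomes
 // the internal (shifted) limit 100.0 / 1.0 - 10.0 = 90.0.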
906 if (set_dual) {
907 dual_objective_limit_ = shifted_limit;
908 } else {
909 primal_objective_limit_ = shifted_limit;
910 }
911 }
912}
913
914void RevisedSimplex::InitializeVariableStatusesForWarmStart(
915 const BasisState& state, ColIndex num_new_cols) {
916 variables_info_.InitializeAndComputeType();
917 RowIndex num_basic_variables(0);
918 DCHECK_LE(num_new_cols, first_slack_col_);
919 const ColIndex first_new_col(first_slack_col_ - num_new_cols);
920 // Compute the status for all the columns (note that the slack variables are
921 // already added at the end of the matrix at this stage).
922 for (ColIndex col(0); col < num_cols_; ++col) {
923 const VariableStatus default_status = ComputeDefaultVariableStatus(col);
924
925 // Start with the given "warm" status from the BasisState if it exists.
926 VariableStatus status = default_status;
927 if (col < first_new_col && col < state.statuses.size()) {
928 status = state.statuses[col];
929 } else if (col >= first_slack_col_ &&
930 col - num_new_cols < state.statuses.size()) {
931 status = state.statuses[col - num_new_cols];
932 }
933
934 if (status == VariableStatus::BASIC) {
935 // Do not allow more than num_rows_ VariableStatus::BASIC variables.
936 if (num_basic_variables == num_rows_) {
937 VLOG(1) << "Too many basic variables in the warm-start basis."
938 << "Only keeping the first ones as VariableStatus::BASIC.";
939 variables_info_.UpdateToNonBasicStatus(col, default_status);
940 } else {
941 ++num_basic_variables;
942 variables_info_.UpdateToBasicStatus(col);
943 }
944 } else {
945 // Remove incompatibilities between the warm status and the variable
946 // bounds. We use the default status as an indication of the bounds
947 // type.
948 if ((status != default_status) &&
949 ((default_status == VariableStatus::FIXED_VALUE) ||
950 (status == VariableStatus::FREE) ||
951 (status == VariableStatus::FIXED_VALUE) ||
952 (status == VariableStatus::AT_LOWER_BOUND &&
953 lower_bound_[col] == -kInfinity) ||
954 (status == VariableStatus::AT_UPPER_BOUND &&
955 upper_bound_[col] == kInfinity))) {
956 status = default_status;
957 }
958 variables_info_.UpdateToNonBasicStatus(col, status);
959 }
960 }
961
962 // Initialize the values.
963 variable_values_.ResetAllNonBasicVariableValues();
964}
965
966// This implementation starts with an initial matrix B equal to the identity
967// matrix (modulo a column permutation). For that it uses either the slack
968// variables or the singleton columns present in the problem. Afterwards, the
969// fixed slacks in the basis are exchanged with normal columns of A if possible
970// by the InitialBasis class.
971Status RevisedSimplex::CreateInitialBasis() {
972 SCOPED_TIME_STAT(&function_stats_);
973
974 // Initialize the variable values and statuses.
975 // Note that for the dual algorithm, boxed variables will be made
976 // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
977 // really matter at which of their two finite bounds they start.
978 int num_free_variables = 0;
979 variables_info_.InitializeAndComputeType();
980 for (ColIndex col(0); col < num_cols_; ++col) {
981 const VariableStatus status = ComputeDefaultVariableStatus(col);
982 SetNonBasicVariableStatusAndDeriveValue(col, status);
983 if (status == VariableStatus::FREE) ++num_free_variables;
984 }
985 VLOG(1) << "Number of free variables in the problem: " << num_free_variables;
986
987 // Start by using an all-slack basis.
988 RowToColMapping basis(num_rows_, kInvalidCol);
989 for (RowIndex row(0); row < num_rows_; ++row) {
990 basis[row] = SlackColIndex(row);
991 }
992
993 // If possible, for the primal simplex we replace some slack variables with
994 // some singleton columns present in the problem.
995 if (!parameters_.use_dual_simplex() &&
996 parameters_.initial_basis() != GlopParameters::MAROS &&
997 parameters_.exploit_singleton_column_in_initial_basis()) {
998 // For UseSingletonColumnInInitialBasis() to work better, we change
999 // the values of the boxed singleton columns with a non-zero cost to the best
1000 // of their two bounds.
1001 for (ColIndex col(0); col < num_cols_; ++col) {
1002 if (compact_matrix_.column(col).num_entries() != 1) continue;
1003 const VariableStatus status = variables_info_.GetStatusRow()[col];
1004 const Fractional objective = objective_[col];
1005 if (objective > 0 && IsFinite(lower_bound_[col]) &&
1006 status == VariableStatus::AT_UPPER_BOUND) {
1007 SetNonBasicVariableStatusAndDeriveValue(col,
1008 VariableStatus::AT_LOWER_BOUND);
1009 } else if (objective < 0 && IsFinite(upper_bound_[col]) &&
1010 status == VariableStatus::AT_LOWER_BOUND) {
1011 SetNonBasicVariableStatusAndDeriveValue(col,
1012 VariableStatus::AT_UPPER_BOUND);
1013 }
1014 }
1015
1016 // Compute the primal infeasibility of the initial variable values in
1017 // error_.
1018 ComputeVariableValuesError();
1019
1020 // TODO(user): A better but slightly more complex algorithm would be to:
1021 // - Ignore all singleton columns except the slacks during phase I.
1022 // - For this, change the slack variable bounds accordingly.
1023 // - At the end of phase I, restore the slack variable bounds and perform
1024 // the same algorithm to start with feasible and "optimal" values of the
1025 // singleton columns.
1026 basis.assign(num_rows_, kInvalidCol);
1027 UseSingletonColumnInInitialBasis(&basis);
1028
1029 // If needed, complete the basis with fixed slack columns.
1030 for (RowIndex row(0); row < num_rows_; ++row) {
1031 if (basis[row] == kInvalidCol) {
1032 basis[row] = SlackColIndex(row);
1033 }
1034 }
1035 }
1036
1037 // Use an advanced initial basis to remove the fixed variables from the basis.
1038 if (parameters_.initial_basis() == GlopParameters::NONE) {
1039 return InitializeFirstBasis(basis);
1040 }
1041 if (parameters_.initial_basis() == GlopParameters::MAROS) {
1042 InitialBasis initial_basis(compact_matrix_, objective_, lower_bound_,
1043 upper_bound_, variables_info_.GetTypeRow());
1044 if (parameters_.use_dual_simplex()) {
1045 // This dual version only uses zero-cost columns to complete the
1046 // basis.
1047 initial_basis.GetDualMarosBasis(num_cols_, &basis);
1048 } else {
1049 initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1050 }
1051 int number_changed = 0;
1052 for (RowIndex row(0); row < num_rows_; ++row) {
1053 if (basis[row] != SlackColIndex(row)) {
1054 number_changed++;
1055 }
1056 }
1057 VLOG(1) << "Number of Maros basis changes: " << number_changed;
1058 } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1059 parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1060 // First unassign the fixed variables from the basis.
1061 int num_fixed_variables = 0;
1062 for (RowIndex row(0); row < basis.size(); ++row) {
1063 const ColIndex col = basis[row];
1064 if (lower_bound_[col] == upper_bound_[col]) {
1065 basis[row] = kInvalidCol;
1066 ++num_fixed_variables;
1067 }
1068 }
1069
1070 if (num_fixed_variables == 0) {
1071 VLOG(1) << "Crash is set to " << parameters_.initial_basis()
1072 << " but there is no equality rows to remove from initial all "
1073 "slack basis.";
1074 } else {
1075 // Then complete the basis with an advanced initial basis algorithm.
1076 VLOG(1) << "Trying to remove " << num_fixed_variables
1077 << " fixed variables from the initial basis.";
1078 InitialBasis initial_basis(compact_matrix_, objective_, lower_bound_,
1079 upper_bound_, variables_info_.GetTypeRow());
1080
1081 if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1082 if (parameters_.use_scaling()) {
1083 initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1084 } else {
1085 VLOG(1) << "Bixby initial basis algorithm requires the problem "
1086 << "to be scaled. Skipping Bixby's algorithm.";
1087 }
1088 } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1089 // Note the use of num_cols_ here because this algorithm
1090 // benefits from treating fixed slack columns like any other column.
1091 if (parameters_.use_dual_simplex()) {
1092 // This dual version only uses zero-cost columns to complete the
1093 // basis.
1094 initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1095 } else {
1096 initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1097 }
1098
1099 const Status status = InitializeFirstBasis(basis);
1100 if (status.ok()) {
1101 return status;
1102 } else {
1103 VLOG(1) << "Reverting to all slack basis.";
1104
1105 for (RowIndex row(0); row < num_rows_; ++row) {
1106 basis[row] = SlackColIndex(row);
1107 }
1108 }
1109 }
1110 }
1111 } else {
1112 LOG(WARNING) << "Unsupported initial_basis parameters: "
1113 << parameters_.initial_basis();
1114 }
1115
1116 return InitializeFirstBasis(basis);
1117}
1118
1119Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
1120 basis_ = basis;
1121
1122 // For each row which does not have a basic column, assign it to the
1123 // corresponding slack column.
1124 basis_.resize(num_rows_, kInvalidCol);
1125 for (RowIndex row(0); row < num_rows_; ++row) {
1126 if (basis_[row] == kInvalidCol) {
1127 basis_[row] = SlackColIndex(row);
1128 }
1129 }
1130
1131 GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
1132 PermuteBasis();
1133
1134 // Test that the upper bound on the condition number of the basis is not too
1135 // high. The threshold is not the result of any rigorous analysis; we just
1136 // prefer to revert to the all slack basis if the condition number of our
1137 // heuristic first basis seems bad. See for instance cond11.mps, where we get
1138 // an infinity upper bound.
1139 const Fractional condition_number_ub =
1140 basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
1141 if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
1142 const std::string error_message =
1143 absl::StrCat("The matrix condition number upper bound is too high: ",
1144 condition_number_ub);
1145 VLOG(1) << error_message;
1146 return Status(Status::ERROR_LU, error_message);
1147 }
1148
1149 // Everything is okay, finish the initialization.
1150 for (RowIndex row(0); row < num_rows_; ++row) {
1151 variables_info_.Update(basis_[row], VariableStatus::BASIC);
1152 }
1153 DCHECK(BasisIsConsistent());
1154
1155 // TODO(user): Maybe return an error status if this is too high. Note however
1156 // that if we want to do that, we need to reset variables_info_ to a
1157 // consistent state.
1158 variable_values_.RecomputeBasicVariableValues();
1159 if (VLOG_IS_ON(1)) {
1160 const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1161 if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
1162 VLOG(1) << absl::StrCat(
1163 "The primal residual of the initial basis is above the tolerance, ",
1164 variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
1165 }
1166 }
1167 return Status::OK();
1168}
1169
1170Status RevisedSimplex::Initialize(const LinearProgram& lp) {
1171 parameters_ = initial_parameters_;
1172 PropagateParameters();
1173
1174 // Calling InitializeMatrixAndTestIfUnchanged() first is important because
1175 // this is where num_rows_ and num_cols_ are computed.
1176 //
1177 // Note that these functions can't depend on use_dual_simplex() since we may
1178 // change it below.
1179 ColIndex num_new_cols(0);
1180 bool only_change_is_new_rows = false;
1181 bool only_change_is_new_cols = false;
1182 bool matrix_is_unchanged = true;
1183 bool only_new_bounds = false;
1184 if (solution_state_.IsEmpty() || !notify_that_matrix_is_unchanged_) {
1185 matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
1186 lp, &only_change_is_new_rows, &only_change_is_new_cols, &num_new_cols);
1187 only_new_bounds = only_change_is_new_cols && num_new_cols > 0 &&
1188 OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
1189 lp, num_new_cols);
1190 } else if (DEBUG_MODE) {
1191 CHECK(InitializeMatrixAndTestIfUnchanged(
1192 lp, &only_change_is_new_rows, &only_change_is_new_cols, &num_new_cols));
1193 }
1194 notify_that_matrix_is_unchanged_ = false;
1195 const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);
1196 const bool bounds_are_unchanged = InitializeBoundsAndTestIfUnchanged(lp);
1197
1198 // If parameters_.allow_simplex_algorithm_change() is true and we already have
1199 // a primal (resp. dual) feasible solution, then we use the primal (resp.
1200 // dual) algorithm since there is a good chance that it will be faster.
1201 if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
1202 if (objective_is_unchanged && !bounds_are_unchanged) {
1203 parameters_.set_use_dual_simplex(true);
1204 PropagateParameters();
1205 }
1206 if (bounds_are_unchanged && !objective_is_unchanged) {
1207 parameters_.set_use_dual_simplex(false);
1208 PropagateParameters();
1209 }
1210 }
1211
1212 InitializeObjectiveLimit(lp);
1213
1214 // Compute the variable names as soon as possible for logging.
1215 // TODO(user): do we really need to store them? We could just compute them
1216 // on the fly since we do not need the speed.
1217 if (VLOG_IS_ON(1)) {
1218 SetVariableNames();
1219 }
1220
1221 // Warm-start? This is supported only if the solution_state_ is non-empty,
1222 // i.e., this revised simplex i) was already used to solve a problem, or
1223 // ii) the solution state was provided externally. Note that the
1224 // solution_state_ may have nothing to do with the current problem, e.g.,
1225 // objective, matrix, and/or bounds had changed. So we support several
1226 // scenarios of warm-start depending on how the problem changed and which
1227 // simplex algorithm is used (primal or dual).
1228 bool solve_from_scratch = true;
1229
1230 // Try to perform a "quick" warm-start with no matrix factorization involved.
1231 if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
1232 if (!parameters_.use_dual_simplex()) {
1233 // With primal simplex, always clear dual norms and dual pricing.
1234 // Incrementality is supported only if the only change to the matrix and
1235 // bounds is the addition of new columns (the objective may change), and
1236 // all new columns have a bound equal to zero.
1237 dual_edge_norms_.Clear();
1238 dual_pricing_vector_.clear();
1239 if (matrix_is_unchanged && bounds_are_unchanged) {
1240 // TODO(user): Do not do that if objective_is_unchanged. Currently
1241 // this seems to break something. Investigate.
1242 reduced_costs_.ClearAndRemoveCostShifts();
1243 solve_from_scratch = false;
1244 } else if (only_change_is_new_cols && only_new_bounds) {
1245 InitializeVariableStatusesForWarmStart(solution_state_, num_new_cols);
1246 const ColIndex first_new_col(first_slack_col_ - num_new_cols);
1247 for (ColIndex& col_ref : basis_) {
1248 if (col_ref >= first_new_col) {
1249 col_ref += num_new_cols;
1250 }
1251 }
1252
1253 // Make sure the primal edge norms are recomputed from scratch.
1254 // TODO(user): only the norms of the new columns actually need to be
1255 // computed.
1256 primal_edge_norms_.Clear();
1257 reduced_costs_.ClearAndRemoveCostShifts();
1258 solve_from_scratch = false;
1259 }
1260 } else {
1261 // With dual simplex, always clear primal norms. Incrementality is
1262 // supported only if the objective remains the same (the matrix may
1263 // contain new rows and the bounds may change).
1264 primal_edge_norms_.Clear();
1265 if (objective_is_unchanged) {
1266 if (matrix_is_unchanged) {
1267 if (!bounds_are_unchanged) {
1268 InitializeVariableStatusesForWarmStart(solution_state_,
1269 ColIndex(0));
1270 variable_values_.RecomputeBasicVariableValues();
1271 }
1272 solve_from_scratch = false;
1273 } else if (only_change_is_new_rows) {
1274 // For the dual-simplex, we also perform a warm start if a couple of
1275 // new rows were added.
1276 InitializeVariableStatusesForWarmStart(solution_state_, ColIndex(0));
1277 dual_edge_norms_.ResizeOnNewRows(num_rows_);
1278
1279 // TODO(user): The reduced costs do not really need to be recomputed.
1280 // We just need to initialize the ones of the new slack variables to
1281 // 0.
1282 reduced_costs_.ClearAndRemoveCostShifts();
1283 dual_pricing_vector_.clear();
1284
1285 // Note that this needs to be done after the Clear() calls above.
1286 if (InitializeFirstBasis(basis_).ok()) {
1287 solve_from_scratch = false;
1288 }
1289 }
1290 }
1291 }
1292 }
1293
1294 // If we couldn't perform a "quick" warm start above, we can at least try to
1295 // reuse the variable statuses.
1296 const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
1297 if (solve_from_scratch && !solution_state_.IsEmpty()) {
1298 // If an external basis has been provided or if the matrix changed, we need
1299 // to perform more work, e.g., factorize the proposed basis and validate it.
1300 InitializeVariableStatusesForWarmStart(solution_state_, ColIndex(0));
1301 basis_.assign(num_rows_, kInvalidCol);
1302 RowIndex row(0);
1303 for (ColIndex col : variables_info_.GetIsBasicBitRow()) {
1304 basis_[row] = col;
1305 ++row;
1306 }
1307
1308 basis_factorization_.Clear();
1309 reduced_costs_.ClearAndRemoveCostShifts();
1310 primal_edge_norms_.Clear();
1311 dual_edge_norms_.Clear();
1312 dual_pricing_vector_.clear();
1313
1314 // TODO(user): If the basis is incomplete, we could complete it with
1315 // better slack variables than is done by InitializeFirstBasis() by
1316 // using a partial LU decomposition (see markowitz.h).
1317 if (InitializeFirstBasis(basis_).ok()) {
1318 solve_from_scratch = false;
1319 } else {
1320 if (log_info) {
1321 LOG(INFO) << "RevisedSimplex is not using the warm start "
1322 "basis because it is not factorizable.";
1323 }
1324 }
1325 }
1326
1327 if (solve_from_scratch) {
1328 if (log_info) LOG(INFO) << "Solve from scratch.";
1329 basis_factorization_.Clear();
1330 reduced_costs_.ClearAndRemoveCostShifts();
1331 primal_edge_norms_.Clear();
1332 dual_edge_norms_.Clear();
1333 dual_pricing_vector_.clear();
1334 GLOP_RETURN_IF_ERROR(CreateInitialBasis());
1335 } else {
1336 if (log_info) LOG(INFO) << "Incremental solve.";
1337 }
1338 DCHECK(BasisIsConsistent());
1339 return Status::OK();
1340}
1341
1342void RevisedSimplex::DisplayBasicVariableStatistics() {
1343 SCOPED_TIME_STAT(&function_stats_);
1344
1345 int num_fixed_variables = 0;
1346 int num_free_variables = 0;
1347 int num_variables_at_bound = 0;
1348 int num_slack_variables = 0;
1349 int num_infeasible_variables = 0;
1350
1351 const DenseRow& variable_values = variable_values_.GetDenseRow();
1352 const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1353 const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1354 for (RowIndex row(0); row < num_rows_; ++row) {
1355 const ColIndex col = basis_[row];
1356 const Fractional value = variable_values[col];
1357 if (variable_types[col] == VariableType::UNCONSTRAINED) {
1358 ++num_free_variables;
1359 }
1360 if (value > upper_bound_[col] + tolerance ||
1361 value < lower_bound_[col] - tolerance) {
1362 ++num_infeasible_variables;
1363 }
1364 if (col >= first_slack_col_) {
1365 ++num_slack_variables;
1366 }
1367 if (lower_bound_[col] == upper_bound_[col]) {
1368 ++num_fixed_variables;
1369 } else if (variable_values[col] == lower_bound_[col] ||
1370 variable_values[col] == upper_bound_[col]) {
1371 ++num_variables_at_bound;
1372 }
1373 }
1374
1375 VLOG(1) << "Basis size: " << num_rows_;
1376 VLOG(1) << "Number of basic infeasible variables: "
1377 << num_infeasible_variables;
1378 VLOG(1) << "Number of basic slack variables: " << num_slack_variables;
1379 VLOG(1) << "Number of basic variables at bound: " << num_variables_at_bound;
1380 VLOG(1) << "Number of basic fixed variables: " << num_fixed_variables;
1381 VLOG(1) << "Number of basic free variables: " << num_free_variables;
1382}
1383
1384void RevisedSimplex::SaveState() {
1385 DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
1386 solution_state_.statuses = variables_info_.GetStatusRow();
1387 solution_state_has_been_set_externally_ = false;
1388}
1389
1390RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1391 DenseBooleanColumn contains_data(num_rows_, false);
1392 for (ColIndex col(0); col < num_cols_; ++col) {
1393 for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1394 contains_data[e.row()] = true;
1395 }
1396 }
1397 RowIndex num_empty_rows(0);
1398 for (RowIndex row(0); row < num_rows_; ++row) {
1399 if (!contains_data[row]) {
1400 ++num_empty_rows;
1401 VLOG(1) << "Row " << row << " is empty.";
1402 }
1403 }
1404 return num_empty_rows;
1405}
1406
1407ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1408 ColIndex num_empty_cols(0);
1409 for (ColIndex col(0); col < num_cols_; ++col) {
1410 if (compact_matrix_.column(col).IsEmpty()) {
1411 ++num_empty_cols;
1412 VLOG(1) << "Column " << col << " is empty.";
1413 }
1414 }
1415 return num_empty_cols;
1416}
1417
1418void RevisedSimplex::CorrectErrorsOnVariableValues() {
1419 SCOPED_TIME_STAT(&function_stats_);
1420 DCHECK(basis_factorization_.IsRefactorized());
1421
1422 // TODO(user): The primal residual error does not change if we take degenerate
1423 // steps or if we do not change the variable values. No need to recompute it
1424 // in this case.
1425 const Fractional primal_residual =
1426 variable_values_.ComputeMaximumPrimalResidual();
1427
1428 // If the primal_residual is within the tolerance, no need to recompute
1429 // the basic variable values with a better precision.
1430 if (primal_residual >= parameters_.harris_tolerance_ratio() *
1431 parameters_.primal_feasibility_tolerance()) {
1432 variable_values_.RecomputeBasicVariableValues();
1433 VLOG(1) << "Primal infeasibility (bounds error) = "
1434 << variable_values_.ComputeMaximumPrimalInfeasibility()
1435 << ", Primal residual |A.x - b| = "
1436 << variable_values_.ComputeMaximumPrimalResidual();
1437 }
1438}
1439
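// ComputeVariableValuesError() below fills error_ with the residual of the
// equality constraints for the current variable values: since the problem is
// in equation form (slack columns included, right-hand side folded into the
// slack bounds), error_[row] is minus the activity of the row, and a zero
// entry means the row is satisfied.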
1440void RevisedSimplex::ComputeVariableValuesError() {
1441 SCOPED_TIME_STAT(&function_stats_);
1442 error_.AssignToZero(num_rows_);
1443 const DenseRow& variable_values = variable_values_.GetDenseRow();
1444 for (ColIndex col(0); col < num_cols_; ++col) {
1445 const Fractional value = variable_values[col];
1446 compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1447 }
1448}
1449
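// ComputeDirection() below solves B * direction_ = A_col for the entering
// column, i.e. direction_ = B^{-1} A_col, records which rows of the result are
// non-zero, and stores its infinity norm in direction_infinity_norm_.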
1450void RevisedSimplex::ComputeDirection(ColIndex col) {
1451 SCOPED_TIME_STAT(&function_stats_);
1453 basis_factorization_.RightSolveForProblemColumn(col, &direction_);
1454 direction_infinity_norm_ = 0.0;
1455 if (direction_.non_zeros.empty()) {
1456 // We still compute the direction non-zeros because our code relies on it.
1457 for (RowIndex row(0); row < num_rows_; ++row) {
1458 const Fractional value = direction_[row];
1459 if (value != 0.0) {
1460 direction_.non_zeros.push_back(row);
1461 direction_infinity_norm_ =
1462 std::max(direction_infinity_norm_, std::abs(value));
1463 }
1464 }
1465 } else {
1466 for (const auto e : direction_) {
1467 direction_infinity_norm_ =
1468 std::max(direction_infinity_norm_, std::abs(e.coefficient()));
1469 }
1470 }
1471 IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
1472 num_rows_ == 0 ? 0.0
1473 : static_cast<double>(direction_.non_zeros.size()) /
1474 static_cast<double>(num_rows_.value())));
1475}
1476
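// ComputeDirectionError() below returns ||A_col - B * direction_||_inf, i.e.
// the residual of the linear solve performed by ComputeDirection(), which is
// used to detect when the factorization has become too imprecise.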
1477Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
1478 SCOPED_TIME_STAT(&function_stats_);
1479 compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
1480 for (const auto e : direction_) {
1481 compact_matrix_.ColumnAddMultipleToDenseColumn(basis_[e.row()],
1482 -e.coefficient(), &error_);
1483 }
1484 return InfinityNorm(error_);
1485}
1486
1487template <bool is_entering_reduced_cost_positive>
1488Fractional RevisedSimplex::GetRatio(RowIndex row) const {
1489 const ColIndex col = basis_[row];
1490 const Fractional direction = direction_[row];
1491 const Fractional value = variable_values_.Get(col);
1492 DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1493 DCHECK_NE(direction, 0.0);
1494 if (is_entering_reduced_cost_positive) {
1495 if (direction > 0.0) {
1496 return (upper_bound_[col] - value) / direction;
1497 } else {
1498 return (lower_bound_[col] - value) / direction;
1499 }
1500 } else {
1501 if (direction > 0.0) {
1502 return (value - lower_bound_[col]) / direction;
1503 } else {
1504 return (value - upper_bound_[col]) / direction;
1505 }
1506 }
1507}
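// [Editor's sketch, not in the original file.] GetRatio() above is the classic
// primal ratio test bound for one basic variable: the largest nonnegative step
// that keeps 'value' inside [lower, upper] when it moves along +direction
// (entering reduced cost > 0) or along -direction otherwise. A hypothetical
// standalone version, assuming plain IEEE doubles and a non-zero direction:
namespace example_primal_ratio {
inline double PrimalRatio(double value, double lower, double upper,
                          double direction, bool entering_rc_positive) {
  // With a positive entering reduced cost the basic variable moves by
  // +direction * step; otherwise it moves by -direction * step.
  if (entering_rc_positive) {
    return direction > 0.0 ? (upper - value) / direction
                           : (lower - value) / direction;
  }
  return direction > 0.0 ? (value - lower) / direction
                         : (value - upper) / direction;
}
}  // namespace example_primal_ratio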
1508
1509template <bool is_entering_reduced_cost_positive>
1510Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
1511 Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
1512 SCOPED_TIME_STAT(&function_stats_);
1513 const Fractional harris_tolerance =
1514 parameters_.harris_tolerance_ratio() *
1515 parameters_.primal_feasibility_tolerance();
1516 const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
1517 parameters_.primal_feasibility_tolerance();
1518
1519 // Initially, we can skip any variable with a ratio greater than
1520 // bound_flip_ratio since it always seems better to choose the
1521 // bound-flip over such a leaving variable.
1522 Fractional harris_ratio = bound_flip_ratio;
1523 leaving_candidates->Clear();
1524
1525 // If the basis is refactorized, then we should have everything with a good
1526 // precision, so we only consider "acceptable" pivots. Otherwise we consider
1527 // all the entries, and if the algorithm returns a pivot that is too small, we
1528 // will refactorize and recompute the relevant quantities.
1529 const Fractional threshold = basis_factorization_.IsRefactorized()
1530 ? parameters_.minimum_acceptable_pivot()
1531 : parameters_.ratio_test_zero_threshold();
1532
1533 for (const auto e : direction_) {
1534 const Fractional magnitude = std::abs(e.coefficient());
1535 if (magnitude <= threshold) continue;
1536 const Fractional ratio =
1537 GetRatio<is_entering_reduced_cost_positive>(e.row());
1538 if (ratio <= harris_ratio) {
1539 leaving_candidates->SetCoefficient(e.row(), ratio);
1540
1541 // The second max() makes sure harris_ratio is lower bounded by a small
1542 // positive value. The more classical approach is to bound it by 0.0 but
1543 // since we will always perform a small positive step, we allow any
1544 // variable to go a bit more out of bound (even if it is past the harris
1545 // tolerance). This increases the number of candidates and allows us to
1546 // choose a more numerically stable pivot.
1547 //
1548 // Note that at least lower bounding it by 0.0 is really important on
1549 // numerically difficult problems because it helps in the choice of a
1550 // stable pivot.
1551 harris_ratio = std::min(harris_ratio,
1552 std::max(minimum_delta / magnitude,
1553 ratio + harris_tolerance / magnitude));
1554 }
1555 }
1556 return harris_ratio;
1557}
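// [Editor's sketch, not in the original file.] The two-pass Harris ratio test
// above can be summarized on plain arrays: pass 1 relaxes every candidate
// ratio by tolerance / |pivot| and keeps the minimum (starting from the
// bound-flip ratio), pass 2 keeps, among the rows whose exact ratio fits under
// that relaxed minimum, the one with the largest pivot magnitude. This
// hypothetical helper ignores the degenerate-ministep lower bound and the
// refactorization threshold used by the real code:
namespace example_harris {
inline int ChooseLeavingRowTwoPass(const std::vector<double>& ratios,
                                   const std::vector<double>& pivots,
                                   double bound_flip_ratio, double tolerance) {
  // Pass 1: relaxed minimum ratio.
  double relaxed_min = bound_flip_ratio;
  for (size_t i = 0; i < ratios.size(); ++i) {
    relaxed_min =
        std::min(relaxed_min, ratios[i] + tolerance / std::abs(pivots[i]));
  }
  // Pass 2: most stable pivot among the remaining candidates.
  int best = -1;
  double best_magnitude = 0.0;
  for (size_t i = 0; i < ratios.size(); ++i) {
    if (ratios[i] > relaxed_min) continue;
    if (std::abs(pivots[i]) > best_magnitude) {
      best_magnitude = std::abs(pivots[i]);
      best = static_cast<int>(i);
    }
  }
  return best;  // -1 means no row qualifies and the bound-flip wins.
}
}  // namespace example_harris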
1558
1559namespace {
1560
1561// Returns true if the candidate ratio is supposed to be more stable than the
1562// current ratio (or if the two are equal).
1563// The idea here is to take, by order of preference:
1564 // - the minimum positive ratio in order to introduce a primal infeasibility
1565// which is as small as possible.
1566// - or the least negative one in order to have the smallest bound shift
1567// possible on the leaving variable.
1568bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1569 if (current >= 0.0) {
1570 return candidate >= 0.0 && candidate <= current;
1571 } else {
1572 return candidate >= current;
1573 }
1574}
1575
1576} // namespace
1577
1578// Ratio-test or Quotient-test. Choose the row of the leaving variable.
1579// Known as CHUZR or CHUZRO in FORTRAN codes.
1580Status RevisedSimplex::ChooseLeavingVariableRow(
1581 ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1582 RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
1583 SCOPED_TIME_STAT(&function_stats_);
1584 GLOP_RETURN_ERROR_IF_NULL(refactorize);
1585 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1586 GLOP_RETURN_ERROR_IF_NULL(step_length);
1587 DCHECK_COL_BOUNDS(entering_col);
1588 DCHECK_NE(0.0, reduced_cost);
1589
1590 // A few cases will cause the test to be recomputed from the beginning.
1591 int stats_num_leaving_choices = 0;
1592 equivalent_leaving_choices_.clear();
1593 while (true) {
1594 stats_num_leaving_choices = 0;
1595
1596 // We initialize current_ratio with the maximum step the entering variable
1597 // can take (bound-flip). Note that we do not use tolerance here.
1598 const Fractional entering_value = variable_values_.Get(entering_col);
1599 Fractional current_ratio =
1600 (reduced_cost > 0.0) ? entering_value - lower_bound_[entering_col]
1601 : upper_bound_[entering_col] - entering_value;
1602 DCHECK_GT(current_ratio, 0.0);
1603
1604 // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
1605 // actually computes the minimum leaving ratio of all the variables. This is
1606 // the same as the 'classic' ratio test.
1607 const Fractional harris_ratio =
1608 (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
1609 current_ratio, &leaving_candidates_)
1610 : ComputeHarrisRatioAndLeavingCandidates<false>(
1611 current_ratio, &leaving_candidates_);
1612
1613 // If the bound-flip is a viable solution (i.e. it doesn't move the basic
1614 // variable too much out of bounds), we take it as it is always stable and
1615 // fast.
1616 if (current_ratio <= harris_ratio) {
1617 *leaving_row = kInvalidRow;
1618 *step_length = current_ratio;
1619 break;
1620 }
1621
1622 // Second pass of the Harris ratio test. Amongst the variables with 'ratio
1623 // <= harris_ratio', we choose the leaving row with the largest coefficient.
1624 //
1625 // This has a big impact, because picking a leaving variable with a small
1626 // direction_[row] is the main source of Abnormal LU errors.
1627 Fractional pivot_magnitude = 0.0;
1628 stats_num_leaving_choices = 0;
1629 *leaving_row = kInvalidRow;
1630 equivalent_leaving_choices_.clear();
1631 for (const SparseColumn::Entry e : leaving_candidates_) {
1632 const Fractional ratio = e.coefficient();
1633 if (ratio > harris_ratio) continue;
1634 ++stats_num_leaving_choices;
1635 const RowIndex row = e.row();
1636
1637 // If the magnitudes are the same, we choose the leaving variable with
1638 // what is probably the more stable ratio, see
1639 // IsRatioMoreOrEquallyStable().
1640 const Fractional candidate_magnitude = std::abs(direction_[row]);
1641 if (candidate_magnitude < pivot_magnitude) continue;
1642 if (candidate_magnitude == pivot_magnitude) {
1643 if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
1644 if (ratio == current_ratio) {
1645 DCHECK_NE(kInvalidRow, *leaving_row);
1646 equivalent_leaving_choices_.push_back(row);
1647 continue;
1648 }
1649 }
1650 equivalent_leaving_choices_.clear();
1651 current_ratio = ratio;
1652 pivot_magnitude = candidate_magnitude;
1653 *leaving_row = row;
1654 }
1655
1656 // Break the ties randomly.
1657 if (!equivalent_leaving_choices_.empty()) {
1658 equivalent_leaving_choices_.push_back(*leaving_row);
1659 *leaving_row =
1660 equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1661 0, equivalent_leaving_choices_.size() - 1)(random_)];
1662 }
1663
1664 // Since we took care of the bound-flip at the beginning, at this point
1665 // we have a valid leaving row.
1666 DCHECK_NE(kInvalidRow, *leaving_row);
1667
1668 // A variable already outside one of its bounds +/- tolerance is considered
1669 // at its bound and its ratio is zero. Not doing this may lead to a step
1670 // that moves the objective in the wrong direction. We may want to allow
1671 // such steps, but then we will need to check that it doesn't break the
1672 // bounds of the other variables.
1673 if (current_ratio <= 0.0) {
1674 // Instead of doing a zero step, we do a small positive step. This
1675 // helps on degenerate problems.
1676 const Fractional minimum_delta =
1677 parameters_.degenerate_ministep_factor() *
1678 parameters_.primal_feasibility_tolerance();
1679 *step_length = minimum_delta / pivot_magnitude;
1680 } else {
1681 *step_length = current_ratio;
1682 }
1683
1684 // Note(user): Testing the pivot at each iteration is useful for debugging
1685 // an LU factorization problem. Remove the 'false' below if you need to
1686 // investigate this; keeping it makes sure the call is compiled away.
1687 if (/* DISABLES CODE */ (false)) {
1688 TestPivot(entering_col, *leaving_row);
1689 }
1690
1691 // We try various "heuristics" to avoid a small pivot.
1692 //
1693 // The smaller 'direction_[*leaving_row]', the less precise
1694 // it is. So we want to avoid pivoting by such a row. Small pivots lead to
1695 // ill-conditioned bases or even to matrices that are not a basis at all if
1696 // the actual (infinite-precision) coefficient is zero.
1697 //
1698 // TODO(user): We may have to choose another entering column if
1699 // we cannot prevent pivoting by a small pivot.
1700 // (Chvatal, p.115, about epsilon2.)
1701 if (pivot_magnitude <
1702 parameters_.small_pivot_threshold() * direction_infinity_norm_) {
1703 // The first countermeasure is to recompute everything to the best
1704 // precision we can in the hope of avoiding such a choice. Note that this
1705 // helps a lot on the Netlib problems.
1706 if (!basis_factorization_.IsRefactorized()) {
1707 VLOG(1) << "Refactorizing to avoid pivoting by "
1708 << direction_[*leaving_row]
1709 << " direction_infinity_norm_ = " << direction_infinity_norm_
1710 << " reduced cost = " << reduced_cost;
1711 *refactorize = true;
1712 return Status::OK();
1713 }
1714
1715 // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
1716 // we know that this pivot will still have an acceptable magnitude.
1717 //
1718 // TODO(user): An issue left to fix is that if there is no such pivot at
1719 // all, then we will report unbounded even if this is not really the case.
1720 // As of 2018/07/18, this happens on l30.mps.
1721 VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
1722 << " direction_infinity_norm_ = " << direction_infinity_norm_
1723 << " reduced cost = " << reduced_cost;
1724 DCHECK_GE(std::abs(direction_[*leaving_row]),
1725 parameters_.minimum_acceptable_pivot());
1726 IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
1727 }
1728 break;
1729 }
1730
1731 // Update the target bound.
1732 if (*leaving_row != kInvalidRow) {
1733 const bool is_reduced_cost_positive = (reduced_cost > 0.0);
1734 const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
1735 *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
1736 ? upper_bound_[basis_[*leaving_row]]
1737 : lower_bound_[basis_[*leaving_row]];
1738 }
1739
1740 // Stats.
1741 IF_STATS_ENABLED({
1742 ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
1743 if (!equivalent_leaving_choices_.empty()) {
1744 ratio_test_stats_.num_perfect_ties.Add(
1745 equivalent_leaving_choices_.size());
1746 }
1747 if (*leaving_row != kInvalidRow) {
1748 ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
1749 }
1750 });
1751 return Status::OK();
1752}
1753
1754namespace {
1755
1756// Store a row with its ratio, coefficient magnitude and target bound. This is
1757// used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
1758// details.
1759struct BreakPoint {
1760 BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
1761 Fractional _target_bound)
1762 : row(_row),
1763 ratio(_ratio),
1764 coeff_magnitude(_coeff_magnitude),
1765 target_bound(_target_bound) {}
1766
1767 // We want to process the breakpoints by increasing ratio and decreasing
1768 // coefficient magnitude (if the ratios are the same). Returns false if "this"
1769 // is before "other" in a priority queue.
1770 bool operator<(const BreakPoint& other) const {
1771 if (ratio == other.ratio) {
1772 if (coeff_magnitude == other.coeff_magnitude) {
1773 return row > other.row;
1774 }
1775 return coeff_magnitude < other.coeff_magnitude;
1776 }
1777 return ratio > other.ratio;
1778 }
1779
1780 RowIndex row;
1781 Fractional ratio;
1782 Fractional coeff_magnitude;
1783 Fractional target_bound;
1784};
1785
1786} // namespace
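// [Editor's sketch, not in the original file.] Because BreakPoint::operator<
// returns true when "this" has a larger ratio (or, on equal ratios, a smaller
// coefficient magnitude), std::make_heap() / std::pop_heap() -- which maintain
// a max-heap -- surface the breakpoint with the smallest ratio first and,
// among equal ratios, the one with the largest coefficient magnitude. With
// hypothetical data:
//
//   std::vector<BreakPoint> heap;
//   heap.push_back(BreakPoint(RowIndex(0), /*ratio=*/2.0, /*magnitude=*/0.5, 1.0));
//   heap.push_back(BreakPoint(RowIndex(1), /*ratio=*/1.0, /*magnitude=*/0.1, 0.0));
//   heap.push_back(BreakPoint(RowIndex(2), /*ratio=*/1.0, /*magnitude=*/0.9, 0.0));
//   std::make_heap(heap.begin(), heap.end());
//   // heap.front() is now the row 2 breakpoint (ratio 1.0, magnitude 0.9).
//   std::pop_heap(heap.begin(), heap.end());
//   heap.pop_back();
//   // heap.front() is now the row 1 breakpoint (ratio 1.0, magnitude 0.1).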
1787
1788void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
1789 ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1790 RowIndex* leaving_row, Fractional* step_length,
1791 Fractional* target_bound) const {
1792 SCOPED_TIME_STAT(&function_stats_);
1793 RETURN_IF_NULL(refactorize);
1794 RETURN_IF_NULL(leaving_row);
1795 RETURN_IF_NULL(step_length);
1796 DCHECK_COL_BOUNDS(entering_col);
1797 DCHECK_NE(0.0, reduced_cost);
1798
1799 // We initialize current_ratio with the maximum step the entering variable
1800 // can take (bound-flip). Note that we do not use tolerance here.
1801 const Fractional entering_value = variable_values_.Get(entering_col);
1802 Fractional current_ratio = (reduced_cost > 0.0)
1803 ? entering_value - lower_bound_[entering_col]
1804 : upper_bound_[entering_col] - entering_value;
1805 DCHECK_GT(current_ratio, 0.0);
1806
1807 std::vector<BreakPoint> breakpoints;
1808 const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1809 for (const auto e : direction_) {
1810 const Fractional direction =
1811 reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
1812 const Fractional magnitude = std::abs(direction);
1813 if (magnitude < tolerance) continue;
1814
1815 // Computes by how much we can add 'direction' to the basic variable value
1816 // with index 'row' until its primal feasibility status changes. That is
1817 // from infeasible to feasible or from feasible to infeasible. Note that the
1818 // transition infeasible->feasible->infeasible is possible. We use
1819 // tolerances here, but when the step will be performed, it will move the
1820 // variable to the target bound (possibly taking a small negative step).
1821 //
1822 // Note(user): The negative step will only happen when the leaving variable
1823 // was slightly infeasible (less than tolerance). Moreover, the overall
1824 // infeasibility will not necessarily increase since it doesn't take into
1825 // account all the variables with an infeasibility smaller than the
1826 // tolerance, and here we will at least improve the one of the leaving
1827 // variable.
1828 const ColIndex col = basis_[e.row()];
1829 DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1830
1831 const Fractional value = variable_values_.Get(col);
1832 const Fractional lower_bound = lower_bound_[col];
1833 const Fractional upper_bound = upper_bound_[col];
1834 const Fractional to_lower = (lower_bound - tolerance - value) / direction;
1835 const Fractional to_upper = (upper_bound + tolerance - value) / direction;
1836
1837 // Enqueue the possible transitions. Note that the second tests exclude the
1838 // case where to_lower or to_upper are infinite.
1839 if (to_lower >= 0.0 && to_lower < current_ratio) {
1840 breakpoints.push_back(
1841 BreakPoint(e.row(), to_lower, magnitude, lower_bound));
1842 }
1843 if (to_upper >= 0.0 && to_upper < current_ratio) {
1844 breakpoints.push_back(
1845 BreakPoint(e.row(), to_upper, magnitude, upper_bound));
1846 }
1847 }
1848
1849 // Order the breakpoints by increasing ratio and decreasing coefficient
1850 // magnitude (if the ratios are the same).
1851 std::make_heap(breakpoints.begin(), breakpoints.end());
1852
1853 // Select the last breakpoint that still improves the infeasibility and has
1854 // the largest coefficient magnitude.
1855 Fractional improvement = std::abs(reduced_cost);
1856 Fractional best_magnitude = 0.0;
1857 *leaving_row = kInvalidRow;
1858 while (!breakpoints.empty()) {
1859 const BreakPoint top = breakpoints.front();
1860 // TODO(user): consider using >= here. That will lead to bigger ratio and
1861 // hence a better impact on the infeasibility. The drawback is that more
1862 // effort may be needed to update the reduced costs.
1863 //
1864 // TODO(user): Use a random tie breaking strategy for BreakPoint with
1865 // same ratio and same coefficient magnitude? Koberstein explains in his PhD
1866 // that it helped on the dual-simplex.
1867 if (top.coeff_magnitude > best_magnitude) {
1868 *leaving_row = top.row;
1869 current_ratio = top.ratio;
1870 best_magnitude = top.coeff_magnitude;
1871 *target_bound = top.target_bound;
1872 }
1873
1874 // As long as the sum of primal infeasibilities is decreasing, we look for
1875 // pivots that are numerically more stable.
1876 improvement -= top.coeff_magnitude;
1877 if (improvement <= 0.0) break;
1878 std::pop_heap(breakpoints.begin(), breakpoints.end());
1879 breakpoints.pop_back();
1880 }
1881
1882 // Try to avoid a small pivot by refactorizing.
1883 if (*leaving_row != kInvalidRow) {
1884 const Fractional threshold =
1885 parameters_.small_pivot_threshold() * direction_infinity_norm_;
1886 if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
1887 *refactorize = true;
1888 return;
1889 }
1890 }
1891 *step_length = current_ratio;
1892}
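// [Editor's sketch, not in the original file.] The selection loop above keeps
// popping breakpoints while the remaining improvement
// |reduced_cost| - sum(|coeff_magnitude|) stays positive, and remembers the
// breakpoint with the largest coefficient magnitude seen so far. On
// hypothetical, already heap-ordered data:
//
//   ratios     = {0.5, 1.0, 2.0}
//   magnitudes = {0.2, 1.5, 3.0}
//   improvement = |reduced_cost| = 2.0
//   step 1: pick ratio 0.5 (magnitude 0.2), improvement becomes 1.8
//   step 2: 1.5 > 0.2, pick ratio 1.0,      improvement becomes 0.3
//   step 3: 3.0 > 1.5, pick ratio 2.0,      improvement becomes -2.7, stop
//
// The chosen leaving row is the ratio-2.0 breakpoint: the most stable pivot
// that still decreases the sum of primal infeasibilities.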
1893
1894// This implements the pricing step for the dual simplex.
1895Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
1896 Fractional* cost_variation,
1897 Fractional* target_bound) {
1898 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1899 GLOP_RETURN_ERROR_IF_NULL(cost_variation);
1900
1901 // TODO(user): Reuse parameters_.optimization_rule() to decide if we use
1902 // steepest edge or the normal Dantzig pricing.
1903 const DenseColumn& squared_norm = dual_edge_norms_.GetEdgeSquaredNorms();
1904 SCOPED_TIME_STAT(&function_stats_);
1905
1906 *leaving_row = kInvalidRow;
1907 Fractional best_price(0.0);
1908 const DenseColumn& squared_infeasibilities =
1909 variable_values_.GetPrimalSquaredInfeasibilities();
1910 equivalent_leaving_choices_.clear();
1911 for (const RowIndex row : variable_values_.GetPrimalInfeasiblePositions()) {
1912 const Fractional scaled_best_price = best_price * squared_norm[row];
1913 if (squared_infeasibilities[row] >= scaled_best_price) {
1914 if (squared_infeasibilities[row] == scaled_best_price) {
1915 DCHECK_NE(*leaving_row, kInvalidRow);
1916 equivalent_leaving_choices_.push_back(row);
1917 continue;
1918 }
1919 equivalent_leaving_choices_.clear();
1920 best_price = squared_infeasibilities[row] / squared_norm[row];
1921 *leaving_row = row;
1922 }
1923 }
1924
1925 // Break the ties randomly.
1926 if (!equivalent_leaving_choices_.empty()) {
1927 equivalent_leaving_choices_.push_back(*leaving_row);
1928 *leaving_row =
1929 equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1930 0, equivalent_leaving_choices_.size() - 1)(random_)];
1931 }
1932
1933 // Return right away if there is no leaving variable.
1934 // Fill cost_variation and target_bound otherwise.
1935 if (*leaving_row == kInvalidRow) return Status::OK();
1936 const ColIndex leaving_col = basis_[*leaving_row];
1937 const Fractional value = variable_values_.Get(leaving_col);
1938 if (value < lower_bound_[leaving_col]) {
1939 *cost_variation = lower_bound_[leaving_col] - value;
1940 *target_bound = lower_bound_[leaving_col];
1941 DCHECK_GT(*cost_variation, 0.0);
1942 } else {
1943 *cost_variation = upper_bound_[leaving_col] - value;
1944 *target_bound = upper_bound_[leaving_col];
1945 DCHECK_LT(*cost_variation, 0.0);
1946 }
1947 return Status::OK();
1948}
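// [Editor's sketch, not in the original file.] The pricing loop above selects
// the row maximizing infeasibility(row)^2 / squared_norm(row), but avoids one
// division per row by comparing infeasibility^2 against
// best_price * squared_norm. A hypothetical standalone version of the same
// trick (ignoring the random tie-breaking of the real code):
namespace example_dual_pricing {
inline int ArgMaxPriceRatio(const std::vector<double>& squared_infeasibility,
                            const std::vector<double>& squared_norm) {
  int best_row = -1;
  double best_price = 0.0;  // Best infeasibility^2 / squared_norm seen so far.
  for (size_t row = 0; row < squared_norm.size(); ++row) {
    if (squared_infeasibility[row] > best_price * squared_norm[row]) {
      best_price = squared_infeasibility[row] / squared_norm[row];
      best_row = static_cast<int>(row);
    }
  }
  return best_row;
}
}  // namespace example_dual_pricing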
1949
1950namespace {
1951
1952// Returns true if a basic variable with the given cost and type is to be considered
1953// as a leaving candidate for the dual phase I. This utility function is used
1954// to keep is_dual_entering_candidate_ up to date.
1955bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
1956 Fractional threshold) {
1957 if (cost == 0.0) return false;
1958 return type == VariableType::UPPER_AND_LOWER_BOUNDED ||
1959 type == VariableType::FIXED_VARIABLE ||
1960 (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
1961 (type == VariableType::LOWER_BOUNDED && cost > threshold);
1962}
1963
1964} // namespace
1965
1966void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
1967 ColIndex entering_col) {
1968 SCOPED_TIME_STAT(&function_stats_);
1969 const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
1970 const Fractional threshold = parameters_.ratio_test_zero_threshold();
1971
1972 // Convert the dual_pricing_vector_ from the old basis into the new one (which
1973 // is the same as multiplying it by an Eta matrix corresponding to the
1974 // direction).
1975 const Fractional step =
1976 dual_pricing_vector_[leaving_row] / direction_[leaving_row];
1977 for (const auto e : direction_) {
1978 dual_pricing_vector_[e.row()] -= e.coefficient() * step;
1979 is_dual_entering_candidate_.Set(
1980 e.row(), IsDualPhaseILeavingCandidate(dual_pricing_vector_[e.row()],
1981 variable_type[basis_[e.row()]],
1982 threshold));
1983 }
1984 dual_pricing_vector_[leaving_row] = step;
1985
1986 // The entering_col which was dual-infeasible is now dual-feasible, so we
1987 // have to remove it from the infeasibility sum.
1988 dual_pricing_vector_[leaving_row] -=
1989 dual_infeasibility_improvement_direction_[entering_col];
1990 if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
1991 --num_dual_infeasible_positions_;
1992 }
1993 dual_infeasibility_improvement_direction_[entering_col] = 0.0;
1994
1995 // The leaving variable will also be dual-feasible.
1996 dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;
1997
1998 // Update the leaving row entering candidate status.
1999 is_dual_entering_candidate_.Set(
2000 leaving_row,
2001 IsDualPhaseILeavingCandidate(dual_pricing_vector_[leaving_row],
2002 variable_type[entering_col], threshold));
2003}
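// [Editor's sketch, not in the original file.] The conversion performed at the
// top of DualPhaseIUpdatePrice() is the usual eta-style update: with
// d = direction_ and r* the leaving row,
//   step   = p[r*] / d[r*]
//   p[r]  -= d[r] * step   (for every non-zero d[r])
//   p[r*]  = step
// so the pricing vector stays consistent with the new basis without a full
// recomputation. A dense, hypothetical version of just that part:
namespace example_eta_update {
inline void ApplyEtaUpdate(const std::vector<double>& direction,
                           int leaving_row, std::vector<double>* pricing) {
  const double step = (*pricing)[leaving_row] / direction[leaving_row];
  for (size_t row = 0; row < direction.size(); ++row) {
    (*pricing)[row] -= direction[row] * step;
  }
  (*pricing)[leaving_row] = step;
}
}  // namespace example_eta_update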
2004
2005template <typename Cols>
2006void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
2007 const Cols& cols) {
2008 SCOPED_TIME_STAT(&function_stats_);
2009 bool something_to_do = false;
2010 const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
2011 const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
2012 const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2013 const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
2014 for (ColIndex col : cols) {
2015 const Fractional reduced_cost = reduced_costs[col];
2016 const Fractional sign =
2017 (can_increase.IsSet(col) && reduced_cost < -tolerance) ? 1.0
2018 : (can_decrease.IsSet(col) && reduced_cost > tolerance) ? -1.0
2019 : 0.0;
2020 if (sign != dual_infeasibility_improvement_direction_[col]) {
2021 if (sign == 0.0) {
2022 --num_dual_infeasible_positions_;
2023 } else if (dual_infeasibility_improvement_direction_[col] == 0.0) {
2024 ++num_dual_infeasible_positions_;
2025 }
2026 if (!something_to_do) {
2027 initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
2028 initially_all_zero_scratchpad_.ClearSparseMask();
2029 initially_all_zero_scratchpad_.non_zeros.clear();
2030 something_to_do = true;
2031 }
2033 col, sign - dual_infeasibility_improvement_direction_[col],
2034 &initially_all_zero_scratchpad_);
2035 dual_infeasibility_improvement_direction_[col] = sign;
2036 }
2037 }
2038 if (something_to_do) {
2039 initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
2040 initially_all_zero_scratchpad_.ClearSparseMask();
2041
2042 const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
2043 const Fractional threshold = parameters_.ratio_test_zero_threshold();
2044 basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
2045 if (initially_all_zero_scratchpad_.non_zeros.empty()) {
2046 for (RowIndex row(0); row < num_rows_; ++row) {
2047 if (initially_all_zero_scratchpad_[row] == 0.0) continue;
2048 dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
2049 is_dual_entering_candidate_.Set(
2050 row, IsDualPhaseILeavingCandidate(dual_pricing_vector_[row],
2051 variable_type[basis_[row]],
2052 threshold));
2053 }
2054 initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
2055 } else {
2056 for (const auto e : initially_all_zero_scratchpad_) {
2057 dual_pricing_vector_[e.row()] += e.coefficient();
2058 initially_all_zero_scratchpad_[e.row()] = 0.0;
2059 is_dual_entering_candidate_.Set(
2060 e.row(), IsDualPhaseILeavingCandidate(
2061 dual_pricing_vector_[e.row()],
2062 variable_type[basis_[e.row()]], threshold));
2063 }
2064 }
2065 initially_all_zero_scratchpad_.non_zeros.clear();
2066 }
2067}
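// [Editor's sketch, not in the original file.] The "sign" computed in the loop
// above encodes, for each column, the direction in which its reduced cost must
// move to become dual-feasible: +1.0 when the variable can increase and its
// reduced cost is below -tolerance, -1.0 when it can decrease and its reduced
// cost is above +tolerance, and 0.0 when the column is already dual-feasible.
// As a hypothetical standalone predicate:
namespace example_dual_phase1 {
inline double DualInfeasibilitySign(bool can_increase, bool can_decrease,
                                    double reduced_cost, double tolerance) {
  if (can_increase && reduced_cost < -tolerance) return 1.0;
  if (can_decrease && reduced_cost > tolerance) return -1.0;
  return 0.0;
}
}  // namespace example_dual_phase1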
2068
2069Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
2070 RowIndex* leaving_row, Fractional* cost_variation,
2071 Fractional* target_bound) {
2072 SCOPED_TIME_STAT(&function_stats_);
2073 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2074 GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2075
2076 // dual_infeasibility_improvement_direction_ is zero for dual-feasible
2077 // positions and contains the sign in which the reduced cost of this column
2078 // needs to move to improve the feasibility otherwise (+1 or -1).
2079 //
2080 // Its current value was the one used to compute dual_pricing_vector_ and
2081 // was updated accordingly by DualPhaseIUpdatePrice().
2082 //
2083 // If some variables changed their dual-feasibility status during the last
2084 // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
2085 // take them into account.
2086 if (reduced_costs_.AreReducedCostsRecomputed() ||
2087 dual_pricing_vector_.empty()) {
2088 // Recompute everything from scratch.
2089 num_dual_infeasible_positions_ = 0;
2090 dual_pricing_vector_.AssignToZero(num_rows_);
2091 is_dual_entering_candidate_.ClearAndResize(num_rows_);
2092 dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
2093 DualPhaseIUpdatePriceOnReducedCostChange(
2094 variables_info_.GetIsRelevantBitRow());
2095 } else {
2096 // Update row is still equal to the row used during the last iteration
2097 // to update the reduced costs.
2098 DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
2099 }
2100
2101 // If there is no dual-infeasible position, we are done.
2102 *leaving_row = kInvalidRow;
2103 if (num_dual_infeasible_positions_ == 0) return Status::OK();
2104
2105 // TODO(user): Reuse parameters_.optimization_rule() to decide if we use
2106 // steepest edge or the normal Dantzig pricing.
2107 const DenseColumn& squared_norm = dual_edge_norms_.GetEdgeSquaredNorms();
2108
2109 // Now take a leaving variable that maximizes the infeasibility variation and
2110 // can leave the basis while being dual-feasible.
2111 Fractional best_price(0.0);
2112 equivalent_leaving_choices_.clear();
2113 for (const RowIndex row : is_dual_entering_candidate_) {
2114 const Fractional squared_cost = Square(dual_pricing_vector_[row]);
2115 const Fractional scaled_best_price = best_price * squared_norm[row];
2116 if (squared_cost >= scaled_best_price) {
2117 if (squared_cost == scaled_best_price) {
2118 DCHECK_NE(*leaving_row, kInvalidRow);
2119 equivalent_leaving_choices_.push_back(row);
2120 continue;
2121 }
2122 equivalent_leaving_choices_.clear();
2123 best_price = squared_cost / squared_norm[row];
2124 *leaving_row = row;
2125 }
2126 }
2127
2128 // Break the ties randomly.
2129 if (!equivalent_leaving_choices_.empty()) {
2130 equivalent_leaving_choices_.push_back(*leaving_row);
2131 *leaving_row =
2132 equivalent_leaving_choices_[std::uniform_int_distribution<int>(
2133 0, equivalent_leaving_choices_.size() - 1)(random_)];
2134 }
2135
2136 // Return right away if there is no leaving variable; otherwise fill the
2137 // other return values.
2138 if (*leaving_row == kInvalidRow) return Status::OK();
2139 *cost_variation = dual_pricing_vector_[*leaving_row];
2140 const ColIndex leaving_col = basis_[*leaving_row];
2141 if (*cost_variation < 0.0) {
2142 *target_bound = upper_bound_[leaving_col];
2143 } else {
2144 *target_bound = lower_bound_[leaving_col];
2145 }
2147 return Status::OK();
2148}
2149
2150template <typename BoxedVariableCols>
2151void RevisedSimplex::MakeBoxedVariableDualFeasible(
2152 const BoxedVariableCols& cols, bool update_basic_values) {
2153 SCOPED_TIME_STAT(&function_stats_);
2154 std::vector<ColIndex> changed_cols;
2155
2156 // It is important to flip bounds within a tolerance because of precision
2157 // errors. Otherwise, this leads to cycling on many of the Netlib problems
2158 // since this is called at each iteration (because of the bound-flipping ratio
2159 // test).
2160 const DenseRow& variable_values = variable_values_.GetDenseRow();
2161 const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2162 const Fractional dual_feasibility_tolerance =
2163 reduced_costs_.GetDualFeasibilityTolerance();
2164 const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2165 for (const ColIndex col : cols) {
2166 const Fractional reduced_cost = reduced_costs[col];
2167 const VariableStatus status = variable_status[col];
2168 DCHECK(variables_info_.GetTypeRow()[col] ==
2169 VariableType::UPPER_AND_LOWER_BOUNDED);
2170 // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2171 DCHECK(variable_values[col] == lower_bound_[col] ||
2172 variable_values[col] == upper_bound_[col] ||
2173 status == VariableStatus::BASIC);
2174 if (reduced_cost > dual_feasibility_tolerance &&
2175 status != VariableStatus::AT_LOWER_BOUND) {
2176 variables_info_.Update(col, VariableStatus::AT_LOWER_BOUND);
2177 changed_cols.push_back(col);
2178 } else if (reduced_cost < -dual_feasibility_tolerance &&
2179 status != VariableStatus::AT_UPPER_BOUND) {
2180 variables_info_.Update(col, VariableStatus::AT_UPPER_BOUND);
2181 changed_cols.push_back(col);
2182 }
2183 }
2184
2185 if (!changed_cols.empty()) {
2186 variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2187 update_basic_values);
2188 }
2189}
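// [Editor's note, illustrative example only.] Concretely, with a hypothetical
// dual feasibility tolerance of 1e-7, a non-basic boxed column with reduced
// cost +3e-5 is flipped to (or kept at) its lower bound, one with reduced cost
// -3e-5 to its upper bound, and one with reduced cost 1e-9 is left where it
// is, since flipping within the tolerance would only invite cycling.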
2190
2191Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2192 RowIndex leaving_row, Fractional target_bound) {
2193 SCOPED_TIME_STAT(&function_stats_);
2194
2195 // We just want the leaving variable to go to its target_bound.
2196 const ColIndex leaving_col = basis_[leaving_row];
2197 const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2198 Fractional unscaled_step = leaving_variable_value - target_bound;
2199
2200 // In Chvatal p 157 update_[entering_col] is used instead of
2201 // direction_[leaving_row], but the two quantities are actually the
2202 // same. This is because update_[col] is the value at leaving_row of
2203 // the right inverse of col and direction_ is the right inverse of the
2204 // entering_col. Note that direction_[leaving_row] is probably more
2205 // precise.
2206 // TODO(user): use this to check precision and trigger recomputation.
2207 return unscaled_step / direction_[leaving_row];
2208}
2209
2210bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2211 VLOG(1) << "Test pivot.";
2212 SCOPED_TIME_STAT(&function_stats_);
2213 const ColIndex leaving_col = basis_[leaving_row];
2214 basis_[leaving_row] = entering_col;
2215
2216 // TODO(user): If 'is_ok' is true, we could use the computed lu in
2217 // basis_factorization_ rather than recompute it during UpdateAndPivot().
2218 CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2219 const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2220 basis_[leaving_row] = leaving_col;
2221 return is_ok;
2222}
2223
2224// Note that this function is an optimization: if it did nothing, the algorithm
2225// would still be correct and work. Using it does change the pivots taken
2226// during the simplex method though.
2227void RevisedSimplex::PermuteBasis() {
2228 SCOPED_TIME_STAT(&function_stats_);
2229
2230 // Fetch the current basis column permutation and return if it is empty which
2231 // means the permutation is the identity.
2232 const ColumnPermutation& col_perm =
2233 basis_factorization_.GetColumnPermutation();
2234 if (col_perm.empty()) return;
2235
2236 // Permute basis_.
2237 ApplyColumnPermutationToRowIndexedVector(col_perm, &basis_);
2238
2239 // Permute dual_pricing_vector_ if needed.
2240 if (!dual_pricing_vector_.empty()) {
2241 // TODO(user): We need to permute is_dual_entering_candidate_ too. Right
2242 // now, we recompute both the dual_pricing_vector_ and
2243 // is_dual_entering_candidate_ on each refactorization, so this doesn't
2244 // matter.
2245 ApplyColumnPermutationToRowIndexedVector(col_perm, &dual_pricing_vector_);
2246 }
2247
2248 // Notify the other classes.
2249 reduced_costs_.UpdateDataOnBasisPermutation();
2250 dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);
2251
2252 // Finally, remove the column permutation from all subsequent solves since
2253 // it has been taken into account in basis_.
2254 basis_factorization_.SetColumnPermutationToIdentity();
2255}
2256
2257Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2258 RowIndex leaving_row,
2259 Fractional target_bound) {
2260 SCOPED_TIME_STAT(&function_stats_);
2261 const ColIndex leaving_col = basis_[leaving_row];
2262 const VariableStatus leaving_variable_status =
2263 lower_bound_[leaving_col] == upper_bound_[leaving_col]
2264 ? VariableStatus::FIXED_VALUE
2265 : target_bound == lower_bound_[leaving_col]
2266 ? VariableStatus::AT_LOWER_BOUND
2267 : VariableStatus::AT_UPPER_BOUND;
2268 if (variable_values_.Get(leaving_col) != target_bound) {
2269 ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2270 target_bound);
2271 }
2272 UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2273
2274 const Fractional pivot_from_direction = direction_[leaving_row];
2275 const Fractional pivot_from_update_row =
2276 update_row_.GetCoefficient(entering_col);
2277 const Fractional diff =
2278 std::abs(pivot_from_update_row - pivot_from_direction);
2279 if (diff > parameters_.refactorization_threshold() *
2280 (1 + std::abs(pivot_from_direction))) {
2281 VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2282 << " diff = " << diff;
2283 GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2284 } else {
2285 GLOP_RETURN_IF_ERROR(
2286 basis_factorization_.Update(entering_col, leaving_row, direction_));
2287 }
2288 if (basis_factorization_.IsRefactorized()) {
2289 PermuteBasis();
2290 }
2291 return Status::OK();
2292}
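// [Editor's sketch, not in the original file.] The refactorization trigger in
// UpdateAndPivot() is a mixed absolute/relative comparison of the two
// independently computed pivots: it fires when
//   |pivot_from_update_row - pivot_from_direction| >
//       threshold * (1 + |pivot_from_direction|),
// i.e. roughly an absolute test for tiny pivots and a relative test for large
// ones. A hypothetical standalone predicate:
namespace example_pivot_check {
inline bool PivotsDisagree(double pivot_from_update_row,
                           double pivot_from_direction, double threshold) {
  const double diff = std::abs(pivot_from_update_row - pivot_from_direction);
  return diff > threshold * (1.0 + std::abs(pivot_from_direction));
}
}  // namespace example_pivot_check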
2293
2294bool RevisedSimplex::NeedsBasisRefactorization(bool refactorize) {
2295 if (basis_factorization_.IsRefactorized()) return false;
2296 if (reduced_costs_.NeedsBasisRefactorization()) return true;
2297 const GlopParameters::PricingRule pricing_rule =
2298 feasibility_phase_ ? parameters_.feasibility_rule()
2299 : parameters_.optimization_rule();
2300 if (parameters_.use_dual_simplex()) {
2301 // TODO(user): Currently the dual is always using STEEPEST_EDGE.
2302 DCHECK_EQ(pricing_rule, GlopParameters::STEEPEST_EDGE);
2303 if (dual_edge_norms_.NeedsBasisRefactorization()) return true;
2304 } else {
2305 if (pricing_rule == GlopParameters::STEEPEST_EDGE &&
2306 primal_edge_norms_.NeedsBasisRefactorization()) {
2307 return true;
2308 }
2309 }
2310 return refactorize;
2311}
2312
2313Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2314 SCOPED_TIME_STAT(&function_stats_);
2315 if (NeedsBasisRefactorization(*refactorize)) {
2316 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2317 update_row_.Invalidate();
2318 PermuteBasis();
2319 }
2320 *refactorize = false;
2321 return Status::OK();
2322}
2323
2325 if (col >= integrality_scale_.size()) {
2326 integrality_scale_.resize(col + 1, 0.0);
2327 }
2328 integrality_scale_[col] = scale;
2329}
2330
2331Status RevisedSimplex::Polish(TimeLimit* time_limit) {
2332 GLOP_RETURN_ERROR_IF_NULL(time_limit);
2333 Cleanup update_deterministic_time_on_return(
2334 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2335
2336 // Get all non-basic variables with a reduced cost close to zero.
2337 // Note that because we only choose entering candidate with a cost of zero,
2338 // this set will not change (modulo epsilons).
2339 const DenseRow& rc = reduced_costs_.GetReducedCosts();
2340 std::vector<ColIndex> candidates;
2341 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
2342 if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
2343 if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
2344 }
2345
2346 bool refactorize = false;
2347 int num_pivots = 0;
2348 Fractional total_gain = 0.0;
2349 for (int i = 0; i < 10; ++i) {
2350 AdvanceDeterministicTime(time_limit);
2351 if (time_limit->LimitReached()) break;
2352 if (num_pivots >= 5) break;
2353 if (candidates.empty()) break;
2354
2355 // Pick a random one and remove it from the list.
2356 const int index =
2357 std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
2358 const ColIndex entering_col = candidates[index];
2359 std::swap(candidates[index], candidates.back());
2360 candidates.pop_back();
2361
2362 // We need the entering variable to move in the correct direction.
2363 Fractional fake_rc = 1.0;
2364 if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
2365 CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
2366 fake_rc = -1.0;
2367 }
2368
2369 // Compute the direction and by how much we can move along it.
2370 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2371 ComputeDirection(entering_col);
2372 Fractional step_length;
2373 RowIndex leaving_row;
2374 Fractional target_bound;
2375 bool local_refactorize = false;
2376 GLOP_RETURN_IF_ERROR(
2377 ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
2378 &leaving_row, &step_length, &target_bound));
2379
2380 if (local_refactorize) continue;
2381 if (step_length == kInfinity || step_length == -kInfinity) continue;
2382 if (std::abs(step_length) <= 1e-6) continue;
2383 if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
2384 continue;
2385 }
2386 const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
2387
2388 // Evaluate whether the pivot reduces the fractionality of the basis.
2389 //
2390 // TODO(user): Give more weight to variables with a small domain, e.g. a
2391 // binary variable, than to a variable in [0, 1k]?
2392 const auto get_diff = [this](ColIndex col, Fractional old_value,
2393 Fractional new_value) {
2394 if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
2395 return 0.0;
2396 }
2397 const Fractional s = integrality_scale_[col];
2398 return (std::abs(new_value * s - std::round(new_value * s)) -
2399 std::abs(old_value * s - std::round(old_value * s)));
2400 };
2401 Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
2402 variable_values_.Get(entering_col) + step);
2403 for (const auto e : direction_) {
2404 const ColIndex col = basis_[e.row()];
2405 const Fractional old_value = variable_values_.Get(col);
2406 const Fractional new_value = old_value - e.coefficient() * step;
2407 diff += get_diff(col, old_value, new_value);
2408 }
2409
2410 // Ignore small decreases in integrality.
2411 if (diff > -1e-2) continue;
2412 total_gain -= diff;
2413
2414 // We perform the change.
2415 num_pivots++;
2416 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2417
2418 // This is a bound flip of the entering column.
2419 if (leaving_row == kInvalidRow) {
2420 if (step > 0.0) {
2421 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2422 VariableStatus::AT_UPPER_BOUND);
2423 } else if (step < 0.0) {
2424 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2425 VariableStatus::AT_LOWER_BOUND);
2426 }
2427 reduced_costs_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
2428 continue;
2429 }
2430
2431 // Perform the pivot.
2432 const ColIndex leaving_col = basis_[leaving_row];
2433 update_row_.ComputeUpdateRow(leaving_row);
2434 primal_edge_norms_.UpdateBeforeBasisPivot(
2435 entering_col, leaving_col, leaving_row, direction_, &update_row_);
2436 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2437 &update_row_);
2438
2439 const Fractional dir = -direction_[leaving_row] * step;
2440 const bool is_degenerate =
2441 (dir == 0.0) ||
2442 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2443 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2444 if (!is_degenerate) {
2445 variable_values_.Set(leaving_col, target_bound);
2446 }
2447 GLOP_RETURN_IF_ERROR(
2448 UpdateAndPivot(entering_col, leaving_row, target_bound));
2449 }
2450
2451 VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
2452 return Status::OK();
2453}
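// [Editor's sketch, not in the original file.] The get_diff lambda in Polish()
// measures how the "fractionality" of a value changes at a given integrality
// scale s, using f(v) = |v * s - round(v * s)|; a pivot is kept only when the
// summed change over all touched variables is clearly negative. A hypothetical
// standalone version of the per-variable measure:
namespace example_fractionality {
inline double FractionalityChange(double old_value, double new_value,
                                  double scale) {
  if (scale == 0.0) return 0.0;  // Column not marked as integral.
  const double f_new =
      std::abs(new_value * scale - std::round(new_value * scale));
  const double f_old =
      std::abs(old_value * scale - std::round(old_value * scale));
  return f_new - f_old;
}
}  // namespace example_fractionality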
2454
2455// Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2456// x an n-vector.
2457//
2458// x is split in two parts x_B and x_N (B standing for basis).
2459// In the same way, A is split in A_B (also known as B) and A_N, and
2460// c is split into c_B and c_N.
2461//
2462// The goal is to minimize c_B.x_B + c_N.x_N
2463// subject to B.x_B + A_N.x_N = 0
2464// and x_lower <= x <= x_upper.
2465//
2466// To minimize c.x, at each iteration a variable from x_N is selected to
2467// enter the basis, and a variable from x_B is selected to leave the basis.
2468// To avoid explicit inversion of B, the algorithm solves two sub-systems:
2469// y.B = c_B and B.d = a (a being the entering column).
2470Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
2471 GLOP_RETURN_ERROR_IF_NULL(time_limit);
2472 Cleanup update_deterministic_time_on_return(
2473 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2474 num_consecutive_degenerate_iterations_ = 0;
2475 DisplayIterationInfo();
2476 bool refactorize = false;
2477
2478 if (feasibility_phase_) {
2479 // Initialize the primal phase-I objective.
2480 // Note that this temporarily erases the problem objective.
2481 objective_.AssignToZero(num_cols_);
2482 variable_values_.UpdatePrimalPhaseICosts(
2483 util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2484 reduced_costs_.ResetForNewObjective();
2485 }
2486
2487 while (true) {
2488 // TODO(user): we may loop a bit more than the actual number of iterations.
2489 // Fix.
2490 IF_STATS_ENABLED(
2491 ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2492 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2493 if (basis_factorization_.IsRefactorized()) {
2494 CorrectErrorsOnVariableValues();
2495 DisplayIterationInfo();
2496
2497 if (feasibility_phase_) {
2498 // Since the variable values may have been recomputed, we need to
2499 // recompute the primal infeasible variables and update their costs.
2500 if (variable_values_.UpdatePrimalPhaseICosts(
2501 util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2502 &objective_)) {
2503 reduced_costs_.ResetForNewObjective();
2504 }
2505 }
2506
2507 // Computing the objective at each iteration takes time, so we just
2508 // check the limit when the basis is refactorized.
2509 if (!feasibility_phase_ &&
2510 ComputeObjectiveValue() < primal_objective_limit_) {
2511 VLOG(1) << "Stopping the primal simplex because"
2512 << " the objective limit " << primal_objective_limit_
2513 << " has been reached.";
2514 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2515 objective_limit_reached_ = true;
2516 return Status::OK();
2517 }
2518 } else if (feasibility_phase_) {
2519 // Note that direction_.non_zeros contains the positions of the basic
2520 // variables whose values were updated during the last iteration.
2521 if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2522 &objective_)) {
2523 reduced_costs_.ResetForNewObjective();
2524 }
2525 }
2526
2527 Fractional reduced_cost = 0.0;
2528 ColIndex entering_col = kInvalidCol;
2529 GLOP_RETURN_IF_ERROR(
2530 entering_variable_.PrimalChooseEnteringColumn(&entering_col));
2531 if (entering_col == kInvalidCol) {
2532 if (reduced_costs_.AreReducedCostsPrecise() &&
2533 basis_factorization_.IsRefactorized()) {
2534 if (feasibility_phase_) {
2535 const Fractional primal_infeasibility =
2536 variable_values_.ComputeMaximumPrimalInfeasibility();
2537 if (primal_infeasibility <
2538 parameters_.primal_feasibility_tolerance()) {
2539 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2540 } else {
2541 VLOG(1) << "Infeasible problem! infeasibility = "
2542 << primal_infeasibility;
2543 problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2544 }
2545 } else {
2546 problem_status_ = ProblemStatus::OPTIMAL;
2547 }
2548 break;
2549 } else {
2550 VLOG(1) << "Optimal reached, double checking...";
2551 reduced_costs_.MakeReducedCostsPrecise();
2552 refactorize = true;
2553 continue;
2554 }
2555 } else {
2556 reduced_cost = reduced_costs_.GetReducedCosts()[entering_col];
2557 DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2558
2559 // Solve the system B.d = a with a the entering column.
2560 ComputeDirection(entering_col);
2561 primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
2562 direction_);
2563 if (!reduced_costs_.TestEnteringReducedCostPrecision(
2564 entering_col, direction_, &reduced_cost)) {
2565 VLOG(1) << "Skipping col #" << entering_col << " whose reduced cost is "
2566 << reduced_cost;
2567 continue;
2568 }
2569 }
2570
2571 // This test takes place after the check for optimality/feasibility because
2572 // when running with 0 iterations, we still want to report
2573 // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2574 // case at the beginning of the algorithm.
2575 AdvanceDeterministicTime(time_limit);
2576 if (num_iterations_ == parameters_.max_number_of_iterations() ||
2577 time_limit->LimitReached()) {
2578 break;
2579 }
2580
2581 Fractional step_length;
2582 RowIndex leaving_row;
2583 Fractional target_bound;
2584 if (feasibility_phase_) {
2585 PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2586 &refactorize, &leaving_row,
2587 &step_length, &target_bound);
2588 } else {
2589 GLOP_RETURN_IF_ERROR(
2590 ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2591 &leaving_row, &step_length, &target_bound));
2592 }
2593 if (refactorize) continue;
2594
2595 if (step_length == kInfinity || step_length == -kInfinity) {
2596 if (!basis_factorization_.IsRefactorized() ||
2597 !reduced_costs_.AreReducedCostsPrecise()) {
2598 VLOG(1) << "Infinite step length, double checking...";
2599 reduced_costs_.MakeReducedCostsPrecise();
2600 continue;
2601 }
2602 if (feasibility_phase_) {
2603 // This shouldn't happen by construction.
2604 VLOG(1) << "Unbounded feasibility problem !?";
2605 problem_status_ = ProblemStatus::ABNORMAL;
2606 } else {
2607 VLOG(1) << "Unbounded problem.";
2608 problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
2609 solution_primal_ray_.AssignToZero(num_cols_);
2610 for (RowIndex row(0); row < num_rows_; ++row) {
2611 const ColIndex col = basis_[row];
2612 solution_primal_ray_[col] = -direction_[row];
2613 }
2614 solution_primal_ray_[entering_col] = 1.0;
2615 if (step_length == -kInfinity) {
2616 ChangeSign(&solution_primal_ray_);
2617 }
2618 }
2619 break;
2620 }
2621
2622 Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
2623 if (feasibility_phase_ && leaving_row != kInvalidRow) {
2624 // For phase-I we currently always set the leaving variable to its exact
2625 // bound even if by doing so we may take a small step in the wrong
2626 // direction and may increase the overall infeasibility.
2627 //
2628 // TODO(user): Investigate alternatives even if this seems to work well in
2629 // practice. Note that the final returned solution will have the property
2630 // that all non-basic variables are at their exact bound, so it is nice
2631 // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
2632 // this property cannot be found.
2633 step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2634 }
2635
2636 // Store the leaving_col before basis_ change.
2637 const ColIndex leaving_col =
2638 (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
2639
2640 // An iteration is called 'degenerate' if the leaving variable is already
2641 // primal-infeasible and we make it even more infeasible or if we do a zero
2642 // step.
2643 bool is_degenerate = false;
2644 if (leaving_row != kInvalidRow) {
2645 Fractional dir = -direction_[leaving_row] * step;
2646 is_degenerate =
2647 (dir == 0.0) ||
2648 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2649 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2650
2651 // If the iteration is not degenerate, the leaving variable should go to
2652 // its exact target bound (it is how the step is computed).
2653 if (!is_degenerate) {
2654 DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
2655 target_bound));
2656 }
2657 }
2658
2659 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2660 if (leaving_row != kInvalidRow) {
2661 primal_edge_norms_.UpdateBeforeBasisPivot(
2662 entering_col, basis_[leaving_row], leaving_row, direction_,
2663 &update_row_);
2664 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
2665 direction_, &update_row_);
2666 if (!is_degenerate) {
2667 // On a non-degenerate iteration, the leaving variable should be at its
2668 // exact bound. This corrects a possible small numerical error since
2669 // 'value + direction * step' where step is
2670 // '(target_bound - value) / direction'
2671 // may be slightly different from target_bound.
2672 variable_values_.Set(leaving_col, target_bound);
2673 }
2674 GLOP_RETURN_IF_ERROR(
2675 UpdateAndPivot(entering_col, leaving_row, target_bound));
2676 IF_STATS_ENABLED({
2677 if (is_degenerate) {
2678 timer.AlsoUpdate(&iteration_stats_.degenerate);
2679 } else {
2680 timer.AlsoUpdate(&iteration_stats_.normal);
2681 }
2682 });
2683 } else {
2684 // Bound flip. This makes sure that the flipping variable is at its bound
2685 // and has the correct status.
2686 DCHECK_EQ(VariableType::UPPER_AND_LOWER_BOUNDED,
2687 variables_info_.GetTypeRow()[entering_col]);
2688 if (step > 0.0) {
2689 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2690 VariableStatus::AT_UPPER_BOUND);
2691 } else if (step < 0.0) {
2692 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2693 VariableStatus::AT_LOWER_BOUND);
2694 }
2695 reduced_costs_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
2696 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
2697 }
2698
2699 if (feasibility_phase_ && leaving_row != kInvalidRow) {
2700 // Set the leaving variable to its exact bound.
2701 variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
2702 reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
2703 &objective_[leaving_col]);
2704 }
2705
2706 // Stats about consecutive degenerate iterations.
2707 if (step_length == 0.0) {
2708 num_consecutive_degenerate_iterations_++;
2709 } else {
2710 if (num_consecutive_degenerate_iterations_ > 0) {
2711 iteration_stats_.degenerate_run_size.Add(
2712 num_consecutive_degenerate_iterations_);
2713 num_consecutive_degenerate_iterations_ = 0;
2714 }
2715 }
2716 ++num_iterations_;
2717 }
2718 if (num_consecutive_degenerate_iterations_ > 0) {
2719 iteration_stats_.degenerate_run_size.Add(
2720 num_consecutive_degenerate_iterations_);
2721 }
2722 return Status::OK();
2723}
2724
2725// TODO(user): Two other approaches for the phase I described in Koberstein's
2726// PhD thesis seem worth trying at some point:
2727// - The subproblem approach, which enables one to use a normal phase II dual,
2728// but requires an efficient bound-flipping ratio test since the new problem
2729// has all its variables boxed.
2730// - Pan's method, which is really fast but has no theoretical guarantee of
2731// terminating and thus needs to use one of the other methods as a fallback if
2732// it fails to make progress.
2733//
2734// Note that the returned status applies to the primal problem!
2735Status RevisedSimplex::DualMinimize(TimeLimit* time_limit) {
2736 Cleanup update_deterministic_time_on_return(
2737 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2738 num_consecutive_degenerate_iterations_ = 0;
2739 bool refactorize = false;
2740
2741 bound_flip_candidates_.clear();
2742 pair_to_ignore_.clear();
2743
2744 // Leaving variable.
2745 RowIndex leaving_row;
2746 Fractional cost_variation;
2747 Fractional target_bound;
2748
2749 // Entering variable.
2750 ColIndex entering_col;
2751 Fractional ratio;
2752
2753 while (true) {
2754 // TODO(user): we may loop a bit more than the actual number of iterations.
2755 // Fix.
2756 IF_STATS_ENABLED(
2757 ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2758
2759 const bool old_refactorize_value = refactorize;
2760 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2761
2762 // If the basis is refactorized, we recompute all the values in order to
2763 // have a good precision.
2764 if (basis_factorization_.IsRefactorized()) {
2765 // We do not want to recompute the reduced costs too often because that
2766 // may break the overall direction taken by the last steps and may lead
2767 // to less improvement on degenerate problems.
2768 //
2769 // During phase-I, we do want the reduced costs to be as precise as
2770 // possible. TODO(user): Investigate why and fix the TODO in
2771 // PermuteBasis().
2772 //
2773 // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
2774 // do recompute them, it is better to do that first.
2775 if (!feasibility_phase_ && !reduced_costs_.AreReducedCostsRecomputed() &&
2776 !old_refactorize_value) {
2777 const Fractional dual_residual_error =
2778 reduced_costs_.ComputeMaximumDualResidual();
2779 if (dual_residual_error >
2780 reduced_costs_.GetDualFeasibilityTolerance()) {
2781 VLOG(1) << "Recomputing reduced costs. Dual residual = "
2782 << dual_residual_error;
2783 reduced_costs_.MakeReducedCostsPrecise();
2784 }
2785 } else {
2786 reduced_costs_.MakeReducedCostsPrecise();
2787 }
2788
2789 // TODO(user): Make RecomputeBasicVariableValues() do nothing
2790 // if it was already recomputed on a refactorized basis. This is the
2791 // same behavior as MakeReducedCostsPrecise().
2792 //
2793 // TODO(user): Do not recompute the variable values each time we
2794 // refactorize the matrix, like for the reduced costs? That may lead to
2795 // a worse behavior than keeping the "imprecise" version and only
2796 // recomputing it when its precision is above a threshold.
2797 if (!feasibility_phase_) {
2798 MakeBoxedVariableDualFeasible(
2799 variables_info_.GetNonBasicBoxedVariables(),
2800 /*update_basic_values=*/false);
2801 variable_values_.RecomputeBasicVariableValues();
2802 variable_values_.ResetPrimalInfeasibilityInformation();
2803
2804 // Computing the objective at each iteration takes time, so we just
2805 // check the limit when the basis is refactorized.
2806 if (ComputeObjectiveValue() > dual_objective_limit_) {
2807 VLOG(1) << "Stopping the dual simplex because"
2808 << " the objective limit " << dual_objective_limit_
2809 << " has been reached.";
2810 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2811 objective_limit_reached_ = true;
2812 return Status::OK();
2813 }
2814 }
2815
2816 reduced_costs_.GetReducedCosts();
2817 DisplayIterationInfo();
2818 } else {
2819 // Updates from the previous iteration that can be skipped if we
2820 // recompute everything (see other case above).
2821 if (!feasibility_phase_) {
2822 // Make sure the boxed variables are dual-feasible before choosing the
2823 // leaving variable row.
2824 MakeBoxedVariableDualFeasible(bound_flip_candidates_,
2825 /*update_basic_values=*/true);
2826 bound_flip_candidates_.clear();
2827
2828 // The direction_.non_zeros contains the positions for which the basic
2829 // variable value was changed during the previous iterations.
2830 variable_values_.UpdatePrimalInfeasibilityInformation(
2831 direction_.non_zeros);
2832 }
2833 }
2834
2835 if (feasibility_phase_) {
2836 GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
2837 &leaving_row, &cost_variation, &target_bound));
2838 } else {
2839 GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
2840 &leaving_row, &cost_variation, &target_bound));
2841 }
2842 if (leaving_row == kInvalidRow) {
2843 if (!basis_factorization_.IsRefactorized()) {
2844 VLOG(1) << "Optimal reached, double checking.";
2845 refactorize = true;
2846 continue;
2847 }
2848 if (feasibility_phase_) {
2849 // Note that since the basis is refactorized, the variable values
2850 // will be recomputed at the beginning of the second phase. The boxed
2851 // variable values will also be corrected by
2852 // MakeBoxedVariableDualFeasible().
2853 if (num_dual_infeasible_positions_ == 0) {
2854 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2855 } else {
2856 problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
2857 }
2858 } else {
2859 problem_status_ = ProblemStatus::OPTIMAL;
2860 }
2861 return Status::OK();
2862 }
2863
2864 update_row_.ComputeUpdateRow(leaving_row);
2865 for (std::pair<RowIndex, ColIndex> pair : pair_to_ignore_) {
2866 if (pair.first == leaving_row) {
2867 update_row_.IgnoreUpdatePosition(pair.second);
2868 }
2869 }
2870 if (feasibility_phase_) {
2871 GLOP_RETURN_IF_ERROR(entering_variable_.DualPhaseIChooseEnteringColumn(
2872 update_row_, cost_variation, &entering_col, &ratio));
2873 } else {
2874 GLOP_RETURN_IF_ERROR(entering_variable_.DualChooseEnteringColumn(
2875 update_row_, cost_variation, &bound_flip_candidates_, &entering_col,
2876 &ratio));
2877 }
2878
2879 // No entering_col: Unbounded problem / Infeasible problem.
2880 if (entering_col == kInvalidCol) {
2881 if (!reduced_costs_.AreReducedCostsPrecise()) {
2882 VLOG(1) << "No entering column. Double checking...";
2883 refactorize = true;
2884 continue;
2885 }
2886 DCHECK(basis_factorization_.IsRefactorized());
2887 if (feasibility_phase_) {
2888 // This shouldn't happen by construction.
2889 VLOG(1) << "Unbounded dual feasibility problem !?";
2890 problem_status_ = ProblemStatus::ABNORMAL;
2891 } else {
2892 problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
2893 solution_dual_ray_ =
2894 Transpose(update_row_.GetUnitRowLeftInverse().values);
2895 update_row_.RecomputeFullUpdateRow(leaving_row);
2896 solution_dual_ray_row_combination_.AssignToZero(num_cols_);
2897 for (const ColIndex col : update_row_.GetNonZeroPositions()) {
2898 solution_dual_ray_row_combination_[col] =
2899 update_row_.GetCoefficient(col);
2900 }
2901 if (cost_variation < 0) {
2902 ChangeSign(&solution_dual_ray_);
2903 ChangeSign(&solution_dual_ray_row_combination_);
2904 }
2905 }
2906 return Status::OK();
2907 }
2908
2909 // If the coefficient is too small, we recompute the reduced costs.
2910 const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
2911 if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
2912 !reduced_costs_.AreReducedCostsPrecise()) {
2913 VLOG(1) << "Trying not to pivot by " << entering_coeff;
2914 refactorize = true;
2915 continue;
2916 }
2917
2918 // If the reduced cost is already precise, we check with the direction_.
2919 // This is at least needed to avoid corner cases where
2920 // direction_[leaving_row] is actually 0 which causes a floating
2921 // point exception below.
2922 ComputeDirection(entering_col);
2923 if (std::abs(direction_[leaving_row]) <
2924 parameters_.minimum_acceptable_pivot()) {
2925 VLOG(1) << "Do not pivot by " << entering_coeff
2926 << " because the direction is " << direction_[leaving_row];
2927 refactorize = true;
2928 pair_to_ignore_.push_back({leaving_row, entering_col});
2929 continue;
2930 }
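// Why a tiny pivot is rejected (sketch, standard numerical-LP reasoning, not
// from this file): the factorization update divides by direction_[leaving_row],
// so an error e in the entering column can grow to roughly e / |pivot|. For
// example, with |direction_[leaving_row]| = 1e-10, a 1e-12 error in the
// direction would be amplified to about 1e-2 in the updated basic values,
// which is why the code prefers to refactorize and skip this pivot instead.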
2931 pair_to_ignore_.clear();
2932
2933 // This test takes place after the check for optimality/feasibility because
2934 // when running with 0 iterations, we still want to report
2935 // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2936 // case at the beginning of the algorithm.
2937 AdvanceDeterministicTime(time_limit);
2938 if (num_iterations_ == parameters_.max_number_of_iterations() ||
2939 time_limit->LimitReached()) {
2940 return Status::OK();
2941 }
2942
2943 IF_STATS_ENABLED({
2944 if (ratio == 0.0) {
2945 timer.AlsoUpdate(&iteration_stats_.degenerate);
2946 } else {
2947 timer.AlsoUpdate(&iteration_stats_.normal);
2948 }
2949 });
2950
2951 // Update basis. Note that direction_ is already computed.
2952 //
2953 // TODO(user): this is pretty much the same in the primal or dual code.
2954 // We just need to know the bound to which the leaving variable will be set.
2955 // Factorize more common code?
2956 //
2957 // During phase I, we do not need the basic variable values at all.
2958 Fractional primal_step = 0.0;
2959 if (feasibility_phase_) {
2960 DualPhaseIUpdatePrice(leaving_row, entering_col);
2961 } else {
2962 primal_step =
2963 ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2964 variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
2965 }
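// Sketch of the step above (hedged, not a quote of the helper's code):
// ComputeStepToMoveBasicVariableToBound() presumably returns the step theta
// for which the leaving basic variable lands exactly on target_bound, roughly
//   theta = (value(basis_[leaving_row]) - target_bound) / direction_[leaving_row]
// up to the sign convention used by UpdateOnPivoting(). For example, with the
// leaving variable at 3.5, target_bound = 2.0 and a direction coefficient of
// 0.5, theta = 3.0 and every affected basic variable moves by -theta times its
// direction_ coefficient.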
2966
2967 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2968 &update_row_);
2969 dual_edge_norms_.UpdateBeforeBasisPivot(
2970 entering_col, leaving_row, direction_,
2971 update_row_.GetUnitRowLeftInverse());
2972
2973 // It is important to do the actual pivot after the update above!
2974 const ColIndex leaving_col = basis_[leaving_row];
2975 GLOP_RETURN_IF_ERROR(
2976 UpdateAndPivot(entering_col, leaving_row, target_bound));
2977
2978 // This makes sure the leaving variable is at its exact bound. Tests
2979 // indicate that this makes everything more stable. Note also that during
2980 // the feasibility phase, the variable values are not used, but the
2981 // correct non-basic variable values are needed at the end.
2982 variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
2983
2984 // This is slow, but otherwise we have a really bad precision on the
2985 // variable values ...
2986 if (std::abs(primal_step) * parameters_.primal_feasibility_tolerance() >
2987 1.0) {
2988 refactorize = true;
2989 }
2990 ++num_iterations_;
2991 }
2992 return Status::OK();
2993}
2994
2995ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
2996 // TODO(user): Remove this function.
2997 DCHECK_ROW_BOUNDS(row);
2998 return first_slack_col_ + RowToColIndex(row);
2999}
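// Worked example for SlackColIndex(): the slack columns are appended after the
// structural columns, so with first_slack_col_ = 5 (five structural variables)
// the slack of row 2 lives in column 5 + 2 = 7, which is exactly
// first_slack_col_ + RowToColIndex(row) above.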
3000
3001 std::string RevisedSimplex::StatString() {
3002 std::string result;
3003 result.append(iteration_stats_.StatString());
3004 result.append(ratio_test_stats_.StatString());
3005 result.append(entering_variable_.StatString());
3006 result.append(reduced_costs_.StatString());
3007 result.append(variable_values_.StatString());
3008 result.append(primal_edge_norms_.StatString());
3009 result.append(dual_edge_norms_.StatString());
3010 result.append(update_row_.StatString());
3011 result.append(basis_factorization_.StatString());
3012 result.append(function_stats_.StatString());
3013 return result;
3014}
3015
3016void RevisedSimplex::DisplayAllStats() {
3017 if (absl::GetFlag(FLAGS_simplex_display_stats)) {
3018 absl::FPrintF(stderr, "%s", StatString());
3019 absl::FPrintF(stderr, "%s", GetPrettySolverStats());
3020 }
3021}
3022
3023Fractional RevisedSimplex::ComputeObjectiveValue() const {
3024 SCOPED_TIME_STAT(&function_stats_);
3025 return PreciseScalarProduct(objective_,
3026 Transpose(variable_values_.GetDenseRow()));
3027}
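// PreciseScalarProduct() above computes the dot product c^T x with extra care.
// A generic sketch of one common way to do this (compensated / Kahan
// summation); this is only an illustration, not necessarily how the glop
// helper is implemented (needs <vector>):
//
//   double PreciseDotSketch(const std::vector<double>& c,
//                           const std::vector<double>& x) {
//     double sum = 0.0, compensation = 0.0;
//     for (size_t i = 0; i < c.size(); ++i) {
//       const double term = c[i] * x[i] - compensation;
//       const double next = sum + term;
//       compensation = (next - sum) - term;  // lost low-order bits of term
//       sum = next;
//     }
//     return sum;
//   }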
3028
3029Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
3030 SCOPED_TIME_STAT(&function_stats_);
3031 const Fractional sum = PreciseScalarProduct(
3032 objective_, Transpose(variable_values_.GetDenseRow()));
3033 return objective_scaling_factor_ * (sum + objective_offset_);
3034}
3035
3036void RevisedSimplex::SetParameters(const GlopParameters& parameters) {
3037 SCOPED_TIME_STAT(&function_stats_);
3038 random_.seed(parameters.random_seed());
3039 initial_parameters_ = parameters;
3040 parameters_ = parameters;
3041 PropagateParameters();
3042}
3043
3044void RevisedSimplex::PropagateParameters() {
3045 SCOPED_TIME_STAT(&function_stats_);
3046 basis_factorization_.SetParameters(parameters_);
3047 entering_variable_.SetParameters(parameters_);
3048 reduced_costs_.SetParameters(parameters_);
3049 dual_edge_norms_.SetParameters(parameters_);
3050 primal_edge_norms_.SetParameters(parameters_);
3051 update_row_.SetParameters(parameters_);
3052}
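// Typical parameter flow (sketch): callers build a GlopParameters proto and
// hand it to SetParameters(), which seeds the RNG and then fans the settings
// out to every sub-component via PropagateParameters(). The fields shown below
// (random_seed, use_dual_simplex) are referenced elsewhere in this file; other
// field names should be checked against parameters.proto, and revised_simplex
// is an assumed instance name:
//
//   GlopParameters params;
//   params.set_random_seed(42);
//   params.set_use_dual_simplex(true);
//   revised_simplex.SetParameters(params);  // also calls PropagateParameters()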
3053
3054void RevisedSimplex::DisplayIterationInfo() const {
3055 if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
3056 const int iter = feasibility_phase_
3057 ? num_iterations_
3058 : num_iterations_ - num_feasibility_iterations_;
3059 // Note that in dual phase II, ComputeObjectiveValue() also computes
3060 // the dual objective even though it uses the variable values. This is
3061 // because if we modify the bounds to make the problem primal-feasible,
3062 // we are at an optimum and hence the two objectives are equal.
3063 const Fractional objective =
3064 !feasibility_phase_
3065 ? ComputeInitialProblemObjectiveValue()
3066 : (parameters_.use_dual_simplex()
3067 ? reduced_costs_.ComputeSumOfDualInfeasibilities()
3068 : variable_values_.ComputeSumOfPrimalInfeasibilities());
3069 LOG(INFO) << (feasibility_phase_ ? "Feasibility" : "Optimization")
3070 << " phase, iteration # " << iter
3071 << ", objective = " << absl::StrFormat("%.15E", objective);
3072 }
3073}
3074
3075void RevisedSimplex::DisplayErrors() const {
3076 if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
3077 LOG(INFO) << "Primal infeasibility (bounds) = "
3078 << variable_values_.ComputeMaximumPrimalInfeasibility();
3079 LOG(INFO) << "Primal residual |A.x - b| = "
3080 << variable_values_.ComputeMaximumPrimalResidual();
3081 LOG(INFO) << "Dual infeasibility (reduced costs) = "
3082 << reduced_costs_.ComputeMaximumDualInfeasibility();
3083 LOG(INFO) << "Dual residual |c_B - y.B| = "
3084 << reduced_costs_.ComputeMaximumDualResidual();
3085 }
3086}
3087
3088namespace {
3089
3090std::string StringifyMonomialWithFlags(const Fractional a,
3091 const std::string& x) {
3092 return StringifyMonomial(
3093 a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3094}
3095
3096// Returns a string representing the rational approximation of x or a decimal
3097// approximation of x according to
3098// absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions).
3099std::string StringifyWithFlags(const Fractional x) {
3100 return Stringify(x,
3101 absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3102}
3103
3104} // namespace
3105
3106std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
3107 std::string output;
3108 VariableType variable_type = variables_info_.GetTypeRow()[col];
3109 VariableStatus variable_status = variables_info_.GetStatusRow()[col];
3110 absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
3111 variable_name_[col],
3112 StringifyWithFlags(variable_values_.Get(col)),
3113 GetVariableStatusString(variable_status),
3114 GetVariableTypeString(variable_type),
3115 StringifyWithFlags(lower_bound_[col]),
3116 StringifyWithFlags(upper_bound_[col]));
3117 return output;
3118}
3119
3120void RevisedSimplex::DisplayInfoOnVariables() const {
3121 if (VLOG_IS_ON(3)) {
3122 for (ColIndex col(0); col < num_cols_; ++col) {
3123 const Fractional variable_value = variable_values_.Get(col);
3124 const Fractional objective_coefficient = objective_[col];
3125 const Fractional objective_contribution =
3126 objective_coefficient * variable_value;
3127 VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3128 << StringifyWithFlags(variable_value) << " * "
3129 << StringifyWithFlags(objective_coefficient)
3130 << "(obj) = " << StringifyWithFlags(objective_contribution);
3131 }
3132 VLOG(3) << "------";
3133 }
3134}
3135
3136void RevisedSimplex::DisplayVariableBounds() {
3137 if (VLOG_IS_ON(3)) {
3138 const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
3139 for (ColIndex col(0); col < num_cols_; ++col) {
3140 switch (variable_type[col]) {
3141 case VariableType::UNCONSTRAINED:
3142 break;
3143 case VariableType::LOWER_BOUNDED:
3144 VLOG(3) << variable_name_[col]
3145 << " >= " << StringifyWithFlags(lower_bound_[col]) << ";";
3146 break;
3147 case VariableType::UPPER_BOUNDED:
3148 VLOG(3) << variable_name_[col]
3149 << " <= " << StringifyWithFlags(upper_bound_[col]) << ";";
3150 break;
3151 case VariableType::UPPER_AND_LOWER_BOUNDED:
3152 VLOG(3) << StringifyWithFlags(lower_bound_[col])
3153 << " <= " << variable_name_[col]
3154 << " <= " << StringifyWithFlags(upper_bound_[col]) << ";";
3155 break;
3156 case VariableType::FIXED_VARIABLE:
3157 VLOG(3) << variable_name_[col] << " = "
3158 << StringifyWithFlags(lower_bound_[col]) << ";";
3159 break;
3160 default: // This should never happen.
3161 LOG(DFATAL) << "Column " << col << " has no meaningful status.";
3162 break;
3163 }
3164 }
3165 }
3166}
3167
3168 RowMajorSparseMatrix RevisedSimplex::ComputeDictionary(
3169 const DenseRow* column_scales) {
3170 absl::StrongVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3171 for (ColIndex col(0); col < num_cols_; ++col) {
3172 ComputeDirection(col);
3173 for (const auto e : direction_) {
3174 if (column_scales == nullptr) {
3175 dictionary[e.row()].SetCoefficient(col, e.coefficient());
3176 continue;
3177 }
3178 const Fractional numerator =
3179 col < column_scales->size() ? (*column_scales)[col] : 1.0;
3180 const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3181 ? (*column_scales)[GetBasis(e.row())]
3182 : 1.0;
3183 dictionary[e.row()].SetCoefficient(
3184 col, direction_[e.row()] * (numerator / denominator));
3185 }
3186 }
3187 return dictionary;
3188}
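// Dictionary semantics (sketch): row r of the returned matrix holds the
// simplex tableau row of basic variable basis_[r], i.e. the corresponding row
// of B^{-1} A. When column_scales is given, entry (r, col) is rescaled as
// direction_[r] * scale(col) / scale(basis_[r]), matching the numerator /
// denominator computation above, presumably so that the dictionary is
// expressed in terms of the unscaled problem.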
3189
3190 void RevisedSimplex::ComputeBasicVariablesForState(
3191 const LinearProgram& linear_program, const BasisState& state) {
3192 LoadStateForNextSolve(state);
3193 Status status = Initialize(linear_program);
3194 if (status.ok()) {
3195 variable_values_.RecomputeBasicVariableValues();
3196 variable_values_.ResetPrimalInfeasibilityInformation();
3197 solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3198 }
3199}
3200
3201void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3202 if (VLOG_IS_ON(3)) {
3203 // This function has complexity O(num_non_zeros_in_matrix).
3204 DisplayInfoOnVariables();
3205
3206 std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3207 const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
3208 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3209 absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3210 variable_name_[col]));
3211 }
3212 VLOG(3) << output << ";";
3213
3214 const RevisedSimplexDictionary dictionary(nullptr, this);
3215 RowIndex r(0);
3216 for (const SparseRow& row : dictionary) {
3217 output.clear();
3218 ColIndex basic_col = basis_[r];
3219 absl::StrAppend(&output, variable_name_[basic_col], " = ",
3220 StringifyWithFlags(variable_values_.Get(basic_col)));
3221 for (const SparseRowEntry e : row) {
3222 if (e.col() != basic_col) {
3223 absl::StrAppend(&output,
3224 StringifyMonomialWithFlags(e.coefficient(),
3225 variable_name_[e.col()]));
3226 }
3227 }
3228 VLOG(3) << output << ";";
3229 ++r;
3230 }
3231 VLOG(3) << "------";
3232 DisplayVariableBounds();
3233 }
3234}
3235
3236void RevisedSimplex::DisplayProblem() const {
3237 // This function has complexity O(num_rows * num_cols *
3238 // num_non_zeros_in_row).
3239 if (VLOG_IS_ON(3)) {
3240 DisplayInfoOnVariables();
3241 std::string output = "min: ";
3242 bool has_objective = false;
3243 for (ColIndex col(0); col < num_cols_; ++col) {
3244 const Fractional coeff = objective_[col];
3245 has_objective |= (coeff != 0.0);
3246 absl::StrAppend(&output,
3247 StringifyMonomialWithFlags(coeff, variable_name_[col]));
3248 }
3249 if (!has_objective) {
3250 absl::StrAppend(&output, " 0");
3251 }
3252 VLOG(3) << output << ";";
3253 for (RowIndex row(0); row < num_rows_; ++row) {
3254 output = "";
3255 for (ColIndex col(0); col < num_cols_; ++col) {
3256 absl::StrAppend(&output,
3257 StringifyMonomialWithFlags(
3258 compact_matrix_.column(col).LookUpCoefficient(row),
3259 variable_name_[col]));
3260 }
3261 VLOG(3) << output << " = 0;";
3262 }
3263 VLOG(3) << "------";
3264 }
3265}
3266
3267void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3268 DCHECK(time_limit != nullptr);
3269 const double current_deterministic_time = DeterministicTime();
3270 const double deterministic_time_delta =
3271 current_deterministic_time - last_deterministic_time_update_;
3272 time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3273 last_deterministic_time_update_ = current_deterministic_time;
3274}
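// Why deltas (sketch): DeterministicTime() is a monotonically increasing
// counter over the whole solve, while TimeLimit::AdvanceDeterministicTime()
// only accepts increments. Reporting the difference since the previous call
// and caching it in last_deterministic_time_update_ lets the same TimeLimit be
// shared with other solver components without double counting. For example,
// if DeterministicTime() returns 0.10, 0.15 and 0.19 across three calls
// (starting from last_deterministic_time_update_ = 0.0), the time limit is
// advanced by 0.10, 0.05 and 0.04 respectively.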
3275
3276#undef DCHECK_COL_BOUNDS
3277#undef DCHECK_ROW_BOUNDS
3278
3279} // namespace glop
3280} // namespace operations_research