OR-Tools 8.2
reduced_costs.cc
// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/glop/reduced_costs.h"

#include <random>

#ifdef OMP
#include <omp.h>
#endif

#include "ortools/lp_data/lp_utils.h"

namespace operations_research {
namespace glop {

ReducedCosts::ReducedCosts(const CompactSparseMatrix& matrix,
                           const DenseRow& objective,
                           const RowToColMapping& basis,
                           const VariablesInfo& variables_info,
                           const BasisFactorization& basis_factorization,
                           random_engine_t* random)
    : matrix_(matrix),
      objective_(objective),
      basis_(basis),
      variables_info_(variables_info),
      basis_factorization_(basis_factorization),
      random_(random),
      parameters_(),
      stats_(),
      must_refactorize_basis_(false),
      recompute_basic_objective_left_inverse_(true),
      recompute_basic_objective_(true),
      recompute_reduced_costs_(true),
      are_reduced_costs_precise_(false),
      are_reduced_costs_recomputed_(false),
      basic_objective_(),
      reduced_costs_(),
      basic_objective_left_inverse_(),
      dual_feasibility_tolerance_(),
      is_dual_infeasible_(),
      are_dual_infeasible_positions_maintained_(false) {}

bool ReducedCosts::NeedsBasisRefactorization() const {
  return must_refactorize_basis_;
}

bool ReducedCosts::TestEnteringReducedCostPrecision(
    ColIndex entering_col, const ScatteredColumn& direction,
    Fractional* reduced_cost) {
  SCOPED_TIME_STAT(&stats_);
  if (recompute_basic_objective_) {
    ComputeBasicObjective();
  }
  const Fractional old_reduced_cost = reduced_costs_[entering_col];
  const Fractional precise_reduced_cost =
      objective_[entering_col] + cost_perturbations_[entering_col] -
      PreciseScalarProduct(basic_objective_, direction);

  // Update the reduced cost of the entering variable with the precise version.
  reduced_costs_[entering_col] = precise_reduced_cost;
  *reduced_cost = precise_reduced_cost;
  if (are_dual_infeasible_positions_maintained_) {
    is_dual_infeasible_.Set(entering_col,
                            IsValidPrimalEnteringCandidate(entering_col));

    // Check if the entering column is still a valid candidate.
    if (!is_dual_infeasible_.IsSet(entering_col)) {
      // If we don't have the reduced cost with maximum precision, we return
      // false and the next ChooseEnteringColumn() will recompute them.
      // If they are already precise, we will skip this one (since it is no
      // longer a candidate).
      if (!are_reduced_costs_precise_) {
        MakeReducedCostsPrecise();
      }
      return false;
    }
  }

  // At this point, we have an entering variable that will move the objective
  // in the right direction. We check the precision of the reduced cost and
  // edge norms, but even if they are imprecise, we finish this pivot and will
  // recompute them during the next call to ChooseEnteringColumn().

  // Estimate the accuracy of the reduced costs using the entering variable.
  if (!recompute_reduced_costs_) {
    const Fractional estimated_reduced_costs_accuracy =
        old_reduced_cost - precise_reduced_cost;
    const Fractional scale =
        (std::abs(precise_reduced_cost) <= 1.0) ? 1.0 : precise_reduced_cost;
    stats_.reduced_costs_accuracy.Add(estimated_reduced_costs_accuracy / scale);
    if (std::abs(estimated_reduced_costs_accuracy) / scale >
        parameters_.recompute_reduced_costs_threshold()) {
      VLOG(1) << "Recomputing reduced costs, value = " << precise_reduced_cost
              << " error = "
              << std::abs(precise_reduced_cost - old_reduced_cost);
      MakeReducedCostsPrecise();
    }
  }
  return true;
}

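// Reference for the test above: with B the current basis and 'direction' equal
// to B^{-1} a_entering, the precise value recomputed here is
//   d_e = c_e + perturbation_e - (c_B + perturbation_B) . direction,
// i.e. the textbook reduced cost c_e - c_B^T B^{-1} a_e with perturbed costs
// (basic_objective_ already contains the perturbations, see
// ComputeBasicObjective()). A numeric sketch with illustrative values only: if
// the incrementally maintained value was 1.0e-6 and the precise one is 2.0e-6,
// the estimated accuracy is 1.0e-6 with a scale of 1.0, and a full
// recomputation is triggered only if this exceeds
// parameters_.recompute_reduced_costs_threshold().
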
Fractional ReducedCosts::ComputeCurrentDualResidualError() const {
  SCOPED_TIME_STAT(&stats_);
  DCHECK(!recompute_reduced_costs_);
  if (recompute_reduced_costs_) return 0.0;

  // The current reduced costs of the slack columns are the opposite of the
  // dual values. Note that they are updated by UpdateBeforeBasisPivot().
  const RowIndex num_rows = matrix_.num_rows();
  const ColIndex first_slack_col = matrix_.num_cols() - RowToColIndex(num_rows);
  DenseRow dual_values(RowToColIndex(num_rows), 0.0);
  for (RowIndex row(0); row < num_rows; ++row) {
    const ColIndex row_as_col = RowToColIndex(row);
    const ColIndex slack_col = first_slack_col + row_as_col;
    dual_values[row_as_col] = objective_[slack_col] +
                              cost_perturbations_[slack_col] -
                              reduced_costs_[slack_col];
  }
  Fractional dual_residual_error(0.0);
  for (RowIndex row(0); row < num_rows; ++row) {
    const ColIndex basic_col = basis_[row];
    const Fractional residual =
        objective_[basic_col] + cost_perturbations_[basic_col] -
        matrix_.ColumnScalarProduct(basic_col, dual_values);
    dual_residual_error = std::max(dual_residual_error, std::abs(residual));
  }
  return dual_residual_error;
}

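// Why the first loop above recovers the dual values: the column of the slack
// variable attached to row i is the i-th unit column, so its reduced cost is
//   rc_slack_i = c_slack_i + perturbation_slack_i - y_i,
// where y is the vector of dual values. Solving for y_i gives exactly the
// expression used in the loop, and with a zero slack cost it reduces to
// y_i = -rc_slack_i (the reduced costs of the slacks are the opposite of the
// dual values, as the comment says). The value returned is then the maximum
// over the basic columns of |c_col + perturbation_col - y . a_col|, i.e. the
// residual of the dual system B^T y = c_B.
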
Fractional ReducedCosts::ComputeMaximumDualInfeasibility() const {
  SCOPED_TIME_STAT(&stats_);
  DCHECK(!recompute_reduced_costs_);
  if (recompute_reduced_costs_) return 0.0;
  Fractional maximum_dual_infeasibility = 0.0;
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
    const Fractional rc = reduced_costs_[col];
    if ((can_increase.IsSet(col) && rc < 0.0) ||
        (can_decrease.IsSet(col) && rc > 0.0)) {
      maximum_dual_infeasibility =
          std::max(maximum_dual_infeasibility, std::abs(rc));
    }
  }
  return maximum_dual_infeasibility;
}

Fractional ReducedCosts::ComputeSumOfDualInfeasibilities() const {
  SCOPED_TIME_STAT(&stats_);
  DCHECK(!recompute_reduced_costs_);
  if (recompute_reduced_costs_) return 0.0;
  Fractional dual_infeasibility_sum = 0.0;
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
    const Fractional rc = reduced_costs_[col];
    if ((can_increase.IsSet(col) && rc < 0.0) ||
        (can_decrease.IsSet(col) && rc > 0.0)) {
      dual_infeasibility_sum += std::abs(rc);
    }
  }
  return dual_infeasibility_sum;
}

void ReducedCosts::UpdateBeforeBasisPivot(ColIndex entering_col,
                                          RowIndex leaving_row,
                                          const ScatteredColumn& direction,
                                          UpdateRow* update_row) {
  SCOPED_TIME_STAT(&stats_);
  const ColIndex leaving_col = basis_[leaving_row];
  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));

  if (are_dual_infeasible_positions_maintained_) {
    is_dual_infeasible_.Clear(entering_col);
  }
  UpdateReducedCosts(entering_col, leaving_col, leaving_row,
                     direction[leaving_row], update_row);
  if (are_dual_infeasible_positions_maintained_) {
    UpdateEnteringCandidates(update_row->GetNonZeroPositions());
  }

  // Note that it is important to update basic_objective_ AFTER calling
  // UpdateReducedCosts().
  UpdateBasicObjective(entering_col, leaving_row);
}

void ReducedCosts::SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col) {
  SCOPED_TIME_STAT(&stats_);
  is_dual_infeasible_.Clear(col);
  DCHECK(!IsValidPrimalEnteringCandidate(col));
}

void ReducedCosts::SetNonBasicVariableCostToZero(ColIndex col,
                                                 Fractional* current_cost) {
  DCHECK_NE(variables_info_.GetStatusRow()[col], VariableStatus::BASIC);
  DCHECK_EQ(current_cost, &objective_[col]);
  reduced_costs_[col] -= objective_[col];
  *current_cost = 0.0;
}

void ReducedCosts::SetParameters(const GlopParameters& parameters) {
  parameters_ = parameters;
}

void ReducedCosts::ResetForNewObjective() {
  SCOPED_TIME_STAT(&stats_);
  recompute_basic_objective_ = true;
  recompute_basic_objective_left_inverse_ = true;
  recompute_reduced_costs_ = true;
  are_reduced_costs_precise_ = false;
}

void ReducedCosts::UpdateDataOnBasisPermutation() {
  SCOPED_TIME_STAT(&stats_);
  recompute_basic_objective_ = true;
  recompute_basic_objective_left_inverse_ = true;
}

void ReducedCosts::MakeReducedCostsPrecise() {
  SCOPED_TIME_STAT(&stats_);
  if (are_reduced_costs_precise_) return;
  must_refactorize_basis_ = true;
  recompute_basic_objective_left_inverse_ = true;
  recompute_reduced_costs_ = true;
}

void ReducedCosts::PerturbCosts() {
  SCOPED_TIME_STAT(&stats_);
  VLOG(1) << "Perturbing the costs ... ";

  Fractional max_cost_magnitude = 0.0;
  const ColIndex structural_size =
      matrix_.num_cols() - RowToColIndex(matrix_.num_rows());
  for (ColIndex col(0); col < structural_size; ++col) {
    max_cost_magnitude =
        std::max(max_cost_magnitude, std::abs(objective_[col]));
  }

  cost_perturbations_.AssignToZero(matrix_.num_cols());
  for (ColIndex col(0); col < structural_size; ++col) {
    const Fractional objective = objective_[col];
    const Fractional magnitude =
        (1.0 + std::uniform_real_distribution<double>()(*random_)) *
        (parameters_.relative_cost_perturbation() * std::abs(objective) +
         parameters_.relative_max_cost_perturbation() * max_cost_magnitude);
    DCHECK_GE(magnitude, 0.0);

    // The perturbation direction is such that a dual-feasible solution stays
    // feasible. This is important.
    const VariableType type = variables_info_.GetTypeRow()[col];
    switch (type) {
      case VariableType::UNCONSTRAINED:
        break;
      case VariableType::FIXED_VARIABLE:
        break;
      case VariableType::LOWER_BOUNDED:
        cost_perturbations_[col] = magnitude;
        break;
      case VariableType::UPPER_BOUNDED:
        cost_perturbations_[col] = -magnitude;
        break;
      case VariableType::UPPER_AND_LOWER_BOUNDED:
        // Here we don't necessarily maintain the dual-feasibility of a dual
        // feasible solution, however we can always shift the variable to its
        // other bound (because it is boxed) to restore dual-feasibility. This
        // is done by MakeBoxedVariableDualFeasible() at the end of the dual
        // phase-I algorithm.
        if (objective > 0.0) {
          cost_perturbations_[col] = magnitude;
        } else if (objective < 0.0) {
          cost_perturbations_[col] = -magnitude;
        }
        break;
    }
  }
}

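// Rationale for the signs used in the switch above: the dual constraint
// attached to a lower-bounded variable is y . a_col <= c_col (reduced cost
// >= 0), so adding a nonnegative magnitude to c_col only relaxes it, and
// symmetrically the constraint of an upper-bounded variable (reduced cost
// <= 0) is relaxed by subtracting the magnitude. Free variables require an
// exact zero reduced cost and fixed variables impose no sign condition, so
// neither is perturbed. Every dual constraint is thus only relaxed, and a
// dual-feasible vector of dual values stays feasible, as claimed in the
// comment; boxed variables are the one exception, handled afterwards by
// MakeBoxedVariableDualFeasible() as explained in the UPPER_AND_LOWER_BOUNDED
// case.
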
void ReducedCosts::ShiftCost(ColIndex col) {
  SCOPED_TIME_STAT(&stats_);
  const Fractional kToleranceFactor = parameters_.degenerate_ministep_factor();
  const Fractional small_step =
      dual_feasibility_tolerance_ *
      (reduced_costs_[col] > 0.0 ? kToleranceFactor : -kToleranceFactor);
  IF_STATS_ENABLED(stats_.cost_shift.Add(reduced_costs_[col] + small_step));
  cost_perturbations_[col] -= reduced_costs_[col] + small_step;
  reduced_costs_[col] = -small_step;
}

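// Numeric sketch of the shift above, with illustrative values only: with a
// dual feasibility tolerance of 1e-7, a degenerate ministep factor of 1e-2 and
// a current reduced cost of +3e-7, small_step is +1e-9. The stored reduced
// cost then becomes the tiny opposite-sign value -1e-9, and the difference
// (3e-7 + 1e-9) is absorbed into cost_perturbations_[col] so that objective_
// itself is left untouched and the shift can be removed later by
// ClearAndRemoveCostShifts().
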
void ReducedCosts::ClearAndRemoveCostShifts() {
  SCOPED_TIME_STAT(&stats_);
  cost_perturbations_.AssignToZero(matrix_.num_cols());
  recompute_basic_objective_ = true;
  recompute_basic_objective_left_inverse_ = true;
  recompute_reduced_costs_ = true;
  are_reduced_costs_precise_ = false;
}

void ReducedCosts::MaintainDualInfeasiblePositions(bool maintain) {
  are_dual_infeasible_positions_maintained_ = maintain;
  if (are_dual_infeasible_positions_maintained_ && !recompute_reduced_costs_) {
    ResetDualInfeasibilityBitSet();
  }
}

const DenseRow& ReducedCosts::GetReducedCosts() {
  SCOPED_TIME_STAT(&stats_);
  RecomputeReducedCostsAndPrimalEnteringCandidatesIfNeeded();
  return reduced_costs_;
}

DenseColumn ReducedCosts::GetDualValues() {
  SCOPED_TIME_STAT(&stats_);
  ComputeBasicObjectiveLeftInverse();
  return Transpose(basic_objective_left_inverse_.values);
}

void ReducedCosts::RecomputeReducedCostsAndPrimalEnteringCandidatesIfNeeded() {
  if (basis_factorization_.IsRefactorized()) {
    must_refactorize_basis_ = false;
  }
  if (recompute_reduced_costs_) {
    ComputeReducedCosts();
    if (are_dual_infeasible_positions_maintained_) {
      ResetDualInfeasibilityBitSet();
    }
  }
}

void ReducedCosts::ComputeBasicObjective() {
  SCOPED_TIME_STAT(&stats_);
  const ColIndex num_cols_in_basis = RowToColIndex(matrix_.num_rows());
  cost_perturbations_.resize(matrix_.num_cols(), 0.0);
  basic_objective_.resize(num_cols_in_basis, 0.0);
  for (ColIndex col(0); col < num_cols_in_basis; ++col) {
    const ColIndex basis_col = basis_[ColToRowIndex(col)];
    basic_objective_[col] =
        objective_[basis_col] + cost_perturbations_[basis_col];
  }
  recompute_basic_objective_ = false;
  recompute_basic_objective_left_inverse_ = true;
}

void ReducedCosts::ComputeReducedCosts() {
  SCOPED_TIME_STAT(&stats_);
  if (recompute_basic_objective_left_inverse_) {
    ComputeBasicObjectiveLeftInverse();
  }
  Fractional dual_residual_error(0.0);
  const ColIndex num_cols = matrix_.num_cols();

  reduced_costs_.resize(num_cols, 0.0);
  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
#ifdef OMP
  const int num_omp_threads = parameters_.num_omp_threads();
#else
  const int num_omp_threads = 1;
#endif
  if (num_omp_threads == 1) {
    for (ColIndex col(0); col < num_cols; ++col) {
      reduced_costs_[col] = objective_[col] + cost_perturbations_[col] -
                            matrix_.ColumnScalarProduct(
                                col, basic_objective_left_inverse_.values);

      // We also compute the dual residual error y.B - c_B.
      if (is_basic.IsSet(col)) {
        dual_residual_error =
            std::max(dual_residual_error, std::abs(reduced_costs_[col]));
      }
    }
  } else {
#ifdef OMP
    // In the multi-threaded case, perform the same computation as in the
    // single-threaded case above.
    std::vector<Fractional> thread_local_dual_residual_error(num_omp_threads,
                                                             0.0);
    const int parallel_loop_size = num_cols.value();
#pragma omp parallel for num_threads(num_omp_threads)
    for (int i = 0; i < parallel_loop_size; i++) {
      const ColIndex col(i);
      reduced_costs_[col] = objective_[col] + cost_perturbations_[col] -
                            matrix_.ColumnScalarProduct(
                                col, basic_objective_left_inverse_.values);

      if (is_basic.IsSet(col)) {
        thread_local_dual_residual_error[omp_get_thread_num()] =
            std::max(thread_local_dual_residual_error[omp_get_thread_num()],
                     std::abs(reduced_costs_[col]));
      }
    }
    // end of omp parallel for
    for (int i = 0; i < num_omp_threads; i++) {
      dual_residual_error =
          std::max(dual_residual_error, thread_local_dual_residual_error[i]);
    }
#endif  // OMP
  }

  recompute_reduced_costs_ = false;
  are_reduced_costs_recomputed_ = true;
  are_reduced_costs_precise_ = basis_factorization_.IsRefactorized();

  // It is not reasonable to have a dual tolerance lower than the current
  // dual_residual_error, otherwise we may never terminate (this is happening
  // on dfl001.mps with a low dual_feasibility_tolerance). Note that since we
  // recompute the reduced costs with maximum precision before really exiting,
  // it is fine to do a couple of iterations with a high zero tolerance.
  dual_feasibility_tolerance_ = parameters_.dual_feasibility_tolerance();
  if (dual_residual_error > dual_feasibility_tolerance_) {
    VLOG(2) << "Changing dual_feasibility_tolerance to " << dual_residual_error;
    dual_feasibility_tolerance_ = dual_residual_error;
  }
}

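// Summary of the computation above: with y^T = (c_B + perturbation_B)^T B^{-1}
// (held in basic_objective_left_inverse_ after the left solve), every column
// gets
//   reduced_costs_[col] = c_col + perturbation_col - y . a_col.
// For a basic column this quantity is mathematically zero, so its computed
// magnitude is exactly the dual residual error |y . B - c_B| that is used
// above to relax dual_feasibility_tolerance_ when the factorization is not
// accurate enough.
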
void ReducedCosts::ComputeBasicObjectiveLeftInverse() {
  SCOPED_TIME_STAT(&stats_);
  if (recompute_basic_objective_) {
    ComputeBasicObjective();
  }
  basic_objective_left_inverse_.values = basic_objective_;
  basic_objective_left_inverse_.non_zeros.clear();
  basis_factorization_.LeftSolve(&basic_objective_left_inverse_);
  recompute_basic_objective_left_inverse_ = false;
  IF_STATS_ENABLED(stats_.basic_objective_left_inverse_density.Add(
      Density(basic_objective_left_inverse_.values)));

  // TODO(user): Estimate its accuracy by a few scalar products, and
  // refactorize if it is above a threshold?
}

// Note that the update is such that the entering reduced cost is always set to
// 0.0. In particular, because of this we can step in the wrong direction for
// the dual method if the reduced cost is slightly infeasible.
void ReducedCosts::UpdateReducedCosts(ColIndex entering_col,
                                      ColIndex leaving_col,
                                      RowIndex leaving_row, Fractional pivot,
                                      UpdateRow* update_row) {
  DCHECK_NE(entering_col, leaving_col);
  DCHECK_NE(pivot, 0.0);
  if (recompute_reduced_costs_) return;

  // Note that this is precise because of the CheckPrecision().
  const Fractional entering_reduced_cost = reduced_costs_[entering_col];

  // Nothing to do if the entering reduced cost is 0.0.
  // This corresponds to a dual degenerate pivot.
  if (entering_reduced_cost == 0.0) {
    VLOG(2) << "Reduced costs didn't change.";

    // TODO(user): the reduced costs may still be "precise" in this case, but
    // other parts of the code assume that if they are precise then the basis
    // was just refactorized in order to recompute them, which is not the case
    // here. Clean this up.
    are_reduced_costs_precise_ = false;
    return;
  }

  are_reduced_costs_recomputed_ = false;
  update_row->ComputeUpdateRow(leaving_row);
  SCOPED_TIME_STAT(&stats_);

  const ColIndex first_slack_col =
      matrix_.num_cols() - RowToColIndex(matrix_.num_rows());

  // Update the leaving variable reduced cost.
  // '-pivot' is the value of the entering_edge at 'leaving_row'.
  // The edge of the 'leaving_col' in the new basis is equal to
  // 'entering_edge / -pivot'.
  const Fractional new_leaving_reduced_cost = entering_reduced_cost / -pivot;
  for (const ColIndex col : update_row->GetNonZeroPositions()) {
    // Because the columns are in order, it is safe to break here.
    if (col >= first_slack_col) break;
    const Fractional coeff = update_row->GetCoefficient(col);
    reduced_costs_[col] += new_leaving_reduced_cost * coeff;
  }
  are_reduced_costs_precise_ = false;

  // Always update the slack variable positions so we have the dual values and
  // can use them in ComputeCurrentDualResidualError().
  const ScatteredRow& unit_row_left_inverse =
      update_row->GetUnitRowLeftInverse();
  if (unit_row_left_inverse.non_zeros.empty()) {
    const ColIndex num_cols = unit_row_left_inverse.values.size();
    for (ColIndex col(0); col < num_cols; ++col) {
      const ColIndex slack_col = first_slack_col + col;
      const Fractional coeff = unit_row_left_inverse[col];
      reduced_costs_[slack_col] += new_leaving_reduced_cost * coeff;
    }
  } else {
    for (const ColIndex col : unit_row_left_inverse.non_zeros) {
      const ColIndex slack_col = first_slack_col + col;
      const Fractional coeff = unit_row_left_inverse[col];
      reduced_costs_[slack_col] += new_leaving_reduced_cost * coeff;
    }
  }
  reduced_costs_[leaving_col] = new_leaving_reduced_cost;

  // In the dual, since we compute the update before selecting the entering
  // variable, this cost is still in the update_position_list, so we make sure
  // it is 0 here.
  reduced_costs_[entering_col] = 0.0;
}

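// Derivation of the update above: let u be the update row (row 'leaving_row'
// of B^{-1} A) and d_e the entering reduced cost. The loop adds
// (d_e / -pivot) * u_col to every reduced cost. The leaving column has
// coefficient 1 in u, so it ends at d_e / -pivot, which is what is stored
// explicitly; the entering column has coefficient 'pivot', so it ends at
// d_e + (d_e / -pivot) * pivot = 0, which is why it is reset at the very end.
// A scalar sketch with illustrative values only: d_e = 2.0 and pivot = 0.5
// give new_leaving_reduced_cost = -4.0, and a column whose update-row
// coefficient is 0.25 sees its reduced cost change by -1.0.
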
bool ReducedCosts::IsValidPrimalEnteringCandidate(ColIndex col) const {
  const Fractional reduced_cost = reduced_costs_[col];
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  const Fractional tolerance = dual_feasibility_tolerance_;
  return (can_increase.IsSet(col) && (reduced_cost < -tolerance)) ||
         (can_decrease.IsSet(col) && (reduced_cost > tolerance));
}

void ReducedCosts::ResetDualInfeasibilityBitSet() {
  SCOPED_TIME_STAT(&stats_);
  const ColIndex num_cols = matrix_.num_cols();
  is_dual_infeasible_.ClearAndResize(num_cols);
  UpdateEnteringCandidates(variables_info_.GetIsRelevantBitRow());
}

// A variable is an entering candidate if it can move in a direction that
// minimizes the objective. That is, its value needs to increase if its
// reduced cost is negative or it needs to decrease if its reduced cost is
// positive (see the IsValidPrimalEnteringCandidate() function). Note that
// this is the same as a dual-infeasible variable.
//
// Optimization for speed (the function is about 40% faster than the code in
// IsValidPrimalEnteringCandidate() or a switch() on variable_status[col]): it
// relies on the fact that (double1 > double2) returns a 1 or 0 result when
// converted to an int. It also uses an XOR (which appears to be faster) since
// the two conditions on the reduced cost are exclusive.
template <typename ColumnsToUpdate>
void ReducedCosts::UpdateEnteringCandidates(const ColumnsToUpdate& cols) {
  SCOPED_TIME_STAT(&stats_);
  const Fractional tolerance = dual_feasibility_tolerance_;
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  for (const ColIndex col : cols) {
    const Fractional reduced_cost = reduced_costs_[col];
    is_dual_infeasible_.SetBitFromOtherBitSets(
        col, can_decrease, reduced_cost > tolerance, can_increase,
        reduced_cost < -tolerance);
    DCHECK_EQ(is_dual_infeasible_.IsSet(col),
              IsValidPrimalEnteringCandidate(col));
  }
}

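// How the branch-free form above matches IsValidPrimalEnteringCandidate(): the
// comparisons (reduced_cost > tolerance) and (reduced_cost < -tolerance) are
// passed as the integer arguments (1 or 0), and SetBitFromOtherBitSets()
// effectively computes
//   bit(col) = (can_decrease(col) & (reduced_cost > tolerance))
//            ^ (can_increase(col) & (reduced_cost < -tolerance)).
// Since the tolerance is nonnegative, at most one of the two comparisons can
// hold, so the XOR behaves exactly like an OR, and the DCHECK above verifies
// the equivalence in debug builds.
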
void ReducedCosts::UpdateBasicObjective(ColIndex entering_col,
                                        RowIndex leaving_row) {
  SCOPED_TIME_STAT(&stats_);
  basic_objective_[RowToColIndex(leaving_row)] =
      objective_[entering_col] + cost_perturbations_[entering_col];
  recompute_basic_objective_left_inverse_ = true;
}

}  // namespace glop
}  // namespace operations_research