OR-Tools 8.2
encoding.cc
1// Copyright 2010-2018 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/encoding.h"
15
16#include <algorithm>
17#include <deque>
18#include <memory>
19#include <queue>
20
21#include "ortools/sat/sat_parameters.pb.h"
22
23namespace operations_research {
24namespace sat {
25
26EncodingNode::EncodingNode(Literal l)
27    : depth_(0),
28 lb_(0),
29 ub_(1),
30 for_sorting_(l.Variable()),
31 child_a_(nullptr),
32 child_b_(nullptr),
33 literals_(1, l) {}
34
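// Builds the "full" variant of the node encoding the sum of a and b: n fresh
// Boolean variables are created and chained so that literal(i) implies
// literal(i - 1), forming a unary (order) encoding of the sum.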
35void EncodingNode::InitializeFullNode(int n, EncodingNode* a, EncodingNode* b,
36                                      SatSolver* solver) {
37 CHECK(literals_.empty()) << "Already initialized";
38 CHECK_GT(n, 0);
39 const BooleanVariable first_var_index(solver->NumVariables());
40 solver->SetNumVariables(solver->NumVariables() + n);
41 for (int i = 0; i < n; ++i) {
42 literals_.push_back(Literal(first_var_index + i, true));
43 if (i > 0) {
44 solver->AddBinaryClause(literal(i - 1), literal(i).Negated());
45 }
46 }
47 lb_ = a->lb_ + b->lb_;
48 ub_ = lb_ + n;
49 depth_ = 1 + std::max(a->depth_, b->depth_);
50 child_a_ = a;
51 child_b_ = b;
52 for_sorting_ = first_var_index;
53}
54
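// Lazy variant: only one variable (the first literal of the node) is created
// here; more are added on demand by IncreaseCurrentUB(). No clause is added,
// the callers (LazyMerge()/IncreaseNodeSize()) take care of that.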
55void EncodingNode::InitializeLazyNode(EncodingNode* a, EncodingNode* b,
56                                      SatSolver* solver) {
57 CHECK(literals_.empty()) << "Already initialized";
58 const BooleanVariable first_var_index(solver->NumVariables());
59 solver->SetNumVariables(solver->NumVariables() + 1);
60 literals_.emplace_back(first_var_index, true);
61 child_a_ = a;
62 child_b_ = b;
63 ub_ = a->ub_ + b->ub_;
64 lb_ = a->lb_ + b->lb_;
65 depth_ = 1 + std::max(a->depth_, b->depth_);
66
67  // Merging nodes of the same depth in order seems to help a bit.
68 for_sorting_ = std::min(a->for_sorting_, b->for_sorting_);
69}
70
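// Adds one more literal to this node (a brand new solver variable) together
// with the chaining clause linking it to the previous literal. Returns false
// if the node already has ub_ - lb_ literals.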
71bool EncodingNode::IncreaseCurrentUB(SatSolver* solver) {
72  CHECK(!literals_.empty());
73 if (current_ub() == ub_) return false;
74 literals_.emplace_back(BooleanVariable(solver->NumVariables()), true);
75 solver->SetNumVariables(solver->NumVariables() + 1);
76 solver->AddBinaryClause(literals_.back().Negated(),
77 literals_[literals_.size() - 2]);
78 return true;
79}
80
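// Removes the literals already fixed by the solver at decision level zero:
// leading true literals are dropped and counted in lb_, trailing false
// literals are dropped and ub_ is lowered accordingly. Returns the number of
// true literals removed.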
81int EncodingNode::Reduce(const SatSolver& solver) {
82 int i = 0;
83 while (i < literals_.size() &&
84 solver.Assignment().LiteralIsTrue(literals_[i])) {
85 ++i;
86 ++lb_;
87 }
88 literals_.erase(literals_.begin(), literals_.begin() + i);
89 while (!literals_.empty() &&
90 solver.Assignment().LiteralIsFalse(literals_.back())) {
91 literals_.pop_back();
92 ub_ = lb_ + literals_.size();
93 }
94 return i;
95}
96
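// Fixes to false every literal at position >= upper_bound (such positions can
// never be reached given the bound) and shrinks the node accordingly.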
97void EncodingNode::ApplyUpperBound(int64 upper_bound, SatSolver* solver) {
98 if (size() <= upper_bound) return;
99 for (int i = upper_bound; i < size(); ++i) {
100 solver->AddUnitClause(literal(i).Negated());
101 }
102 literals_.resize(upper_bound);
103 ub_ = lb_ + literals_.size();
104}
105
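// Merges a and b into a new lazy node whose single literal is constrained, by
// the three clauses below, to be equivalent to the disjunction of the first
// literals of a and b.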
106EncodingNode LazyMerge(EncodingNode* a, EncodingNode* b, SatSolver* solver) {
107  EncodingNode n;
108 n.InitializeLazyNode(a, b, solver);
109 solver->AddBinaryClause(a->literal(0).Negated(), n.literal(0));
110 solver->AddBinaryClause(b->literal(0).Negated(), n.literal(0));
111 solver->AddTernaryClause(n.literal(0).Negated(), a->literal(0),
112 b->literal(0));
113 return n;
114}
115
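// Adds one literal to node (raising its current upper bound by one) and then
// walks down the encoding tree, adding the literals and clauses needed to tie
// the new literal to the children. Only the side that propagates the ones
// towards the root is encoded (complete_encoding stays false below).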
116void IncreaseNodeSize(EncodingNode* node, SatSolver* solver) {
117  if (!node->IncreaseCurrentUB(solver)) return;
118 std::vector<EncodingNode*> to_process;
119 to_process.push_back(node);
120
121 // Only one side of the constraint is mandatory (the one propagating the ones
122 // to the top of the encoding tree), and it seems more efficient not to encode
123 // the other side.
124 //
125 // TODO(user): Experiment more.
126 const bool complete_encoding = false;
127
128 while (!to_process.empty()) {
129 EncodingNode* n = to_process.back();
130 EncodingNode* a = n->child_a();
131 EncodingNode* b = n->child_b();
132 to_process.pop_back();
133
134 // Note that since we were able to increase its size, n must have children.
135 // n->GreaterThan(target) is the new literal of n.
136 CHECK(a != nullptr);
137 CHECK(b != nullptr);
138 CHECK_GE(n->size(), 2);
139 const int target = n->current_ub() - 1;
140
141 // Add a literal to a if needed.
142    // That is, now that node n can go up to its new current_ub, check whether
143    // we also need to increase the current_ub of a.
144 if (a->current_ub() != a->ub()) {
145 CHECK_GE(a->current_ub() - 1 + b->lb(), target - 1);
146 if (a->current_ub() - 1 + b->lb() < target) {
147 CHECK(a->IncreaseCurrentUB(solver));
148 to_process.push_back(a);
149 }
150 }
151
152 // Add a literal to b if needed.
153 if (b->current_ub() != b->ub()) {
154 CHECK_GE(b->current_ub() - 1 + a->lb(), target - 1);
155 if (b->current_ub() - 1 + a->lb() < target) {
156 CHECK(b->IncreaseCurrentUB(solver));
157 to_process.push_back(b);
158 }
159 }
160
161 // Wire the new literal of n correctly with its two children.
162 for (int ia = a->lb(); ia < a->current_ub(); ++ia) {
163 const int ib = target - ia;
164 if (complete_encoding && ib >= b->lb() && ib < b->current_ub()) {
165 // if x <= ia and y <= ib then x + y <= ia + ib.
166 solver->AddTernaryClause(n->GreaterThan(target).Negated(),
167 a->GreaterThan(ia), b->GreaterThan(ib));
168 }
169 if (complete_encoding && ib == b->ub()) {
170 solver->AddBinaryClause(n->GreaterThan(target).Negated(),
171 a->GreaterThan(ia));
172 }
173
174 if (ib - 1 == b->lb() - 1) {
175 solver->AddBinaryClause(n->GreaterThan(target),
176 a->GreaterThan(ia).Negated());
177 }
178 if ((ib - 1) >= b->lb() && (ib - 1) < b->current_ub()) {
179 // if x > ia and y > ib - 1 then x + y > ia + ib.
180 solver->AddTernaryClause(n->GreaterThan(target),
181 a->GreaterThan(ia).Negated(),
182 b->GreaterThan(ib - 1).Negated());
183 }
184 }
185
186    // Case ia == a->lb() - 1; a->GreaterThan(ia) is always true.
187 {
188 const int ib = target - (a->lb() - 1);
189 if ((ib - 1) == b->lb() - 1) {
190 solver->AddUnitClause(n->GreaterThan(target));
191 }
192 if ((ib - 1) >= b->lb() && (ib - 1) < b->current_ub()) {
193 solver->AddBinaryClause(n->GreaterThan(target),
194 b->GreaterThan(ib - 1).Negated());
195 }
196 }
197
198    // Case ia == a->ub(); a->GreaterThan(ia) is always false.
199 {
200 const int ib = target - a->ub();
201 if (complete_encoding && ib >= b->lb() && ib < b->current_ub()) {
202 solver->AddBinaryClause(n->GreaterThan(target).Negated(),
203 b->GreaterThan(ib));
204 }
205 if (ib == b->ub()) {
206 solver->AddUnitClause(n->GreaterThan(target).Negated());
207 }
208 }
209 }
210}
211
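// Eager counterpart of LazyMerge(): creates a node with
// min(a->size() + b->size(), upper_bound) literals and immediately adds all
// the clauses relating it to its children, fixing to false any child literal
// that would exceed upper_bound.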
212EncodingNode FullMerge(Coefficient upper_bound, EncodingNode* a,
213 EncodingNode* b, SatSolver* solver) {
214 EncodingNode n;
215 const int size =
216 std::min(Coefficient(a->size() + b->size()), upper_bound).value();
217 n.InitializeFullNode(size, a, b, solver);
218 for (int ia = 0; ia < a->size(); ++ia) {
219 if (ia + b->size() < size) {
220 solver->AddBinaryClause(n.literal(ia + b->size()).Negated(),
221 a->literal(ia));
222 }
223 if (ia < size) {
224 solver->AddBinaryClause(n.literal(ia), a->literal(ia).Negated());
225 } else {
226 // Fix the variable to false because of the given upper_bound.
227 solver->AddUnitClause(a->literal(ia).Negated());
228 }
229 }
230 for (int ib = 0; ib < b->size(); ++ib) {
231 if (ib + a->size() < size) {
232 solver->AddBinaryClause(n.literal(ib + a->size()).Negated(),
233 b->literal(ib));
234 }
235 if (ib < size) {
236 solver->AddBinaryClause(n.literal(ib), b->literal(ib).Negated());
237 } else {
238 // Fix the variable to false because of the given upper_bound.
239 solver->AddUnitClause(b->literal(ib).Negated());
240 }
241 }
242 for (int ia = 0; ia < a->size(); ++ia) {
243 for (int ib = 0; ib < b->size(); ++ib) {
244 if (ia + ib < size) {
245 // if x <= ia and y <= ib, then x + y <= ia + ib.
246 solver->AddTernaryClause(n.literal(ia + ib).Negated(), a->literal(ia),
247 b->literal(ib));
248 }
249 if (ia + ib + 1 < size) {
250 // if x > ia and y > ib, then x + y > ia + ib + 1.
251 solver->AddTernaryClause(n.literal(ia + ib + 1),
252 a->literal(ia).Negated(),
253 b->literal(ib).Negated());
254 } else {
255 solver->AddBinaryClause(a->literal(ia).Negated(),
256 b->literal(ib).Negated());
257 }
258 }
259 }
260 return n;
261}
262
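// Repeatedly merges the two oldest nodes of a FIFO queue with FullMerge() and
// pushes the result back, which yields a roughly balanced encoding tree.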
263EncodingNode* MergeAllNodesWithDeque(Coefficient upper_bound,
264 const std::vector<EncodingNode*>& nodes,
265 SatSolver* solver,
266 std::deque<EncodingNode>* repository) {
267 std::deque<EncodingNode*> dq(nodes.begin(), nodes.end());
268 while (dq.size() > 1) {
269 EncodingNode* a = dq.front();
270 dq.pop_front();
271 EncodingNode* b = dq.front();
272 dq.pop_front();
273 repository->push_back(FullMerge(upper_bound, a, b, solver));
274 dq.push_back(&repository->back());
275 }
276 return dq.front();
277}
278
279namespace {
280struct SortEncodingNodePointers {
281 bool operator()(EncodingNode* a, EncodingNode* b) const { return *a < *b; }
282};
283} // namespace
284
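// Same idea but with LazyMerge(): the two nodes to merge are picked from a
// priority queue ordered by EncodingNode::operator< (defined in encoding.h).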
285EncodingNode* LazyMergeAllNodeWithPQ(const std::vector<EncodingNode*>& nodes,
286 SatSolver* solver,
287 std::deque<EncodingNode>* repository) {
288 std::priority_queue<EncodingNode*, std::vector<EncodingNode*>,
289 SortEncodingNodePointers>
290 pq(nodes.begin(), nodes.end());
291 while (pq.size() > 1) {
292 EncodingNode* a = pq.top();
293 pq.pop();
294 EncodingNode* b = pq.top();
295 pq.pop();
296 repository->push_back(LazyMerge(a, b, solver));
297 pq.push(&repository->back());
298 }
299 return pq.top();
300}
301
302std::vector<EncodingNode*> CreateInitialEncodingNodes(
303 const std::vector<Literal>& literals,
304 const std::vector<Coefficient>& coeffs, Coefficient* offset,
305 std::deque<EncodingNode>* repository) {
306 CHECK_EQ(literals.size(), coeffs.size());
307 *offset = 0;
308 std::vector<EncodingNode*> nodes;
309 for (int i = 0; i < literals.size(); ++i) {
310 // We want to maximize the cost when this literal is true.
311 if (coeffs[i] > 0) {
312 repository->emplace_back(literals[i]);
313 nodes.push_back(&repository->back());
314 nodes.back()->set_weight(coeffs[i]);
315 } else {
316 repository->emplace_back(literals[i].Negated());
317 nodes.push_back(&repository->back());
318 nodes.back()->set_weight(-coeffs[i]);
319
320      // Note that this increases the offset since the coeff is negative.
321 *offset -= coeffs[i];
322 }
323 }
324 return nodes;
325}
326
327std::vector<EncodingNode*> CreateInitialEncodingNodes(
328 const LinearObjective& objective_proto, Coefficient* offset,
329 std::deque<EncodingNode>* repository) {
330 *offset = 0;
331 std::vector<EncodingNode*> nodes;
332 for (int i = 0; i < objective_proto.literals_size(); ++i) {
333 const Literal literal(objective_proto.literals(i));
334
335 // We want to maximize the cost when this literal is true.
336 if (objective_proto.coefficients(i) > 0) {
337 repository->emplace_back(literal);
338 nodes.push_back(&repository->back());
339 nodes.back()->set_weight(Coefficient(objective_proto.coefficients(i)));
340 } else {
341 repository->emplace_back(literal.Negated());
342 nodes.push_back(&repository->back());
343 nodes.back()->set_weight(Coefficient(-objective_proto.coefficients(i)));
344
345      // Note that this increases the offset since the coeff is negative.
346 *offset -= objective_proto.coefficients(i);
347 }
348 }
349 return nodes;
350}
351
352namespace {
353
354bool EncodingNodeByWeight(const EncodingNode* a, const EncodingNode* b) {
355 return a->weight() < b->weight();
356}
357
358bool EncodingNodeByDepth(const EncodingNode* a, const EncodingNode* b) {
359 return a->depth() < b->depth();
360}
361
362bool EmptyEncodingNode(const EncodingNode* a) { return a->size() == 0; }
363
364} // namespace
365
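// At decision level zero: reduces each node (accumulating the newly fixed
// weight into *lower_bound), fixes the literals that would exceed the gap to
// upper_bound, removes the empty nodes, optionally reorders the nodes, and
// returns as assumptions the negation of the first literal of each node whose
// weight is at least stratified_lower_bound.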
366std::vector<Literal> ReduceNodesAndExtractAssumptions(
367    Coefficient upper_bound, Coefficient stratified_lower_bound,
368 Coefficient* lower_bound, std::vector<EncodingNode*>* nodes,
369 SatSolver* solver) {
370 // Remove the left-most variables fixed to one from each node.
371 // Also update the lower_bound. Note that Reduce() needs the solver to be
372 // at the root node in order to work.
373 solver->Backtrack(0);
374 for (EncodingNode* n : *nodes) {
375 *lower_bound += n->Reduce(*solver) * n->weight();
376 }
377
378  // Fix the nodes' right-most variables that are above the gap.
379 if (upper_bound != kCoefficientMax) {
380 const Coefficient gap = upper_bound - *lower_bound;
381 if (gap <= 0) return {};
382 for (EncodingNode* n : *nodes) {
383 n->ApplyUpperBound((gap / n->weight()).value(), solver);
384 }
385 }
386
387 // Remove the empty nodes.
388 nodes->erase(std::remove_if(nodes->begin(), nodes->end(), EmptyEncodingNode),
389 nodes->end());
390
391 // Sort the nodes.
392 switch (solver->parameters().max_sat_assumption_order()) {
393 case SatParameters::DEFAULT_ASSUMPTION_ORDER:
394 break;
395 case SatParameters::ORDER_ASSUMPTION_BY_DEPTH:
396 std::sort(nodes->begin(), nodes->end(), EncodingNodeByDepth);
397 break;
398 case SatParameters::ORDER_ASSUMPTION_BY_WEIGHT:
399 std::sort(nodes->begin(), nodes->end(), EncodingNodeByWeight);
400 break;
401 }
402 if (solver->parameters().max_sat_reverse_assumption_order()) {
403 // TODO(user): with DEFAULT_ASSUMPTION_ORDER, this will lead to a somewhat
404 // weird behavior, since we will reverse the nodes at each iteration...
405 std::reverse(nodes->begin(), nodes->end());
406 }
407
408 // Extract the assumptions from the nodes.
409 std::vector<Literal> assumptions;
410 for (EncodingNode* n : *nodes) {
411 if (n->weight() >= stratified_lower_bound) {
412 assumptions.push_back(n->literal(0).Negated());
413 }
414 }
415 return assumptions;
416}
417
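// The core literals appear in the same order as the nodes, so a single
// forward scan finds each corresponding node; returns the minimum weight
// among them.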
418Coefficient ComputeCoreMinWeight(const std::vector<EncodingNode*>& nodes,
419 const std::vector<Literal>& core) {
420 Coefficient min_weight = kCoefficientMax;
421 int index = 0;
422 for (int i = 0; i < core.size(); ++i) {
423 for (;
424 index < nodes.size() && nodes[index]->literal(0).Negated() != core[i];
425 ++index) {
426 }
427 CHECK_LT(index, nodes.size());
428 min_weight = std::min(min_weight, nodes[index]->weight());
429 }
430 return min_weight;
431}
432
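// Returns the largest node weight strictly smaller than upper_bound, or zero
// if there is none.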
433Coefficient MaxNodeWeightSmallerThan(const std::vector<EncodingNode*>& nodes,
434 Coefficient upper_bound) {
435 Coefficient result(0);
436 for (EncodingNode* n : nodes) {
437 CHECK_GT(n->weight(), 0);
438 if (n->weight() < upper_bound) {
439 result = std::max(result, n->weight());
440 }
441 }
442 return result;
443}
444
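// Handles a core returned by the SAT solver: the nodes whose assumption
// appears in the core are removed from nodes, merged into a single new node
// (stored in repository) of weight min_weight, and that node's first literal
// is fixed to true. Nodes with a larger weight keep the difference as a
// separate copy (WPM1-style weight splitting). A core of size one simply
// triggers IncreaseNodeSize() on the corresponding node.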
445void ProcessCore(const std::vector<Literal>& core, Coefficient min_weight,
446 std::deque<EncodingNode>* repository,
447 std::vector<EncodingNode*>* nodes, SatSolver* solver) {
448 // Backtrack to be able to add new constraints.
449 solver->Backtrack(0);
450
451 if (core.size() == 1) {
452 // The core will be reduced at the beginning of the next loop.
453 // Find the associated node, and call IncreaseNodeSize() on it.
454 CHECK(solver->Assignment().LiteralIsFalse(core[0]));
455 for (EncodingNode* n : *nodes) {
456 if (n->literal(0).Negated() == core[0]) {
457 IncreaseNodeSize(n, solver);
458 return;
459 }
460 }
461 LOG(FATAL) << "Node with literal " << core[0] << " not found!";
462 }
463
464 // Remove from nodes the EncodingNode in the core, merge them, and add the
465 // resulting EncodingNode at the back.
466 int index = 0;
467 int new_node_index = 0;
468 std::vector<EncodingNode*> to_merge;
469 for (int i = 0; i < core.size(); ++i) {
470 // Since the nodes appear in order in the core, we can find the
471 // relevant "objective" variable efficiently with a simple linear scan
472 // in the nodes vector (done with index).
473 for (; (*nodes)[index]->literal(0).Negated() != core[i]; ++index) {
474 CHECK_LT(index, nodes->size());
475 (*nodes)[new_node_index] = (*nodes)[index];
476 ++new_node_index;
477 }
478 CHECK_LT(index, nodes->size());
479 to_merge.push_back((*nodes)[index]);
480
481    // Special case if the weight > min_weight: we keep the node, but reduce
482    // its cost. This is the same "trick" used in WPM1 to deal with weights.
483    // We basically split a clause with a larger weight into two identical
484    // clauses, one with weight min_weight that will be merged and one with
485    // the remaining weight.
486 if ((*nodes)[index]->weight() > min_weight) {
487 (*nodes)[index]->set_weight((*nodes)[index]->weight() - min_weight);
488 (*nodes)[new_node_index] = (*nodes)[index];
489 ++new_node_index;
490 }
491 ++index;
492 }
493 for (; index < nodes->size(); ++index) {
494 (*nodes)[new_node_index] = (*nodes)[index];
495 ++new_node_index;
496 }
497 nodes->resize(new_node_index);
498 nodes->push_back(LazyMergeAllNodeWithPQ(to_merge, solver, repository));
499 IncreaseNodeSize(nodes->back(), solver);
500 nodes->back()->set_weight(min_weight);
501 CHECK(solver->AddUnitClause(nodes->back()->literal(0)));
502}
503
504} // namespace sat
505} // namespace operations_research