// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//
// This file implements the table constraints.

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "ortools/base/integral_types.h"
#include "ortools/base/logging.h"
#include "ortools/base/map_util.h"
#include "ortools/constraint_solver/constraint_solver.h"
#include "ortools/constraint_solver/constraint_solveri.h"
#include "ortools/util/bitset.h"
#include "ortools/util/string_array.h"
#include "ortools/util/tuple_set.h"

namespace operations_research {
namespace {
// ----- Presolve helpers -----
// TODO(user): Move this out of this file.
struct AffineTransformation {  // y == a*x + b.
  AffineTransformation() : a(1), b(0) {}
  AffineTransformation(int64 aa, int64 bb) : a(aa), b(bb) { CHECK_NE(a, 0); }
  int64 a;
  int64 b;

  bool Reverse(int64 value, int64* const reverse) const {
    const int64 temp = value - b;
    if (temp % a == 0) {
      *reverse = temp / a;
      DCHECK_EQ(Forward(*reverse), value);
      return true;
    } else {
      return false;
    }
  }

  int64 Forward(int64 value) const { return value * a + b; }

  int64 UnsafeReverse(int64 value) const { return (value - b) / a; }

  void Clear() {
    a = 1;
    b = 0;
  }

  std::string DebugString() const {
    return absl::StrFormat("(%d * x + %d)", a, b);
  }
};
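
// Illustrative example (added for exposition, not part of the original
// source): with AffineTransformation t(3, 2), i.e. y == 3 * x + 2,
//   t.Forward(4) == 14,
//   t.Reverse(14, &x) sets x to 4 and returns true,
//   t.Reverse(13, &x) returns false since (13 - 2) % 3 != 0.
// The table constraints below use Reverse() to map tuple values expressed on
// a cast expression back to values of the underlying variable.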

// TODO(user): Move this out too.
class VarLinearizer : public ModelParser {
 public:
  VarLinearizer() : target_var_(nullptr), transformation_(nullptr) {}
  ~VarLinearizer() override {}

  void VisitIntegerVariable(const IntVar* const variable,
                            const std::string& operation, int64 value,
                            IntVar* const delegate) override {
    if (operation == ModelVisitor::kSumOperation) {
      AddConstant(value);
      delegate->Accept(this);
    } else if (operation == ModelVisitor::kDifferenceOperation) {
      AddConstant(value);
      PushMultiplier(-1);
      delegate->Accept(this);
      PopMultiplier();
    } else if (operation == ModelVisitor::kProductOperation) {
      PushMultiplier(value);
      delegate->Accept(this);
      PopMultiplier();
    } else if (operation == ModelVisitor::kTraceOperation) {
      *target_var_ = const_cast<IntVar*>(variable);
      transformation_->a = multipliers_.back();
    }
  }

  void VisitIntegerVariable(const IntVar* const variable,
                            IntExpr* const delegate) override {
    *target_var_ = const_cast<IntVar*>(variable);
    transformation_->a = multipliers_.back();
  }

  void Visit(const IntVar* const var, IntVar** const target_var,
             AffineTransformation* const transformation) {
    target_var_ = target_var;
    transformation_ = transformation;
    transformation->Clear();
    PushMultiplier(1);
    var->Accept(this);
    PopMultiplier();
    CHECK(multipliers_.empty());
  }

  std::string DebugString() const override { return "VarLinearizer"; }

 private:
  void AddConstant(int64 constant) {
    transformation_->b += constant * multipliers_.back();
  }

  void PushMultiplier(int64 multiplier) {
    if (multipliers_.empty()) {
      multipliers_.push_back(multiplier);
    } else {
      multipliers_.push_back(multiplier * multipliers_.back());
    }
  }

  void PopMultiplier() { multipliers_.pop_back(); }

  std::vector<int64> multipliers_;
  IntVar** target_var_;
  AffineTransformation* transformation_;
};
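
// Illustrative sketch (added for exposition): if vars[i] was built as an
// affine cast of another variable, e.g. something equivalent to
//   IntVar* const cast_var = solver->MakeSum(solver->MakeProd(x, 3), 5)->Var();
// then linearizer.Visit(cast_var, &target, &transformation) is expected to
// recover target == x and transformation == (3 * x + 5), so that the table
// can be expressed directly on x. The exact cast structure depends on how the
// model was built; this only shows the intent of the visitor.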

static const int kBitsInUint64 = 64;
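
// Note (added for exposition): 64 is the number of bits in a uint64. The
// "small" table constraint below keeps its set of active tuples in a single
// uint64, hence it is restricted to at most 64 tuples; the other variants use
// arrays of BitLength64(num_tuples) 64-bit words instead.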

// ----- Positive Table Constraint -----

// Structure of the constraint:

// Tuples are indexed, we maintain a bitset for active tuples.

// For each var and each value, we maintain a bitset mask of tuples
// containing this value for this variable.

// Propagation: When a value is removed, blank all active tuples using the
// var-value mask.
// Then we scan all other variable/values to see if there is an active
// tuple that supports it.

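// Illustrative example (added for exposition): with vars = {x, y} and
// tuples = {(1, 2), (1, 3), (2, 3)} indexed 0, 1 and 2, the masks are
//   mask[x][1] = 0b011 (tuples 0, 1),  mask[x][2] = 0b100 (tuple 2),
//   mask[y][2] = 0b001 (tuple 0),      mask[y][3] = 0b110 (tuples 1, 2).
// If the value 1 is removed from x, the active bitset 0b111 & ~mask[x][1]
// becomes 0b100. The value 2 of y is then unsupported because
// mask[y][2] & 0b100 == 0, so 2 is removed from y.
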
class BasePositiveTableConstraint : public Constraint {
 public:
  BasePositiveTableConstraint(Solver* const s, const std::vector<IntVar*>& vars,
                              const IntTupleSet& tuples)
      : Constraint(s),
        tuple_count_(tuples.NumTuples()),
        arity_(vars.size()),
        vars_(arity_),
        holes_(arity_),
        iterators_(arity_),
        tuples_(tuples),
        transformations_(arity_) {
    // This constraint is intensive on domain and holes iterations on
    // variables. Thus we can visit all variables to get to the
    // boolean or domain int var beneath it. Then we can reverse
    // process the tupleset to move in parallel to the simplifications
    // of the variables. This way, we can keep the memory efficient
    // nature of shared tuplesets (especially important for
    // transition constraints which are a chain of table
    // constraints). The cost in running time is small as the tuples
    // are read only once to construct the bitset data structures.
    VarLinearizer linearizer;
    for (int i = 0; i < arity_; ++i) {
      linearizer.Visit(vars[i], &vars_[i], &transformations_[i]);
    }
    // Create hole iterators.
    for (int i = 0; i < arity_; ++i) {
      holes_[i] = vars_[i]->MakeHoleIterator(true);
      iterators_[i] = vars_[i]->MakeDomainIterator(true);
    }
  }

  ~BasePositiveTableConstraint() override {}

  std::string DebugString() const override {
    return absl::StrFormat("AllowedAssignments(arity = %d, tuple_count = %d)",
                           arity_, tuple_count_);
  }

  void Accept(ModelVisitor* const visitor) const override {
    visitor->BeginVisitConstraint(ModelVisitor::kAllowedAssignments, this);
    visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kVarsArgument,
                                               vars_);
    visitor->VisitIntegerMatrixArgument(ModelVisitor::kTuplesArgument, tuples_);
    visitor->EndVisitConstraint(ModelVisitor::kAllowedAssignments, this);
  }

 protected:
  bool TupleValue(int tuple_index, int var_index, int64* const value) const {
    return transformations_[var_index].Reverse(
        tuples_.Value(tuple_index, var_index), value);
  }

  int64 UnsafeTupleValue(int tuple_index, int var_index) const {
    return transformations_[var_index].UnsafeReverse(
        tuples_.Value(tuple_index, var_index));
  }

  bool IsTupleSupported(int tuple_index) {
    for (int var_index = 0; var_index < arity_; ++var_index) {
      int64 value = 0;
      if (!TupleValue(tuple_index, var_index, &value) ||
          !vars_[var_index]->Contains(value)) {
        return false;
      }
    }
    return true;
  }

  const int tuple_count_;
  const int arity_;
  std::vector<IntVar*> vars_;
  std::vector<IntVarIterator*> holes_;
  std::vector<IntVarIterator*> iterators_;
  std::vector<int64> to_remove_;

 private:
  // All allowed tuples.
  const IntTupleSet tuples_;
  // The set of affine transformations that describe the
  // simplification of the variables.
  std::vector<AffineTransformation> transformations_;
};

class PositiveTableConstraint : public BasePositiveTableConstraint {
 public:
  typedef absl::flat_hash_map<int, std::vector<uint64>> ValueBitset;

  PositiveTableConstraint(Solver* const s, const std::vector<IntVar*>& vars,
                          const IntTupleSet& tuples)
      : BasePositiveTableConstraint(s, vars, tuples),
        word_length_(BitLength64(tuples.NumTuples())),
        active_tuples_(tuples.NumTuples()) {}

  ~PositiveTableConstraint() override {}

  void Post() override {
    Demon* const d = MakeDelayedConstraintDemon0(
        solver(), this, &PositiveTableConstraint::Propagate, "Propagate");
    for (int i = 0; i < arity_; ++i) {
      vars_[i]->WhenDomain(d);
      Demon* u = MakeConstraintDemon1(
          solver(), this, &PositiveTableConstraint::Update, "Update", i);
      vars_[i]->WhenDomain(u);
    }
    // Initialize masks.
    masks_.clear();
    masks_.resize(arity_);
    for (int i = 0; i < tuple_count_; ++i) {
      InitializeMask(i);
    }
    // Initialize the active tuple bitset.
    std::vector<uint64> actives(word_length_, 0);
    for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
      if (IsTupleSupported(tuple_index)) {
        SetBit64(actives.data(), tuple_index);
      }
    }
    active_tuples_.Init(solver(), actives);
  }

  void InitialPropagate() override {
    // Build active_ structure.
    for (int var_index = 0; var_index < arity_; ++var_index) {
      for (const auto& it : masks_[var_index]) {
        if (!vars_[var_index]->Contains(it.first)) {
          active_tuples_.RevSubtract(solver(), it.second);
        }
      }
    }
    if (active_tuples_.Empty()) {
      solver()->Fail();
    }
    // Remove unreached values.
    for (int var_index = 0; var_index < arity_; ++var_index) {
      const ValueBitset& mask = masks_[var_index];
      IntVar* const var = vars_[var_index];
      to_remove_.clear();
      for (const int64 value : InitAndGetValues(iterators_[var_index])) {
        if (!gtl::ContainsKey(mask, value)) {
          to_remove_.push_back(value);
        }
      }
      if (!to_remove_.empty()) {
        var->RemoveValues(to_remove_);
      }
    }
  }

  void Propagate() {
    for (int var_index = 0; var_index < arity_; ++var_index) {
      IntVar* const var = vars_[var_index];
      to_remove_.clear();
      for (const int64 value : InitAndGetValues(iterators_[var_index])) {
        if (!Supported(var_index, value)) {
          to_remove_.push_back(value);
        }
      }
      if (!to_remove_.empty()) {
        var->RemoveValues(to_remove_);
      }
    }
  }

  void Update(int index) {
    const ValueBitset& var_masks = masks_[index];
    IntVar* const var = vars_[index];
    const int64 old_max = var->OldMax();
    const int64 vmin = var->Min();
    const int64 vmax = var->Max();
    for (int64 value = var->OldMin(); value < vmin; ++value) {
      const auto& it = var_masks.find(value);
      if (it != var_masks.end()) {
        BlankActives(it->second);
      }
    }
    for (const int64 value : InitAndGetValues(holes_[index])) {
      const auto& it = var_masks.find(value);
      if (it != var_masks.end()) {
        BlankActives(it->second);
      }
    }
    for (int64 value = vmax + 1; value <= old_max; ++value) {
      const auto& it = var_masks.find(value);
      if (it != var_masks.end()) {
        BlankActives(it->second);
      }
    }
  }

  void BlankActives(const std::vector<uint64>& mask) {
    if (!mask.empty()) {
      active_tuples_.RevSubtract(solver(), mask);
      if (active_tuples_.Empty()) {
        solver()->Fail();
      }
    }
  }

  bool Supported(int var_index, int64 value) {
    DCHECK_GE(var_index, 0);
    DCHECK_LT(var_index, arity_);
    DCHECK(gtl::ContainsKey(masks_[var_index], value));
    const std::vector<uint64>& mask = masks_[var_index][value];
    int tmp = 0;
    return active_tuples_.Intersects(mask, &tmp);
  }

  std::string DebugString() const override {
    return absl::StrFormat("PositiveTableConstraint([%s], %d tuples)",
                           JoinDebugStringPtr(vars_, ", "), tuple_count_);
  }

 protected:
  void InitializeMask(int tuple_index) {
    std::vector<int64> cache(arity_);
    for (int var_index = 0; var_index < arity_; ++var_index) {
      if (!TupleValue(tuple_index, var_index, &cache[var_index])) {
        return;
      }
    }
    for (int var_index = 0; var_index < arity_; ++var_index) {
      const int64 value = cache[var_index];
      std::vector<uint64>& mask = masks_[var_index][value];
      if (mask.empty()) {
        mask.assign(word_length_, 0);
      }
      SetBit64(mask.data(), tuple_index);
    }
  }

  const int word_length_;
  UnsortedNullableRevBitset active_tuples_;
  std::vector<ValueBitset> masks_;
  std::vector<uint64> temp_mask_;
};
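
// Note (added for exposition): PositiveTableConstraint above keeps, for each
// variable, a hash map from value to tuple bitset, which suits large or
// sparse domains. The two "compact" variants below index their masks densely
// by value - original_min and differ in how the active-tuple set is stored:
// a vector of 64-bit words for CompactPositiveTableConstraint, a single
// uint64 for SmallCompactPositiveTableConstraint (at most 64 tuples).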

// ----- Compact Tables -----

class CompactPositiveTableConstraint : public BasePositiveTableConstraint {
 public:
  CompactPositiveTableConstraint(Solver* const s,
                                 const std::vector<IntVar*>& vars,
                                 const IntTupleSet& tuples)
      : BasePositiveTableConstraint(s, vars, tuples),
        word_length_(BitLength64(tuples.NumTuples())),
        active_tuples_(tuples.NumTuples()),
        masks_(arity_),
        mask_starts_(arity_),
        mask_ends_(arity_),
        original_min_(arity_, 0),
        temp_mask_(word_length_, 0),
        supports_(arity_),
        demon_(nullptr),
        touched_var_(-1),
        var_sizes_(arity_, 0) {}

  ~CompactPositiveTableConstraint() override {}

  void Post() override {
    demon_ = solver()->RegisterDemon(MakeDelayedConstraintDemon0(
        solver(), this, &CompactPositiveTableConstraint::Propagate,
        "Propagate"));
    for (int i = 0; i < arity_; ++i) {
      Demon* const u = MakeConstraintDemon1(
          solver(), this, &CompactPositiveTableConstraint::Update, "Update", i);
      vars_[i]->WhenDomain(u);
    }
    for (int i = 0; i < arity_; ++i) {
      var_sizes_.SetValue(solver(), i, vars_[i]->Size());
    }
  }

  void InitialPropagate() override {
    BuildMasks();
    FillMasksAndActiveTuples();
    ComputeMasksBoundaries();
    BuildSupports();
    RemoveUnsupportedValues();
  }

  // ----- Propagation -----

  void Propagate() {
    // Reset touched_var_ if more than one variable was modified.
    if (touched_var_ == -2) {
      touched_var_ = -1;
    }
    // This method scans all values of all variables to see if they
    // are still supported.
    // This method is not attached to any particular variable, but is pushed
    // at a delayed priority after Update(var_index) is called.
    for (int var_index = 0; var_index < arity_; ++var_index) {
      // This demon runs at low priority. Thus we know all the
      // variables that have changed since the last time it was run.
      // In that case, if only one var was touched, as propagation is
      // exact, we do not need to recheck that variable.
      if (var_index == touched_var_) {
        touched_var_ = -1;  // Clean now, it is a one-time flag.
        continue;
      }
      IntVar* const var = vars_[var_index];
      const int64 original_min = original_min_[var_index];
      const int64 var_size = var->Size();
      // The domain iterator is very slow, let's try to see if we can
      // work our way around.
      switch (var_size) {
        case 1: {
          if (!Supported(var_index, var->Min() - original_min)) {
            solver()->Fail();
          }
          break;
        }
        case 2: {
          const int64 var_min = var->Min();
          const int64 var_max = var->Max();
          const bool min_support = Supported(var_index, var_min - original_min);
          const bool max_support = Supported(var_index, var_max - original_min);
          if (!min_support) {
            if (!max_support) {
              solver()->Fail();
            } else {
              var->SetValue(var_max);
              var_sizes_.SetValue(solver(), var_index, 1);
            }
          } else if (!max_support) {
            var->SetValue(var_min);
            var_sizes_.SetValue(solver(), var_index, 1);
          }
          break;
        }
        default: {
          to_remove_.clear();
          const int64 var_min = var->Min();
          const int64 var_max = var->Max();
          int64 new_min = var_min;
          int64 new_max = var_max;
          // If the domain of a variable is an interval, it is much
          // faster to iterate on that interval instead of using the
          // iterator.
          if (var_max - var_min + 1 == var_size) {
            for (; new_min <= var_max; ++new_min) {
              if (Supported(var_index, new_min - original_min)) {
                break;
              }
            }
            for (; new_max >= new_min; --new_max) {
              if (Supported(var_index, new_max - original_min)) {
                break;
              }
            }
            var->SetRange(new_min, new_max);
            for (int64 value = new_min + 1; value < new_max; ++value) {
              if (!Supported(var_index, value - original_min)) {
                to_remove_.push_back(value);
              }
            }
          } else {  // Domain is sparse.
            // Let's not collect all values below the first supported
            // value as this can easily and more rapidly be taken care
            // of by a SetRange() call.
            new_min = kint64max;  // escape value.
            for (const int64 value : InitAndGetValues(iterators_[var_index])) {
              if (!Supported(var_index, value - original_min)) {
                to_remove_.push_back(value);
              } else {
                if (new_min == kint64max) {
                  new_min = value;
                  // This will be covered by the SetRange.
                  to_remove_.clear();
                }
                new_max = value;
              }
            }
            var->SetRange(new_min, new_max);
            // Trim the to_remove vector.
            int index = to_remove_.size() - 1;
            while (index >= 0 && to_remove_[index] > new_max) {
              index--;
            }
            to_remove_.resize(index + 1);
          }
          var->RemoveValues(to_remove_);
          var_sizes_.SetValue(solver(), var_index, var->Size());
        }
      }
    }
  }

  void Update(int var_index) {
    if (vars_[var_index]->Size() == var_sizes_.Value(var_index)) {
      return;
    }
    // This method will update the set of active tuples by masking out all
    // tuples attached to values of the variables that have been removed.

    // We first collect the complete set of tuples to blank out in temp_mask_.
    IntVar* const var = vars_[var_index];
    bool changed = false;
    const int64 omin = original_min_[var_index];
    const int64 var_size = var->Size();
    const int64 var_min = var->Min();
    const int64 var_max = var->Max();

    switch (var_size) {
      case 1: {
        changed = AndMaskWithActive(masks_[var_index][var_min - omin]);
        break;
      }
      case 2: {
        SetTempMask(var_index, var_min - omin);
        OrTempMask(var_index, var_max - omin);
        changed = AndMaskWithActive(temp_mask_);
        break;
      }
      default: {
        const int64 estimated_hole_size =
            var_sizes_.Value(var_index) - var_size;
        const int64 old_min = var->OldMin();
        const int64 old_max = var->OldMax();
        // Rough estimation of the number of operations if we scan
        // deltas in the domain of the variable.
        const int64 number_of_operations =
            estimated_hole_size + var_min - old_min + old_max - var_max;
        if (number_of_operations < var_size) {
          // Let's scan the removed values since last run.
          for (int64 value = old_min; value < var_min; ++value) {
            changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
          }
          for (const int64 value : InitAndGetValues(holes_[var_index])) {
            changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
          }
          for (int64 value = var_max + 1; value <= old_max; ++value) {
            changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
          }
        } else {
          ClearTempMask();
          // Let's build the mask of supported tuples from the current
          // domain.
          if (var_max - var_min + 1 == var_size) {  // Contiguous.
            for (int64 value = var_min; value <= var_max; ++value) {
              OrTempMask(var_index, value - omin);
            }
          } else {
            for (const int64 value : InitAndGetValues(iterators_[var_index])) {
              OrTempMask(var_index, value - omin);
            }
          }
          // Then we AND this mask with active_tuples_.
          changed = AndMaskWithActive(temp_mask_);
        }
        // We maintain the size of the variables incrementally (when it
        // is > 2).
        var_sizes_.SetValue(solver(), var_index, var_size);
      }
    }
    // We push the propagate method only if something has changed.
    if (changed) {
      if (touched_var_ == -1 || touched_var_ == var_index) {
        touched_var_ = var_index;
      } else {
        touched_var_ = -2;  // more than one var.
      }
      EnqueueDelayedDemon(demon_);
    }
  }

  std::string DebugString() const override {
    return absl::StrFormat("CompactPositiveTableConstraint([%s], %d tuples)",
                           JoinDebugStringPtr(vars_, ", "), tuple_count_);
  }

 private:
  // ----- Initialization -----

  void BuildMasks() {
    // Build masks.
    for (int i = 0; i < arity_; ++i) {
      original_min_[i] = vars_[i]->Min();
      const int64 span = vars_[i]->Max() - original_min_[i] + 1;
      masks_[i].resize(span);
    }
  }

  void FillMasksAndActiveTuples() {
    std::vector<uint64> actives(word_length_, 0);
    for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
      if (IsTupleSupported(tuple_index)) {
        SetBit64(actives.data(), tuple_index);
        // Fill in all masks.
        for (int var_index = 0; var_index < arity_; ++var_index) {
          const int64 value = UnsafeTupleValue(tuple_index, var_index);
          const int64 value_index = value - original_min_[var_index];
          DCHECK_GE(value_index, 0);
          DCHECK_LT(value_index, masks_[var_index].size());
          if (masks_[var_index][value_index].empty()) {
            masks_[var_index][value_index].assign(word_length_, 0);
          }
          SetBit64(masks_[var_index][value_index].data(), tuple_index);
        }
      }
    }
    active_tuples_.Init(solver(), actives);
  }

  void RemoveUnsupportedValues() {
    // Remove unreached values.
    for (int var_index = 0; var_index < arity_; ++var_index) {
      IntVar* const var = vars_[var_index];
      to_remove_.clear();
      for (const int64 value : InitAndGetValues(iterators_[var_index])) {
        if (masks_[var_index][value - original_min_[var_index]].empty()) {
          to_remove_.push_back(value);
        }
      }
      if (!to_remove_.empty()) {
        var->RemoveValues(to_remove_);
      }
    }
  }

  void ComputeMasksBoundaries() {
    for (int var_index = 0; var_index < arity_; ++var_index) {
      mask_starts_[var_index].resize(masks_[var_index].size());
      mask_ends_[var_index].resize(masks_[var_index].size());
      for (int value_index = 0; value_index < masks_[var_index].size();
           ++value_index) {
        const std::vector<uint64>& mask = masks_[var_index][value_index];
        if (mask.empty()) {
          continue;
        }
        int start = 0;
        while (start < word_length_ && mask[start] == 0) {
          start++;
        }
        DCHECK_LT(start, word_length_);
        int end = word_length_ - 1;
        while (end > start && mask[end] == 0) {
          end--;
        }
        DCHECK_LE(start, end);
        DCHECK_NE(mask[start], 0);
        DCHECK_NE(mask[end], 0);
        mask_starts_[var_index][value_index] = start;
        mask_ends_[var_index][value_index] = end;
      }
    }
  }

  void BuildSupports() {
    for (int var_index = 0; var_index < arity_; ++var_index) {
      supports_[var_index].resize(masks_[var_index].size());
    }
  }

  // ----- Helpers during propagation -----

  bool AndMaskWithActive(const std::vector<uint64>& mask) {
    const bool result = active_tuples_.RevAnd(solver(), mask);
    if (active_tuples_.Empty()) {
      solver()->Fail();
    }
    return result;
  }

  bool SubtractMaskFromActive(const std::vector<uint64>& mask) {
    const bool result = active_tuples_.RevSubtract(solver(), mask);
    if (active_tuples_.Empty()) {
      solver()->Fail();
    }
    return result;
  }

  bool Supported(int var_index, int64 value_index) {
    DCHECK_GE(var_index, 0);
    DCHECK_LT(var_index, arity_);
    DCHECK_GE(value_index, 0);
    DCHECK_LT(value_index, masks_[var_index].size());
    const std::vector<uint64>& mask = masks_[var_index][value_index];
    DCHECK(!mask.empty());
    return active_tuples_.Intersects(mask, &supports_[var_index][value_index]);
  }

  void OrTempMask(int var_index, int64 value_index) {
    const std::vector<uint64>& mask = masks_[var_index][value_index];
    if (!mask.empty()) {
      const int mask_span = mask_ends_[var_index][value_index] -
                            mask_starts_[var_index][value_index] + 1;
      if (active_tuples_.ActiveWordSize() < mask_span) {
        for (int i : active_tuples_.active_words()) {
          temp_mask_[i] |= mask[i];
        }
      } else {
        for (int i = mask_starts_[var_index][value_index];
             i <= mask_ends_[var_index][value_index]; ++i) {
          temp_mask_[i] |= mask[i];
        }
      }
    }
  }

  void SetTempMask(int var_index, int64 value_index) {
    // We assume memset is much faster than looping and assigning.
    // Still we do want to stay sparse if possible.
    // Thus we switch between dense and sparse initialization by
    // comparing the number of operations in both cases, with a constant
    // factor.
    // TODO(user): experiment with different constant values.
    if (active_tuples_.ActiveWordSize() < word_length_ / 4) {
      for (int i : active_tuples_.active_words()) {
        temp_mask_[i] = masks_[var_index][value_index][i];
      }
    } else {
      temp_mask_ = masks_[var_index][value_index];
    }
  }

  void ClearTempMask() {
    // See comment above.
    if (active_tuples_.ActiveWordSize() < word_length_ / 4) {
      for (int i : active_tuples_.active_words()) {
        temp_mask_[i] = 0;
      }
    } else {
      temp_mask_.assign(word_length_, 0);
    }
  }

  // The length in 64 bit words of the number of tuples.
  const int word_length_;
  // The active bitset.
  UnsortedNullableRevBitset active_tuples_;
  // The masks per value per variable.
  std::vector<std::vector<std::vector<uint64>>> masks_;
  // The range of active indices in the masks.
  std::vector<std::vector<int>> mask_starts_;
  std::vector<std::vector<int>> mask_ends_;
  // The min on the vars at creation time.
  std::vector<int64> original_min_;
  // A temporary mask used for computation.
  std::vector<uint64> temp_mask_;
  // The index of the word in the active bitset supporting each value per
  // variable.
  std::vector<std::vector<int>> supports_;
  Demon* demon_;
  int touched_var_;
  RevArray<int64> var_sizes_;
};

// ----- Small Compact Table. -----

// TODO(user): regroup code with CompactPositiveTableConstraint.

class SmallCompactPositiveTableConstraint : public BasePositiveTableConstraint {
 public:
  SmallCompactPositiveTableConstraint(Solver* const s,
                                      const std::vector<IntVar*>& vars,
                                      const IntTupleSet& tuples)
      : BasePositiveTableConstraint(s, vars, tuples),
        active_tuples_(0),
        stamp_(0),
        masks_(arity_),
        original_min_(arity_, 0),
        demon_(nullptr),
        touched_var_(-1) {
    CHECK_GE(arity_, 0);
    CHECK_LE(tuples.NumTuples(), kBitsInUint64);
  }

  ~SmallCompactPositiveTableConstraint() override {}

  void Post() override {
    demon_ = solver()->RegisterDemon(MakeDelayedConstraintDemon0(
        solver(), this, &SmallCompactPositiveTableConstraint::Propagate,
        "Propagate"));
    for (int i = 0; i < arity_; ++i) {
      if (!vars_[i]->Bound()) {
        Demon* const update_demon = MakeConstraintDemon1(
            solver(), this, &SmallCompactPositiveTableConstraint::Update,
            "Update", i);
        vars_[i]->WhenDomain(update_demon);
      }
    }
    stamp_ = 0;
  }

  void InitMasks() {
    // Build masks.
    for (int i = 0; i < arity_; ++i) {
      original_min_[i] = vars_[i]->Min();
      const int64 span = vars_[i]->Max() - original_min_[i] + 1;
      masks_[i].assign(span, 0);
    }
  }

  bool IsTupleSupported(int tuple_index) {
    for (int var_index = 0; var_index < arity_; ++var_index) {
      int64 value = 0;
      if (!TupleValue(tuple_index, var_index, &value) ||
          !vars_[var_index]->Contains(value)) {
        return false;
      }
    }
    return true;
  }

  void ComputeActiveTuples() {
    active_tuples_ = 0;
    // Compute active_tuples_ and update masks.
    for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
      if (IsTupleSupported(tuple_index)) {
        const uint64 local_mask = OneBit64(tuple_index);
        active_tuples_ |= local_mask;
        for (int var_index = 0; var_index < arity_; ++var_index) {
          const int64 value = UnsafeTupleValue(tuple_index, var_index);
          masks_[var_index][value - original_min_[var_index]] |= local_mask;
        }
      }
    }
    if (!active_tuples_) {
      solver()->Fail();
    }
  }

  void RemoveUnsupportedValues() {
    // Remove unreached values.
    for (int var_index = 0; var_index < arity_; ++var_index) {
      IntVar* const var = vars_[var_index];
      const int64 original_min = original_min_[var_index];
      to_remove_.clear();
      for (const int64 value : InitAndGetValues(iterators_[var_index])) {
        if (masks_[var_index][value - original_min] == 0) {
          to_remove_.push_back(value);
        }
      }
      if (!to_remove_.empty()) {
        var->RemoveValues(to_remove_);
      }
    }
  }

  void InitialPropagate() override {
    InitMasks();
    ComputeActiveTuples();
    RemoveUnsupportedValues();
  }

  void Propagate() {
    // This method scans all the values of all the variables to see if they
    // are still supported.
    // This method is not attached to any particular variable, but is pushed
    // at a delayed priority and awakened by Update(var_index).

    // Reset touched_var_ if more than one variable was modified.
    if (touched_var_ == -2) {
      touched_var_ = -1;
    }

    // We cache active_tuples_.
    const uint64 actives = active_tuples_;

    // We scan all variables and check their domains.
    for (int var_index = 0; var_index < arity_; ++var_index) {
      // This demon runs at low priority. Thus we know all the
      // variables that have changed since the last time it was run.
      // In that case, if only one var was touched, as propagation is
      // exact, we do not need to recheck that variable.
      if (var_index == touched_var_) {
        touched_var_ = -1;  // Clean it, it is a one-time flag.
        continue;
      }
      const std::vector<uint64>& var_mask = masks_[var_index];
      const int64 original_min = original_min_[var_index];
      IntVar* const var = vars_[var_index];
      const int64 var_size = var->Size();
      switch (var_size) {
        case 1: {
          if ((var_mask[var->Min() - original_min] & actives) == 0) {
            // The difference with the non-small version of the table
            // is that checking the validity of the resulting active
            // tuples is cheap. Therefore we do not delay the check
            // code.
            solver()->Fail();
          }
          break;
        }
        case 2: {
          const int64 var_min = var->Min();
          const int64 var_max = var->Max();
          const bool min_support =
              (var_mask[var_min - original_min] & actives) != 0;
          const bool max_support =
              (var_mask[var_max - original_min] & actives) != 0;
          if (!min_support && !max_support) {
            solver()->Fail();
          } else if (!min_support) {
            var->SetValue(var_max);
          } else if (!max_support) {
            var->SetValue(var_min);
          }
          break;
        }
        default: {
          to_remove_.clear();
          const int64 var_min = var->Min();
          const int64 var_max = var->Max();
          int64 new_min = var_min;
          int64 new_max = var_max;
          if (var_max - var_min + 1 == var_size) {
            // Contiguous case.
            for (; new_min <= var_max; ++new_min) {
              if ((var_mask[new_min - original_min] & actives) != 0) {
                break;
              }
            }
            for (; new_max >= new_min; --new_max) {
              if ((var_mask[new_max - original_min] & actives) != 0) {
                break;
              }
            }
            var->SetRange(new_min, new_max);
            for (int64 value = new_min + 1; value < new_max; ++value) {
              if ((var_mask[value - original_min] & actives) == 0) {
                to_remove_.push_back(value);
              }
            }
          } else {
            bool min_set = false;
            int last_size = 0;
            for (const int64 value : InitAndGetValues(iterators_[var_index])) {
              // The iterator is not safe w.r.t. deletion. Thus we
              // postpone all value removals.
              if ((var_mask[value - original_min] & actives) == 0) {
                if (min_set) {
                  to_remove_.push_back(value);
                }
              } else {
                if (!min_set) {
                  new_min = value;
                  min_set = true;
                }
                new_max = value;
                last_size = to_remove_.size();
              }
            }
            if (min_set) {
              var->SetRange(new_min, new_max);
            } else {
              solver()->Fail();
            }
            to_remove_.resize(last_size);
          }
          var->RemoveValues(to_remove_);
        }
      }
    }
  }

  void Update(int var_index) {
    // This method updates the set of active tuples by masking out all
    // tuples attached to values of the variables that have been removed.

    IntVar* const var = vars_[var_index];
    const int64 original_min = original_min_[var_index];
    const int64 var_size = var->Size();
    switch (var_size) {
      case 1: {
        ApplyMask(var_index, masks_[var_index][var->Min() - original_min]);
        return;
      }
      case 2: {
        ApplyMask(var_index, masks_[var_index][var->Min() - original_min] |
                                 masks_[var_index][var->Max() - original_min]);
        return;
      }
      default: {
        // We first collect the complete set of tuples to blank out in
        // temp_mask.
        const std::vector<uint64>& var_mask = masks_[var_index];
        const int64 old_min = var->OldMin();
        const int64 old_max = var->OldMax();
        const int64 var_min = var->Min();
        const int64 var_max = var->Max();
        const bool contiguous = var_size == var_max - var_min + 1;
        const bool nearly_contiguous =
            var_size > (var_max - var_min + 1) * 7 / 10;

        // Count the number of masks to collect to compare the deduction
        // vs the construction of the new active bitset.
        // TODO(user): Implement HolesSize() on IntVar* and use it
        // to remove this code and the var_sizes in the non_small
        // version.
        uint64 hole_mask = 0;
        if (!contiguous) {
          for (const int64 value : InitAndGetValues(holes_[var_index])) {
            hole_mask |= var_mask[value - original_min];
          }
        }
        const int64 hole_operations = var_min - old_min + old_max - var_max;
        // We estimate the domain iterator to be 4x slower.
        const int64 domain_operations = contiguous ? var_size : 4 * var_size;
        if (hole_operations < domain_operations) {
          for (int64 value = old_min; value < var_min; ++value) {
            hole_mask |= var_mask[value - original_min];
          }
          for (int64 value = var_max + 1; value <= old_max; ++value) {
            hole_mask |= var_mask[value - original_min];
          }
          // We reverse the mask as this was negative information.
          ApplyMask(var_index, ~hole_mask);
        } else {
          uint64 domain_mask = 0;
          if (contiguous) {
            for (int64 value = var_min; value <= var_max; ++value) {
              domain_mask |= var_mask[value - original_min];
            }
          } else if (nearly_contiguous) {
            for (int64 value = var_min; value <= var_max; ++value) {
              if (var->Contains(value)) {
                domain_mask |= var_mask[value - original_min];
              }
            }
          } else {
            for (const int64 value : InitAndGetValues(iterators_[var_index])) {
              domain_mask |= var_mask[value - original_min];
            }
          }
          ApplyMask(var_index, domain_mask);
        }
      }
    }
  }

  std::string DebugString() const override {
    return absl::StrFormat(
        "SmallCompactPositiveTableConstraint([%s], %d tuples)",
        JoinDebugStringPtr(vars_, ", "), tuple_count_);
  }

 private:
  void ApplyMask(int var_index, uint64 mask) {
    if ((~mask & active_tuples_) != 0) {
      // Check if we need to save the active_tuples in this node.
      const uint64 current_stamp = solver()->stamp();
      if (stamp_ < current_stamp) {
        stamp_ = current_stamp;
        solver()->SaveValue(&active_tuples_);
      }
      active_tuples_ &= mask;
      if (active_tuples_) {
        // Maintain touched_var_.
        if (touched_var_ == -1 || touched_var_ == var_index) {
          touched_var_ = var_index;
        } else {
          touched_var_ = -2;  // more than one var.
        }
        EnqueueDelayedDemon(demon_);
      } else {
        // Clean it before failing.
        touched_var_ = -1;
        solver()->Fail();
      }
    }
  }

  // Bitset of active tuples.
  uint64 active_tuples_;
  // Stamp of the active_tuple bitset.
  uint64 stamp_;
  // The masks per value per variable.
  std::vector<std::vector<uint64>> masks_;
  // The min on the vars at creation time.
  std::vector<int64> original_min_;
  Demon* demon_;
  int touched_var_;
};

bool HasCompactDomains(const std::vector<IntVar*>& vars) {
  return true;  // Always assume compact table.
}

// ---------- Deterministic Finite Automaton ----------

// This constraint implements a finite automaton when the transitions are
// the values of the variables in the array.
// That is, state[i+1] = transition[var[i]][state[i]] if
// (state[i], var[i], state[i+1]) is in the transition table.
// There is only one possible transition for a state/value pair.
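//
// Illustrative example (added for exposition): a transition table
//   (1, 0, 1), (1, 1, 2), (2, 0, 2), (2, 1, 1)
// with initial state 1 and final states {2} accepts exactly the sequences of
// 0/1 values containing an odd number of 1s: each variable var[i] consumes
// one transition, and the chain of ternary table constraints built in Post()
// links state[i], var[i] and state[i+1].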
class TransitionConstraint : public Constraint {
 public:
  static const int kStatePosition;
  static const int kNextStatePosition;
  static const int kTransitionTupleSize;
  TransitionConstraint(Solver* const s, const std::vector<IntVar*>& vars,
                       const IntTupleSet& transition_table, int64 initial_state,
                       const std::vector<int64>& final_states)
      : Constraint(s),
        vars_(vars),
        transition_table_(transition_table),
        initial_state_(initial_state),
        final_states_(final_states) {}

  TransitionConstraint(Solver* const s, const std::vector<IntVar*>& vars,
                       const IntTupleSet& transition_table, int64 initial_state,
                       const std::vector<int>& final_states)
      : Constraint(s),
        vars_(vars),
        transition_table_(transition_table),
        initial_state_(initial_state),
        final_states_(final_states.size()) {
    for (int i = 0; i < final_states.size(); ++i) {
      final_states_[i] = final_states[i];
    }
  }

  ~TransitionConstraint() override {}

  void Post() override {
    Solver* const s = solver();
    int64 state_min = kint64max;
    int64 state_max = kint64min;
    const int nb_vars = vars_.size();
    for (int i = 0; i < transition_table_.NumTuples(); ++i) {
      state_max =
          std::max(state_max, transition_table_.Value(i, kStatePosition));
      state_max =
          std::max(state_max, transition_table_.Value(i, kNextStatePosition));
      state_min =
          std::min(state_min, transition_table_.Value(i, kStatePosition));
      state_min =
          std::min(state_min, transition_table_.Value(i, kNextStatePosition));
    }

    std::vector<IntVar*> states;
    states.push_back(s->MakeIntConst(initial_state_));
    for (int var_index = 1; var_index < nb_vars; ++var_index) {
      states.push_back(s->MakeIntVar(state_min, state_max));
    }
    states.push_back(s->MakeIntVar(final_states_));
    CHECK_EQ(nb_vars + 1, states.size());

    const int num_tuples = transition_table_.NumTuples();

    for (int var_index = 0; var_index < nb_vars; ++var_index) {
      std::vector<IntVar*> tmp_vars(3);
      tmp_vars[0] = states[var_index];
      tmp_vars[1] = vars_[var_index];
      tmp_vars[2] = states[var_index + 1];
      // We always build the compact versions of the tables.
      if (num_tuples <= kBitsInUint64) {
        s->AddConstraint(s->RevAlloc(new SmallCompactPositiveTableConstraint(
            s, tmp_vars, transition_table_)));
      } else {
        s->AddConstraint(s->RevAlloc(new CompactPositiveTableConstraint(
            s, tmp_vars, transition_table_)));
      }
    }
  }

  void InitialPropagate() override {}

  void Accept(ModelVisitor* const visitor) const override {
    visitor->BeginVisitConstraint(ModelVisitor::kTransition, this);
    visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kVarsArgument,
                                               vars_);
    visitor->VisitIntegerArgument(ModelVisitor::kInitialState, initial_state_);
    visitor->VisitIntegerArrayArgument(ModelVisitor::kFinalStatesArgument,
                                       final_states_);
    visitor->VisitIntegerMatrixArgument(ModelVisitor::kTuplesArgument,
                                        transition_table_);
    visitor->EndVisitConstraint(ModelVisitor::kTransition, this);
  }

  std::string DebugString() const override {
    return absl::StrFormat(
        "TransitionConstraint([%s], %d transitions, initial = %d, final = "
        "[%s])",
        JoinDebugStringPtr(vars_, ", "), transition_table_.NumTuples(),
        initial_state_, absl::StrJoin(final_states_, ", "));
  }

 private:
  // Variables representing transitions between states. See header file.
  const std::vector<IntVar*> vars_;
  // The transitions as tuples (state, value, next_state).
  const IntTupleSet transition_table_;
  // The initial state before the first transition.
  const int64 initial_state_;
  // Vector of final states after the last transition.
  std::vector<int64> final_states_;
};

const int TransitionConstraint::kStatePosition = 0;
const int TransitionConstraint::kNextStatePosition = 2;
const int TransitionConstraint::kTransitionTupleSize = 3;
}  // namespace

// --------- API ----------

Constraint* Solver::MakeAllowedAssignments(const std::vector<IntVar*>& vars,
                                           const IntTupleSet& tuples) {
  if (HasCompactDomains(vars)) {
    if (tuples.NumTuples() < kBitsInUint64 && parameters_.use_small_table()) {
      return RevAlloc(
          new SmallCompactPositiveTableConstraint(this, vars, tuples));
    } else {
      return RevAlloc(new CompactPositiveTableConstraint(this, vars, tuples));
    }
  }
  return RevAlloc(new PositiveTableConstraint(this, vars, tuples));
}
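
// Example usage (added for exposition; a minimal sketch, not part of the
// original file):
//
//   Solver solver("table");
//   std::vector<IntVar*> vars;
//   solver.MakeIntVarArray(2, 0, 10, &vars);
//   IntTupleSet tuples(2);
//   tuples.Insert2(3, 5);
//   tuples.Insert2(4, 6);
//   solver.AddConstraint(solver.MakeAllowedAssignments(vars, tuples));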

Constraint* Solver::MakeTransitionConstraint(
    const std::vector<IntVar*>& vars, const IntTupleSet& transition_table,
    int64 initial_state, const std::vector<int64>& final_states) {
  return RevAlloc(new TransitionConstraint(this, vars, transition_table,
                                           initial_state, final_states));
}

Constraint* Solver::MakeTransitionConstraint(
    const std::vector<IntVar*>& vars, const IntTupleSet& transition_table,
    int64 initial_state, const std::vector<int>& final_states) {
  return RevAlloc(new TransitionConstraint(this, vars, transition_table,
                                           initial_state, final_states));
}
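
// Example usage (added for exposition; a minimal sketch continuing the one
// above, with a hypothetical `solver` object):
//
//   IntTupleSet transitions(3);  // (state, value, next_state) triples.
//   transitions.Insert3(1, 0, 1);
//   transitions.Insert3(1, 1, 2);
//   transitions.Insert3(2, 0, 2);
//   transitions.Insert3(2, 1, 1);
//   std::vector<IntVar*> word;
//   solver.MakeIntVarArray(4, 0, 1, &word);
//   solver.AddConstraint(solver.MakeTransitionConstraint(
//       word, transitions, /*initial_state=*/1,
//       /*final_states=*/std::vector<int64>{2}));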

}  // namespace operations_research