OR-Tools  8.2
resource.cc
1// Copyright 2010-2018 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// This file contains implementations of several resource constraints.
15// The implemented constraints are:
16// * Disjunctive: forces a set of intervals to be non-overlapping
17// * Cumulative: forces a set of intervals with associated demands to be such
18// that the sum of demands of the intervals containing any given integer
19// does not exceed a capacity.
20// In addition, it implements the SequenceVar that allows ranking decisions
21// on a set of interval variables.
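// For example, under a Cumulative constraint with capacity 3, two tasks of
// demand 2 can never overlap, while a task of demand 2 and a task of demand 1
// may run at the same time.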
22
23#include <algorithm>
24#include <queue>
25#include <string>
26#include <utility>
27#include <vector>
28
29#include "absl/container/flat_hash_map.h"
30#include "absl/strings/str_cat.h"
31#include "absl/strings/str_format.h"
32#include "absl/strings/str_join.h"
36#include "ortools/base/macros.h"
41#include "ortools/util/bitset.h"
45
46namespace operations_research {
47namespace {
48// ----- Comparison functions -----
49
50// TODO(user): Tie breaking.
51
52// Comparison methods, used by the STL sort.
53template <class Task>
54bool StartMinLessThan(Task* const w1, Task* const w2) {
55 return (w1->interval->StartMin() < w2->interval->StartMin());
56}
57
58// A comparator that sorts the tasks by their effective earliest start time when
59// using the shortest duration possible. This comparator can be used when
60// sorting the tasks before they are inserted into a Theta-tree.
61template <class Task>
62bool ShortestDurationStartMinLessThan(Task* const w1, Task* const w2) {
63 return w1->interval->EndMin() - w1->interval->DurationMin() <
64 w2->interval->EndMin() - w2->interval->DurationMin();
65}
66
67template <class Task>
68bool StartMaxLessThan(Task* const w1, Task* const w2) {
69 return (w1->interval->StartMax() < w2->interval->StartMax());
70}
71
72template <class Task>
73bool EndMinLessThan(Task* const w1, Task* const w2) {
74 return (w1->interval->EndMin() < w2->interval->EndMin());
75}
76
77template <class Task>
78bool EndMaxLessThan(Task* const w1, Task* const w2) {
79 return (w1->interval->EndMax() < w2->interval->EndMax());
80}
81
82bool IntervalStartMinLessThan(IntervalVar* i1, IntervalVar* i2) {
83 return i1->StartMin() < i2->StartMin();
84}
85
86// ----- Wrappers around intervals -----
87
88// A DisjunctiveTask is a non-preemptive task sharing a disjunctive resource.
89// That is, it corresponds to an interval, and this interval cannot overlap with
90// any other interval of a DisjunctiveTask sharing the same resource.
91// It is indexed, that is it is aware of its position in a reference array.
92struct DisjunctiveTask {
93 explicit DisjunctiveTask(IntervalVar* const interval_)
94 : interval(interval_), index(-1) {}
95
96 std::string DebugString() const { return interval->DebugString(); }
97
98 IntervalVar* interval;
99 int index;
100};
101
102// A CumulativeTask is a non-preemptive task sharing a cumulative resource.
103// That is, it corresponds to an interval and a demand. The sum of demands of
104// all CumulativeTasks sharing a resource of capacity c whose intervals contain
105// any integer t cannot exceed c.
106// It is indexed, that is it is aware of its position in a reference array.
107struct CumulativeTask {
108 CumulativeTask(IntervalVar* const interval_, int64 demand_)
109 : interval(interval_), demand(demand_), index(-1) {}
110
111 int64 EnergyMin() const { return interval->DurationMin() * demand; }
112
113 int64 DemandMin() const { return demand; }
114
115 void WhenAnything(Demon* const demon) { interval->WhenAnything(demon); }
116
117 std::string DebugString() const {
118 return absl::StrFormat("Task{ %s, demand: %d }", interval->DebugString(),
119 demand);
120 }
121
122 IntervalVar* interval;
123 int64 demand;
124 int index;
125};
126
127// A VariableCumulativeTask is a non-preemptive task sharing a
128// cumulative resource. That is, it corresponds to an interval and a
129// demand. The sum of demands of all cumulative tasks
130// VariableCumulativeTasks sharing a resource of capacity c whose
131// intervals contain any integer t cannot exceed c. It is indexed,
132// that is it is aware of its position in a reference array.
133struct VariableCumulativeTask {
134 VariableCumulativeTask(IntervalVar* const interval_, IntVar* demand_)
135 : interval(interval_), demand(demand_), index(-1) {}
136
137 int64 EnergyMin() const { return interval->DurationMin() * demand->Min(); }
138
139 int64 DemandMin() const { return demand->Min(); }
140
141 void WhenAnything(Demon* const demon) {
142 interval->WhenAnything(demon);
143 demand->WhenRange(demon);
144 }
145
146 std::string DebugString() const {
147 return absl::StrFormat("Task{ %s, demand: %s }", interval->DebugString(),
148 demand->DebugString());
149 }
150
151 IntervalVar* const interval;
152 IntVar* const demand;
153 int index;
154};
155
156// ---------- Theta-Trees ----------
157
158// This is based on Petr Vilim's (public) PhD work.
159// All names come from his work. See http://vilim.eu/petr.
160
161// Node of a Theta-tree
162struct ThetaNode {
163 // Identity element
164 ThetaNode() : total_processing(0), total_ect(kint64min) {}
165
166 // Single interval element
167 explicit ThetaNode(const IntervalVar* const interval)
168 : total_processing(interval->DurationMin()),
169 total_ect(interval->EndMin()) {
170 // NOTE(user): Petr Vilim's thesis assumes that all tasks in the
171 // scheduling problem have fixed duration and that propagation already
172 // updated the bounds of the start/end times accordingly.
173 // The problem in this case is that the recursive formula for computing
174 // total_ect was only proved for the case where the duration is fixed; in
175 // our case, we use StartMin() + DurationMin() for the earliest completion
176 // time of a task, which should not break any assumptions, but may give
177 // bounds that are too loose.
178 }
179
180 void Compute(const ThetaNode& left, const ThetaNode& right) {
181 total_processing = CapAdd(left.total_processing, right.total_processing);
182 total_ect = std::max(CapAdd(left.total_ect, right.total_processing),
183 right.total_ect);
184 }
185
186 bool IsIdentity() const {
187 return total_processing == 0LL && total_ect == kint64min;
188 }
189
190 std::string DebugString() const {
191 return absl::StrCat("ThetaNode{ p = ", total_processing,
192 ", e = ", total_ect < 0LL ? -1LL : total_ect, " }");
193 }
194
195 int64 total_processing;
196 int64 total_ect;
197};
198
199// A theta-tree is a container for a set of intervals supporting the following
200// operations:
201// * Insertions and deletion in O(log size_), with size_ the maximal number of
202// tasks the tree may contain;
203// * Querying the following quantity in O(1):
204// Max_{subset S of the set of contained intervals} (
205// Min_{i in S}(i.StartMin) + Sum_{i in S}(i.DurationMin) )
206class ThetaTree : public MonoidOperationTree<ThetaNode> {
207 public:
208 explicit ThetaTree(int size) : MonoidOperationTree<ThetaNode>(size) {}
209
210 int64 Ect() const { return result().total_ect; }
211
212 void Insert(const DisjunctiveTask* const task) {
213 Set(task->index, ThetaNode(task->interval));
214 }
215
216 void Remove(const DisjunctiveTask* const task) { Reset(task->index); }
217
218 bool IsInserted(const DisjunctiveTask* const task) const {
219 return !GetOperand(task->index).IsIdentity();
220 }
221};
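
// A minimal usage sketch (illustrative only; the helper name and data are not
// part of the library, and the propagators below are the real clients, which
// index the tasks in non-decreasing start-min order before inserting them).
// With release dates 0, 2 and 3 and durations of 2, the earliest completion
// time of the whole set is max(0 + 6, 2 + 4, 3 + 2) = 6.
int64 ThetaTreeEctSketch(Solver* const solver) {
  std::vector<IntervalVar*> intervals = {
      solver->MakeFixedDurationIntervalVar(0, 10, 2, false, "a"),
      solver->MakeFixedDurationIntervalVar(2, 10, 2, false, "b"),
      solver->MakeFixedDurationIntervalVar(3, 10, 2, false, "c")};
  std::vector<DisjunctiveTask> tasks;
  for (int i = 0; i < intervals.size(); ++i) {
    tasks.push_back(DisjunctiveTask(intervals[i]));
    tasks.back().index = i;  // Position in the start-min ordering.
  }
  ThetaTree tree(tasks.size());
  for (DisjunctiveTask& task : tasks) {
    tree.Insert(&task);
  }
  return tree.Ect();  // 6 for the data above.
}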
222
223// ----------------- Lambda Theta Tree -----------------------
224
225// Lambda-theta-node
226// These nodes are cumulative lambda theta-nodes. This is reflected in the
227// terminology. They can also be used in the disjunctive case, and this incurs
228// no performance penalty.
229struct LambdaThetaNode {
230 // Special value for task indices meaning 'no such task'.
231 static const int kNone;
232
233 // Identity constructor
234 LambdaThetaNode()
235 : energy(0LL),
236 energetic_end_min(kint64min),
237 energy_opt(0LL),
238 argmax_energy_opt(kNone),
239 energetic_end_min_opt(kint64min),
240 argmax_energetic_end_min_opt(kNone) {}
241
242 // Constructor for a single cumulative task in the Theta set
243 LambdaThetaNode(int64 capacity, const CumulativeTask& task)
244 : energy(task.EnergyMin()),
245 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
246 energy_opt(energy),
247 argmax_energy_opt(kNone),
248 energetic_end_min_opt(energetic_end_min),
249 argmax_energetic_end_min_opt(kNone) {}
250
251 // Constructor for a single cumulative task in the Lambda set
252 LambdaThetaNode(int64 capacity, const CumulativeTask& task, int index)
253 : energy(0LL),
254 energetic_end_min(kint64min),
255 energy_opt(task.EnergyMin()),
256 argmax_energy_opt(index),
257 energetic_end_min_opt(capacity * task.interval->StartMin() +
258 energy_opt),
259 argmax_energetic_end_min_opt(index) {
260 DCHECK_GE(index, 0);
261 }
262
263 // Constructor for a single cumulative task in the Theta set
264 LambdaThetaNode(int64 capacity, const VariableCumulativeTask& task)
265 : energy(task.EnergyMin()),
266 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
267 energy_opt(energy),
268 argmax_energy_opt(kNone),
269 energetic_end_min_opt(energetic_end_min),
270 argmax_energetic_end_min_opt(kNone) {}
271
272 // Constructor for a single cumulative task in the Lambda set
273 LambdaThetaNode(int64 capacity, const VariableCumulativeTask& task, int index)
274 : energy(0LL),
275 energetic_end_min(kint64min),
276 energy_opt(task.EnergyMin()),
277 argmax_energy_opt(index),
278 energetic_end_min_opt(capacity * task.interval->StartMin() +
279 energy_opt),
280 argmax_energetic_end_min_opt(index) {
281 DCHECK_GE(index, 0);
282 }
283
284 // Constructor for a single interval in the Theta set
285 explicit LambdaThetaNode(const IntervalVar* const interval)
286 : energy(interval->DurationMin()),
287 energetic_end_min(interval->EndMin()),
288 energy_opt(interval->DurationMin()),
289 argmax_energy_opt(kNone),
290 energetic_end_min_opt(interval->EndMin()),
291 argmax_energetic_end_min_opt(kNone) {}
292
293 // Constructor for a single interval in the Lambda set
294 // 'index' is the index of the given interval in the est vector
295 LambdaThetaNode(const IntervalVar* const interval, int index)
296 : energy(0LL),
297 energetic_end_min(kint64min),
298 energy_opt(interval->DurationMin()),
299 argmax_energy_opt(index),
300 energetic_end_min_opt(interval->EndMin()),
301 argmax_energetic_end_min_opt(index) {
302 DCHECK_GE(index, 0);
303 }
304
305 // Sets this LambdaThetaNode to the result of the natural binary operations
306 // over the two given operands, corresponding to the following set operations:
307 // Theta = left.Theta union right.Theta
308 // Lambda = left.Lambda union right.Lambda
309 //
310// No set operation actually occurs: we only maintain the relevant quantities
311 // associated with such sets.
312 void Compute(const LambdaThetaNode& left, const LambdaThetaNode& right) {
313 energy = CapAdd(left.energy, right.energy);
314 energetic_end_min = std::max(right.energetic_end_min,
315 CapAdd(left.energetic_end_min, right.energy));
316 const int64 energy_left_opt = CapAdd(left.energy_opt, right.energy);
317 const int64 energy_right_opt = CapAdd(left.energy, right.energy_opt);
318 if (energy_left_opt > energy_right_opt) {
319 energy_opt = energy_left_opt;
320 argmax_energy_opt = left.argmax_energy_opt;
321 } else {
322 energy_opt = energy_right_opt;
323 argmax_energy_opt = right.argmax_energy_opt;
324 }
325 const int64 ect1 = right.energetic_end_min_opt;
326 const int64 ect2 = CapAdd(left.energetic_end_min, right.energy_opt);
327 const int64 ect3 = CapAdd(left.energetic_end_min_opt, right.energy);
328 if (ect1 >= ect2 && ect1 >= ect3) { // ect1 max
329 energetic_end_min_opt = ect1;
330 argmax_energetic_end_min_opt = right.argmax_energetic_end_min_opt;
331 } else if (ect2 >= ect1 && ect2 >= ect3) { // ect2 max
332 energetic_end_min_opt = ect2;
333 argmax_energetic_end_min_opt = right.argmax_energy_opt;
334 } else { // ect3 max
335 energetic_end_min_opt = ect3;
336 argmax_energetic_end_min_opt = left.argmax_energetic_end_min_opt;
337 }
338 // The processing time, with one grey interval, should be no less than
339 // without any grey interval.
340 DCHECK(energy_opt >= energy);
341 // If there is no responsible grey interval for the processing time,
342 // the processing time with a grey interval should equal the one
343 // without.
344 DCHECK((argmax_energy_opt != kNone) || (energy_opt == energy));
345 }
346
347 // Amount of resource consumed by the Theta set, in units of demand X time.
348 // This is energy(Theta).
349 int64 energy;
350
351 // Max_{subset S of Theta} (capacity * start_min(S) + energy(S))
352 int64 energetic_end_min;
353
354 // Max_{i in Lambda} (energy(Theta union {i}))
355 int64 energy_opt;
356
357 // The argmax in energy_opt_. It is the index of the chosen task in the Lambda
358 // set, if any, or kNone if none.
359 int argmax_energy_opt;
360
361 // Max_{subset S of Theta, i in Lambda}
362 // (capacity * start_min(S union {i}) + energy(S union {i}))
363 int64 energetic_end_min_opt;
364
365 // The argmax in energetic_end_min_opt_. It is the index of the chosen task in
366 // the Lambda set, if any, or kNone if none.
367 int argmax_energetic_end_min_opt;
368};
369
370const int LambdaThetaNode::kNone = -1;
371
372// Disjunctive Lambda-Theta tree
373class DisjunctiveLambdaThetaTree : public MonoidOperationTree<LambdaThetaNode> {
374 public:
375 explicit DisjunctiveLambdaThetaTree(int size)
376 : MonoidOperationTree<LambdaThetaNode>(size) {}
377
378 void Insert(const DisjunctiveTask& task) {
379 Set(task.index, LambdaThetaNode(task.interval));
380 }
381
382 void Grey(const DisjunctiveTask& task) {
383 const int index = task.index;
384 Set(index, LambdaThetaNode(task.interval, index));
385 }
386
387 int64 Ect() const { return result().energetic_end_min; }
388 int64 EctOpt() const { return result().energetic_end_min_opt; }
389 int ResponsibleOpt() const { return result().argmax_energetic_end_min_opt; }
390};
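
// Worked illustration of the optional quantities: with Theta = {a, b} (release
// dates 0 and 2, durations 2) and a single grey task c (release date 3,
// duration 2), Ect() is max(0 + 4, 2 + 2) = 4, while EctOpt(), which may also
// include one grey task, is 0 + 6 = 6, and ResponsibleOpt() designates c.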
391
392// A cumulative lambda-theta tree
393class CumulativeLambdaThetaTree : public MonoidOperationTree<LambdaThetaNode> {
394 public:
395 CumulativeLambdaThetaTree(int size, int64 capacity_max)
396 : MonoidOperationTree<LambdaThetaNode>(size),
397 capacity_max_(capacity_max) {}
398
399 void Init(int64 capacity_max) {
400 Clear();
401 capacity_max_ = capacity_max;
402 }
403
404 void Insert(const CumulativeTask& task) {
405 Set(task.index, LambdaThetaNode(capacity_max_, task));
406 }
407
408 void Grey(const CumulativeTask& task) {
409 const int index = task.index;
410 Set(index, LambdaThetaNode(capacity_max_, task, index));
411 }
412
413 void Insert(const VariableCumulativeTask& task) {
414 Set(task.index, LambdaThetaNode(capacity_max_, task));
415 }
416
417 void Grey(const VariableCumulativeTask& task) {
418 const int index = task.index;
419 Set(index, LambdaThetaNode(capacity_max_, task, index));
420 }
421
422 int64 energetic_end_min() const { return result().energetic_end_min; }
423 int64 energetic_end_min_opt() const { return result().energetic_end_min_opt; }
424 int64 Ect() const {
425 return MathUtil::CeilOfRatio(energetic_end_min(), capacity_max_);
426 }
427 int64 EctOpt() const {
428 return MathUtil::CeilOfRatio(result().energetic_end_min_opt, capacity_max_);
429 }
430 int argmax_energetic_end_min_opt() const {
431 return result().argmax_energetic_end_min_opt;
432 }
433
434 private:
435 int64 capacity_max_;
436};
437
438// -------------- Not Last -----------------------------------------
439
440// A class that implements the 'Not-Last' propagation algorithm for the unary
441// resource constraint.
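// Roughly: consider a task t and the set Omega of other tasks whose start max
// is smaller than t's end max. If the earliest completion time of Omega
// exceeds t's start max, then t cannot be scheduled last among Omega union
// {t}, so its end max can be lowered to the largest start max in Omega. This
// is only a paraphrase; see Vilim's work cited above for the exact rule.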
442class NotLast {
443 public:
444 NotLast(Solver* const solver, const std::vector<IntervalVar*>& intervals,
445 bool mirror, bool strict);
446
447 ~NotLast() { gtl::STLDeleteElements(&by_start_min_); }
448
449 bool Propagate();
450
451 private:
452 ThetaTree theta_tree_;
453 std::vector<DisjunctiveTask*> by_start_min_;
454 std::vector<DisjunctiveTask*> by_end_max_;
455 std::vector<DisjunctiveTask*> by_start_max_;
456 std::vector<int64> new_lct_;
457 const bool strict_;
458};
459
460NotLast::NotLast(Solver* const solver,
461 const std::vector<IntervalVar*>& intervals, bool mirror,
462 bool strict)
463 : theta_tree_(intervals.size()),
464 by_start_min_(intervals.size()),
465 by_end_max_(intervals.size()),
466 by_start_max_(intervals.size()),
467 new_lct_(intervals.size(), -1LL),
468 strict_(strict) {
469 // Populate the different vectors.
470 for (int i = 0; i < intervals.size(); ++i) {
471 IntervalVar* const underlying =
472 mirror ? solver->MakeMirrorInterval(intervals[i]) : intervals[i];
473 IntervalVar* const relaxed = solver->MakeIntervalRelaxedMin(underlying);
474 by_start_min_[i] = new DisjunctiveTask(relaxed);
475 by_end_max_[i] = by_start_min_[i];
476 by_start_max_[i] = by_start_min_[i];
477 }
478}
479
480bool NotLast::Propagate() {
481 // ---- Init ----
482 std::sort(by_start_max_.begin(), by_start_max_.end(),
483 StartMaxLessThan<DisjunctiveTask>);
484 std::sort(by_end_max_.begin(), by_end_max_.end(),
485 EndMaxLessThan<DisjunctiveTask>);
486 // Update start min positions
487 std::sort(by_start_min_.begin(), by_start_min_.end(),
488 StartMinLessThan<DisjunctiveTask>);
489 for (int i = 0; i < by_start_min_.size(); ++i) {
490 by_start_min_[i]->index = i;
491 }
492 theta_tree_.Clear();
493 for (int i = 0; i < by_start_min_.size(); ++i) {
494 new_lct_[i] = by_start_min_[i]->interval->EndMax();
495 }
496
497 // --- Execute ----
498 int j = 0;
499 for (DisjunctiveTask* const twi : by_end_max_) {
500 while (j < by_start_max_.size() &&
501 twi->interval->EndMax() > by_start_max_[j]->interval->StartMax()) {
502 if (j > 0 && theta_tree_.Ect() > by_start_max_[j]->interval->StartMax()) {
503 const int64 new_end_max = by_start_max_[j - 1]->interval->StartMax();
504 new_lct_[by_start_max_[j]->index] = new_end_max;
505 }
506 theta_tree_.Insert(by_start_max_[j]);
507 j++;
508 }
509 const bool inserted = theta_tree_.IsInserted(twi);
510 if (inserted) {
511 theta_tree_.Remove(twi);
512 }
513 const int64 ect_theta_less_i = theta_tree_.Ect();
514 if (inserted) {
515 theta_tree_.Insert(twi);
516 }
517 if (ect_theta_less_i > twi->interval->EndMax() && j > 0) {
518 const int64 new_end_max = by_start_max_[j - 1]->interval->EndMax();
519 if (new_end_max > new_lct_[twi->index]) {
520 new_lct_[twi->index] = new_end_max;
521 }
522 }
523 }
524
525 // Apply modifications
526 bool modified = false;
527 for (int i = 0; i < by_start_min_.size(); ++i) {
528 IntervalVar* const var = by_start_min_[i]->interval;
529 if ((strict_ || var->DurationMin() > 0) && var->EndMax() > new_lct_[i]) {
530 modified = true;
531 var->SetEndMax(new_lct_[i]);
532 }
533 }
534 return modified;
535}
536
537// ------ Edge finder + detectable precedences -------------
538
539// A class that implements two propagation algorithms: edge finding and
540// detectable precedences. These algorithms both push intervals to the right,
541// which is why they are grouped together.
542class EdgeFinderAndDetectablePrecedences {
543 public:
544 EdgeFinderAndDetectablePrecedences(Solver* const solver,
545 const std::vector<IntervalVar*>& intervals,
546 bool mirror, bool strict);
547 ~EdgeFinderAndDetectablePrecedences() {
548 gtl::STLDeleteElements(&by_start_min_);
549 }
550 int64 size() const { return by_start_min_.size(); }
551 IntervalVar* interval(int index) { return by_start_min_[index]->interval; }
552 void UpdateEst();
553 void OverloadChecking();
554 bool DetectablePrecedences();
555 bool EdgeFinder();
556
557 private:
558 Solver* const solver_;
559
560 // --- All the following member variables are essentially used as local ones:
561 // no invariant is maintained about them, except for the fact that the vectors
562 // always contains all the considered intervals, so any function that wants to
563 // use them must first sort them in the right order.
564
565 // All of these vectors store the same set of objects. Therefore, at
566 // destruction time, STLDeleteElements should be called on only one of them.
567 // It does not matter which one.
568
569 ThetaTree theta_tree_;
570 std::vector<DisjunctiveTask*> by_end_min_;
571 std::vector<DisjunctiveTask*> by_start_min_;
572 std::vector<DisjunctiveTask*> by_end_max_;
573 std::vector<DisjunctiveTask*> by_start_max_;
574 // new_est_[i] is the new start min for interval est_[i]->interval.
575 std::vector<int64> new_est_;
576 // new_lct_[i] is the new end max for interval est_[i]->interval.
577 std::vector<int64> new_lct_;
578 DisjunctiveLambdaThetaTree lt_tree_;
579 const bool strict_;
580};
581
582EdgeFinderAndDetectablePrecedences::EdgeFinderAndDetectablePrecedences(
583 Solver* const solver, const std::vector<IntervalVar*>& intervals,
584 bool mirror, bool strict)
585 : solver_(solver),
586 theta_tree_(intervals.size()),
587 lt_tree_(intervals.size()),
588 strict_(strict) {
589 // Populate of the array of intervals
590 for (IntervalVar* const interval : intervals) {
591 IntervalVar* const underlying =
592 mirror ? solver->MakeMirrorInterval(interval) : interval;
593 IntervalVar* const relaxed = solver->MakeIntervalRelaxedMax(underlying);
594 DisjunctiveTask* const task = new DisjunctiveTask(relaxed);
595 by_end_min_.push_back(task);
596 by_start_min_.push_back(task);
597 by_end_max_.push_back(task);
598 by_start_max_.push_back(task);
599 new_est_.push_back(kint64min);
600 }
601}
602
603void EdgeFinderAndDetectablePrecedences::UpdateEst() {
604 std::sort(by_start_min_.begin(), by_start_min_.end(),
605 ShortestDurationStartMinLessThan<DisjunctiveTask>);
606 for (int i = 0; i < size(); ++i) {
607 by_start_min_[i]->index = i;
608 }
609}
610
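// Overload checking: tasks are added in non-decreasing order of end max; if at
// some point the earliest completion time of the added set exceeds the end max
// of the task just added, that set cannot fit before its common deadline and
// the search fails. For instance, three tasks released at time 0, each of
// duration 3, that must all finish by time 8 overload the resource (3 * 3 > 8).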
611void EdgeFinderAndDetectablePrecedences::OverloadChecking() {
612 // Initialization.
613 UpdateEst();
614 std::sort(by_end_max_.begin(), by_end_max_.end(),
615 EndMaxLessThan<DisjunctiveTask>);
616 theta_tree_.Clear();
617
618 for (DisjunctiveTask* const task : by_end_max_) {
619 theta_tree_.Insert(task);
620 if (theta_tree_.Ect() > task->interval->EndMax()) {
621 solver_->Fail();
622 }
623 }
624}
625
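// Detectable precedences: the precedence 'j before i' is detectable whenever
// EndMin(i) > StartMax(j), since j can no longer be scheduled after i. The
// start min of i can then be raised to the earliest completion time of the set
// of all tasks j whose precedence before i is detectable (i itself excluded).
// For example, if two such tasks of duration 3 are both released at time 0,
// then i cannot start before time 6.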
626bool EdgeFinderAndDetectablePrecedences::DetectablePrecedences() {
627 // Initialization.
628 UpdateEst();
629 new_est_.assign(size(), kint64min);
630
631 // Propagate in one direction
632 std::sort(by_end_min_.begin(), by_end_min_.end(),
633 EndMinLessThan<DisjunctiveTask>);
634 std::sort(by_start_max_.begin(), by_start_max_.end(),
635 StartMaxLessThan<DisjunctiveTask>);
636 theta_tree_.Clear();
637 int j = 0;
638 for (DisjunctiveTask* const task_i : by_end_min_) {
639 if (j < size()) {
640 DisjunctiveTask* task_j = by_start_max_[j];
641 while (task_i->interval->EndMin() > task_j->interval->StartMax()) {
642 theta_tree_.Insert(task_j);
643 j++;
644 if (j == size()) break;
645 task_j = by_start_max_[j];
646 }
647 }
648 const int64 esti = task_i->interval->StartMin();
649 bool inserted = theta_tree_.IsInserted(task_i);
650 if (inserted) {
651 theta_tree_.Remove(task_i);
652 }
653 const int64 oesti = theta_tree_.Ect();
654 if (inserted) {
655 theta_tree_.Insert(task_i);
656 }
657 if (oesti > esti) {
658 new_est_[task_i->index] = oesti;
659 } else {
660 new_est_[task_i->index] = kint64min;
661 }
662 }
663
664 // Apply modifications
665 bool modified = false;
666 for (int i = 0; i < size(); ++i) {
667 IntervalVar* const var = by_start_min_[i]->interval;
668 if (new_est_[i] != kint64min && (strict_ || var->DurationMin() > 0)) {
669 modified = true;
670 by_start_min_[i]->interval->SetStartMin(new_est_[i]);
671 }
672 }
673 return modified;
674}
675
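// Edge finding: with all tasks inserted in Theta, tasks are greyed in
// non-increasing order of end max; whenever the optional earliest completion
// time EctOpt() exceeds the current end max, the responsible grey task must be
// scheduled after all remaining white tasks, so its start min can be raised to
// Ect() of the white set. This is a summary of the rule implemented below,
// following Vilim's presentation.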
676bool EdgeFinderAndDetectablePrecedences::EdgeFinder() {
677 // Initialization.
678 UpdateEst();
679 for (int i = 0; i < size(); ++i) {
680 new_est_[i] = by_start_min_[i]->interval->StartMin();
681 }
682
683 // Push in one direction.
684 std::sort(by_end_max_.begin(), by_end_max_.end(),
685 EndMaxLessThan<DisjunctiveTask>);
686 lt_tree_.Clear();
687 for (int i = 0; i < size(); ++i) {
688 lt_tree_.Insert(*by_start_min_[i]);
689 DCHECK_EQ(i, by_start_min_[i]->index);
690 }
691 for (int j = size() - 2; j >= 0; --j) {
692 lt_tree_.Grey(*by_end_max_[j + 1]);
693 DisjunctiveTask* const twj = by_end_max_[j];
694 // We should have checked for overloading earlier.
695 DCHECK_LE(lt_tree_.Ect(), twj->interval->EndMax());
696 while (lt_tree_.EctOpt() > twj->interval->EndMax()) {
697 const int i = lt_tree_.ResponsibleOpt();
698 DCHECK_GE(i, 0);
699 if (lt_tree_.Ect() > new_est_[i]) {
700 new_est_[i] = lt_tree_.Ect();
701 }
702 lt_tree_.Reset(i);
703 }
704 }
705
706 // Apply modifications.
707 bool modified = false;
708 for (int i = 0; i < size(); ++i) {
709 IntervalVar* const var = by_start_min_[i]->interval;
710 if (var->StartMin() < new_est_[i] && (strict_ || var->DurationMin() > 0)) {
711 modified = true;
712 var->SetStartMin(new_est_[i]);
713 }
714 }
715 return modified;
716}
717
718// --------- Disjunctive Constraint ----------
719
720// ----- Propagation on ranked activities -----
721
722class RankedPropagator : public Constraint {
723 public:
724 RankedPropagator(Solver* const solver, const std::vector<IntVar*>& nexts,
725 const std::vector<IntervalVar*>& intervals,
726 const std::vector<IntVar*>& slacks,
727 DisjunctiveConstraint* const disjunctive)
728 : Constraint(solver),
729 nexts_(nexts),
730 intervals_(intervals),
731 slacks_(slacks),
732 disjunctive_(disjunctive),
733 partial_sequence_(intervals.size()),
734 previous_(intervals.size() + 2, 0) {}
735
736 ~RankedPropagator() override {}
737
738 void Post() override {
739 Demon* const delayed =
740 solver()->MakeDelayedConstraintInitialPropagateCallback(this);
741 for (int i = 0; i < intervals_.size(); ++i) {
742 nexts_[i]->WhenBound(delayed);
743 intervals_[i]->WhenAnything(delayed);
744 slacks_[i]->WhenRange(delayed);
745 }
746 nexts_.back()->WhenBound(delayed);
747 }
748
749 void InitialPropagate() override {
750 PropagateNexts();
751 PropagateSequence();
752 }
753
754 void PropagateNexts() {
755 Solver* const s = solver();
756 const int ranked_first = partial_sequence_.NumFirstRanked();
757 const int ranked_last = partial_sequence_.NumLastRanked();
758 const int sentinel =
759 ranked_last == 0
760 ? nexts_.size()
761 : partial_sequence_[intervals_.size() - ranked_last] + 1;
762 int first = 0;
763 int counter = 0;
764 while (nexts_[first]->Bound()) {
765 DCHECK_NE(first, nexts_[first]->Min());
766 first = nexts_[first]->Min();
767 if (first == sentinel) {
768 return;
769 }
770 if (++counter > ranked_first) {
771 DCHECK(intervals_[first - 1]->MayBePerformed());
772 partial_sequence_.RankFirst(s, first - 1);
773 VLOG(2) << "RankFirst " << first - 1 << " -> "
774 << partial_sequence_.DebugString();
775 }
776 }
777 previous_.assign(previous_.size(), -1);
778 for (int i = 0; i < nexts_.size(); ++i) {
779 if (nexts_[i]->Bound()) {
780 previous_[nexts_[i]->Min()] = i;
781 }
782 }
783 int last = previous_.size() - 1;
784 counter = 0;
785 while (previous_[last] != -1) {
786 last = previous_[last];
787 if (++counter > ranked_last) {
788 partial_sequence_.RankLast(s, last - 1);
789 VLOG(2) << "RankLast " << last - 1 << " -> "
790 << partial_sequence_.DebugString();
791 }
792 }
793 }
794
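// Propagates start times along the ranked chain: each ranked interval must
// start at least slack-min plus transition time after the start of its
// predecessor, and symmetrically from the other side. For example, if an
// interval starts in [10, 12], its slack is in [0, 3] and the transition time
// to the next ranked interval is 2, then that next interval starts in
// [10 + 0 + 2, 12 + 3 + 2] = [12, 17].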
795 void PropagateSequence() {
796 const int last_position = intervals_.size() - 1;
797 const int first_sentinel = partial_sequence_.NumFirstRanked();
798 const int last_sentinel = last_position - partial_sequence_.NumLastRanked();
799 // Propagates on ranked first from left to right.
800 for (int i = 0; i < first_sentinel - 1; ++i) {
801 IntervalVar* const interval = RankedInterval(i);
802 IntervalVar* const next_interval = RankedInterval(i + 1);
803 IntVar* const slack = RankedSlack(i);
804 const int64 transition_time = RankedTransitionTime(i, i + 1);
805 next_interval->SetStartRange(
806 CapAdd(interval->StartMin(), CapAdd(slack->Min(), transition_time)),
807 CapAdd(interval->StartMax(), CapAdd(slack->Max(), transition_time)));
808 }
809 // Propagates on ranked last from right to left.
810 for (int i = last_position; i > last_sentinel + 1; --i) {
811 IntervalVar* const interval = RankedInterval(i - 1);
812 IntervalVar* const next_interval = RankedInterval(i);
813 IntVar* const slack = RankedSlack(i - 1);
814 const int64 transition_time = RankedTransitionTime(i - 1, i);
815 interval->SetStartRange(CapSub(next_interval->StartMin(),
816 CapAdd(slack->Max(), transition_time)),
817 CapSub(next_interval->StartMax(),
818 CapAdd(slack->Min(), transition_time)));
819 }
820 // Propagate across.
821 IntervalVar* const first_interval =
822 first_sentinel > 0 ? RankedInterval(first_sentinel - 1) : nullptr;
823 IntVar* const first_slack =
824 first_sentinel > 0 ? RankedSlack(first_sentinel - 1) : nullptr;
825 IntervalVar* const last_interval = last_sentinel < last_position
826 ? RankedInterval(last_sentinel + 1)
827 : nullptr;
828
829 // Nothing to do afterwards, exiting.
830 if (first_interval == nullptr && last_interval == nullptr) {
831 return;
832 }
833 // Propagates to the middle part.
834 // This assumes triangular inequality in the transition times.
835 for (int i = first_sentinel; i <= last_sentinel; ++i) {
836 IntervalVar* const interval = RankedInterval(i);
837 IntVar* const slack = RankedSlack(i);
838 if (interval->MayBePerformed()) {
839 const bool performed = interval->MustBePerformed();
840 if (first_interval != nullptr) {
841 const int64 transition_time =
842 RankedTransitionTime(first_sentinel - 1, i);
843 interval->SetStartRange(
844 CapAdd(first_interval->StartMin(),
845 CapAdd(first_slack->Min(), transition_time)),
846 CapAdd(first_interval->StartMax(),
847 CapAdd(first_slack->Max(), transition_time)));
848 if (performed) {
849 first_interval->SetStartRange(
850 CapSub(interval->StartMin(),
851 CapAdd(first_slack->Max(), transition_time)),
852 CapSub(interval->StartMax(),
853 CapAdd(first_slack->Min(), transition_time)));
854 }
855 }
856 if (last_interval != nullptr) {
857 const int64 transition_time =
858 RankedTransitionTime(i, last_sentinel + 1);
859 interval->SetStartRange(
860 CapSub(last_interval->StartMin(),
861 CapAdd(slack->Max(), transition_time)),
862 CapSub(last_interval->StartMax(),
863 CapAdd(slack->Min(), transition_time)));
864 if (performed) {
865 last_interval->SetStartRange(
866 CapAdd(interval->StartMin(),
867 CapAdd(slack->Min(), transition_time)),
868 CapAdd(interval->StartMax(),
869 CapAdd(slack->Max(), transition_time)));
870 }
871 }
872 }
873 }
874 // TODO(user): cache transition on ranked intervals in a vector.
875 // Propagates on ranked first from right to left.
876 for (int i = std::min(first_sentinel - 2, last_position - 1); i >= 0; --i) {
877 IntervalVar* const interval = RankedInterval(i);
878 IntervalVar* const next_interval = RankedInterval(i + 1);
879 IntVar* const slack = RankedSlack(i);
880 const int64 transition_time = RankedTransitionTime(i, i + 1);
881 interval->SetStartRange(CapSub(next_interval->StartMin(),
882 CapAdd(slack->Max(), transition_time)),
883 CapSub(next_interval->StartMax(),
884 CapAdd(slack->Min(), transition_time)));
885 }
886 // Propagates on ranked last from left to right.
887 for (int i = last_sentinel + 1; i < last_position - 1; ++i) {
888 IntervalVar* const interval = RankedInterval(i);
889 IntervalVar* const next_interval = RankedInterval(i + 1);
890 IntVar* const slack = RankedSlack(i);
891 const int64 transition_time = RankedTransitionTime(i, i + 1);
892 next_interval->SetStartRange(
893 CapAdd(interval->StartMin(), CapAdd(slack->Min(), transition_time)),
894 CapAdd(interval->StartMax(), CapAdd(slack->Max(), transition_time)));
895 }
896 // TODO(user) : Propagate on slacks.
897 }
898
899 IntervalVar* RankedInterval(int i) const {
900 const int index = partial_sequence_[i];
901 return intervals_[index];
902 }
903
904 IntVar* RankedSlack(int i) const {
905 const int index = partial_sequence_[i];
906 return slacks_[index];
907 }
908
909 int64 RankedTransitionTime(int before, int after) const {
910 const int before_index = partial_sequence_[before];
911 const int after_index = partial_sequence_[after];
912
913 return disjunctive_->TransitionTime(before_index, after_index);
914 }
915
916 std::string DebugString() const override {
917 return absl::StrFormat(
918 "RankedPropagator([%s], nexts = [%s], intervals = [%s])",
919 partial_sequence_.DebugString(), JoinDebugStringPtr(nexts_, ", "),
920 JoinDebugStringPtr(intervals_, ", "));
921 }
922
923 void Accept(ModelVisitor* const visitor) const override {
924 LOG(FATAL) << "Not yet implemented";
925 // TODO(user): IMPLEMENT ME.
926 }
927
928 private:
929 std::vector<IntVar*> nexts_;
930 std::vector<IntervalVar*> intervals_;
931 std::vector<IntVar*> slacks_;
932 DisjunctiveConstraint* const disjunctive_;
933 RevPartialSequence partial_sequence_;
934 std::vector<int> previous_;
935};
936
937// A class that stores several propagators for the sequence constraint, and
938// calls them until a fixpoint is reached.
939
940class FullDisjunctiveConstraint : public DisjunctiveConstraint {
941 public:
942 FullDisjunctiveConstraint(Solver* const s,
943 const std::vector<IntervalVar*>& intervals,
944 const std::string& name, bool strict)
945 : DisjunctiveConstraint(s, intervals, name),
946 sequence_var_(nullptr),
947 straight_(s, intervals, false, strict),
948 mirror_(s, intervals, true, strict),
949 straight_not_last_(s, intervals, false, strict),
950 mirror_not_last_(s, intervals, true, strict),
951 strict_(strict) {}
952
953 ~FullDisjunctiveConstraint() override {}
954
955 void Post() override {
956 Demon* const d = MakeDelayedConstraintDemon0(
957 solver(), this, &FullDisjunctiveConstraint::InitialPropagate,
958 "InitialPropagate");
959 for (int32 i = 0; i < straight_.size(); ++i) {
960 straight_.interval(i)->WhenAnything(d);
961 }
962 }
963
964 void InitialPropagate() override {
965 bool all_optional_or_unperformed = true;
966 for (const IntervalVar* const interval : intervals_) {
967 if (interval->MustBePerformed()) {
968 all_optional_or_unperformed = false;
969 break;
970 }
971 }
972 if (all_optional_or_unperformed) { // Nothing to deduce
973 return;
974 }
975
976 bool all_times_fixed = true;
977 for (const IntervalVar* const interval : intervals_) {
978 if (interval->MayBePerformed() &&
979 (interval->StartMin() != interval->StartMax() ||
980 interval->DurationMin() != interval->DurationMax() ||
981 interval->EndMin() != interval->EndMax())) {
982 all_times_fixed = false;
983 break;
984 }
985 }
986
987 if (all_times_fixed) {
988 PropagatePerformed();
989 } else {
990 do {
991 do {
992 do {
993 // OverloadChecking is symmetrical. It has the same effect on the
994 // straight and the mirrored version.
995 straight_.OverloadChecking();
996 } while (straight_.DetectablePrecedences() ||
997 mirror_.DetectablePrecedences());
998 } while (straight_not_last_.Propagate() ||
999 mirror_not_last_.Propagate());
1000 } while (straight_.EdgeFinder() || mirror_.EdgeFinder());
1001 }
1002 }
1003
1004 bool Intersect(IntervalVar* const i1, IntervalVar* const i2) const {
1005 return i1->StartMin() < i2->EndMax() && i2->StartMin() < i1->EndMax();
1006 }
1007
1008 void PropagatePerformed() {
1009 performed_.clear();
1010 optional_.clear();
1011 for (IntervalVar* const interval : intervals_) {
1012 if (interval->MustBePerformed()) {
1013 performed_.push_back(interval);
1014 } else if (interval->MayBePerformed()) {
1015 optional_.push_back(interval);
1016 }
1017 }
1018 // Checks feasibility of performed;
1019 if (performed_.empty()) return;
1020 std::sort(performed_.begin(), performed_.end(), IntervalStartMinLessThan);
1021 for (int i = 0; i < performed_.size() - 1; ++i) {
1022 if (performed_[i]->EndMax() > performed_[i + 1]->StartMin()) {
1023 solver()->Fail();
1024 }
1025 }
1026
1027 // Checks if optional intervals can be inserted.
1028 if (optional_.empty()) return;
1029 int index = 0;
1030 const int num_performed = performed_.size();
1031 std::sort(optional_.begin(), optional_.end(), IntervalStartMinLessThan);
1032 for (IntervalVar* const candidate : optional_) {
1033 const int64 start = candidate->StartMin();
1034 while (index < num_performed && start >= performed_[index]->EndMax()) {
1035 index++;
1036 }
1037 if (index == num_performed) return;
1038 if (Intersect(candidate, performed_[index]) ||
1039 (index < num_performed - 1 &&
1040 Intersect(candidate, performed_[index + 1]))) {
1041 candidate->SetPerformed(false);
1042 }
1043 }
1044 }
1045
1046 void Accept(ModelVisitor* const visitor) const override {
1047 visitor->BeginVisitConstraint(ModelVisitor::kDisjunctive, this);
1048 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
1049 intervals_);
1050 if (sequence_var_ != nullptr) {
1051 visitor->VisitSequenceArgument(ModelVisitor::kSequenceArgument,
1052 sequence_var_);
1053 }
1054 visitor->EndVisitConstraint(ModelVisitor::kDisjunctive, this);
1055 }
1056
1057 SequenceVar* MakeSequenceVar() override {
1058 BuildNextModelIfNeeded();
1059 if (sequence_var_ == nullptr) {
1060 solver()->SaveValue(reinterpret_cast<void**>(&sequence_var_));
1061 sequence_var_ = solver()->RevAlloc(
1062 new SequenceVar(solver(), intervals_, nexts_, name()));
1063 }
1064 return sequence_var_;
1065 }
1066
1067 std::string DebugString() const override {
1068 return absl::StrFormat("FullDisjunctiveConstraint([%s], %i)",
1069 JoinDebugStringPtr(intervals_, ", "), strict_);
1070 }
1071
1072 const std::vector<IntVar*>& nexts() const override { return nexts_; }
1073
1074 const std::vector<IntVar*>& actives() const override { return actives_; }
1075
1076 const std::vector<IntVar*>& time_cumuls() const override {
1077 return time_cumuls_;
1078 }
1079
1080 const std::vector<IntVar*>& time_slacks() const override {
1081 return time_slacks_;
1082 }
1083
1084 private:
1085 int64 Distance(int64 activity_plus_one, int64 next_activity_plus_one) {
1086 return (activity_plus_one == 0 ||
1087 next_activity_plus_one > intervals_.size())
1088 ? 0
1089 : transition_time_(activity_plus_one - 1,
1090 next_activity_plus_one - 1);
1091 }
1092
1093 void BuildNextModelIfNeeded() {
1094 if (!nexts_.empty()) {
1095 return;
1096 }
1097 Solver* const s = solver();
1098 const std::string& ct_name = name();
1099 const int num_intervals = intervals_.size();
1100 const int num_nodes = intervals_.size() + 1;
1101 int64 horizon = 0;
1102 for (int i = 0; i < intervals_.size(); ++i) {
1103 if (intervals_[i]->MayBePerformed()) {
1104 horizon = std::max(horizon, intervals_[i]->EndMax());
1105 }
1106 }
1107
1108 // Create the next model.
1109 s->MakeIntVarArray(num_nodes, 1, num_nodes, ct_name + "_nexts", &nexts_);
1110 // Alldifferent on the nexts variable (the equivalent problem is a tsp).
1111 s->AddConstraint(s->MakeAllDifferent(nexts_));
1112
1113 actives_.resize(num_nodes);
1114 for (int i = 0; i < num_intervals; ++i) {
1115 actives_[i + 1] = intervals_[i]->PerformedExpr()->Var();
1116 s->AddConstraint(
1117 s->MakeIsDifferentCstCt(nexts_[i + 1], i + 1, actives_[i + 1]));
1118 }
1119 std::vector<IntVar*> short_actives(actives_.begin() + 1, actives_.end());
1120 actives_[0] = s->MakeMax(short_actives)->Var();
1121
1122 // No Cycle on the corresponding tsp.
1123 s->AddConstraint(s->MakeNoCycle(nexts_, actives_));
1124
1125 // Cumul on time.
1126 time_cumuls_.resize(num_nodes + 1);
1127 // Slacks between activities.
1128 time_slacks_.resize(num_nodes);
1129
1130 time_slacks_[0] = s->MakeIntVar(0, horizon, "initial_slack");
1131 // TODO(user): check this.
1132 time_cumuls_[0] = s->MakeIntConst(0);
1133
1134 for (int64 i = 0; i < num_intervals; ++i) {
1135 IntervalVar* const var = intervals_[i];
1136 if (var->MayBePerformed()) {
1137 const int64 duration_min = var->DurationMin();
1138 time_slacks_[i + 1] = s->MakeIntVar(
1139 duration_min, horizon, absl::StrFormat("time_slacks(%d)", i + 1));
1140 // TODO(user): Check SafeStartExpr();
1141 time_cumuls_[i + 1] = var->SafeStartExpr(var->StartMin())->Var();
1142 if (var->DurationMax() != duration_min) {
1143 s->AddConstraint(s->MakeGreaterOrEqual(
1144 time_slacks_[i + 1], var->SafeDurationExpr(duration_min)));
1145 }
1146 } else {
1147 time_slacks_[i + 1] = s->MakeIntVar(
1148 0, horizon, absl::StrFormat("time_slacks(%d)", i + 1));
1149 time_cumuls_[i + 1] = s->MakeIntConst(horizon);
1150 }
1151 }
1152 // TODO(user): Find a better UB for the last time cumul.
1153 time_cumuls_[num_nodes] = s->MakeIntVar(0, 2 * horizon, ct_name + "_ect");
1154 s->AddConstraint(
1155 s->MakePathCumul(nexts_, actives_, time_cumuls_, time_slacks_,
1156 [this](int64 x, int64 y) { return Distance(x, y); }));
1157
1158 std::vector<IntVar*> short_slacks(time_slacks_.begin() + 1,
1159 time_slacks_.end());
1160 s->AddConstraint(s->RevAlloc(
1161 new RankedPropagator(s, nexts_, intervals_, short_slacks, this)));
1162 }
1163
1164 SequenceVar* sequence_var_;
1165 EdgeFinderAndDetectablePrecedences straight_;
1166 EdgeFinderAndDetectablePrecedences mirror_;
1167 NotLast straight_not_last_;
1168 NotLast mirror_not_last_;
1169 std::vector<IntVar*> nexts_;
1170 std::vector<IntVar*> actives_;
1171 std::vector<IntVar*> time_cumuls_;
1172 std::vector<IntVar*> time_slacks_;
1173 std::vector<IntervalVar*> performed_;
1174 std::vector<IntervalVar*> optional_;
1175 const bool strict_;
1176 DISALLOW_COPY_AND_ASSIGN(FullDisjunctiveConstraint);
1177};
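
// A minimal sketch of how this constraint is typically reached from user code
// (the helper name and data are illustrative, not part of the library):
// Solver::MakeDisjunctiveConstraint returns a DisjunctiveConstraint, and
// ranking decisions go through the SequenceVar it can build.
void DisjunctiveUsageSketch() {
  Solver solver("disjunctive_sketch");
  std::vector<IntervalVar*> intervals;
  for (int i = 0; i < 3; ++i) {
    intervals.push_back(solver.MakeFixedDurationIntervalVar(
        0, 20, 5, false, absl::StrCat("task_", i)));
  }
  DisjunctiveConstraint* const disjunctive =
      solver.MakeDisjunctiveConstraint(intervals, "machine");
  solver.AddConstraint(disjunctive);
  SequenceVar* const sequence = disjunctive->MakeSequenceVar();
  (void)sequence;  // Typically handed to a sequence decision builder.
}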
1178
1179// =====================================================================
1180// Cumulative
1181// =====================================================================
1182
1183// A cumulative Theta node, where two energies, corresponding to 2 capacities,
1184// are stored.
1185struct DualCapacityThetaNode {
1186 // Special value for task indices meaning 'no such task'.
1187 static const int kNone;
1188
1189 // Identity constructor
1190 DualCapacityThetaNode()
1191 : energy(0LL),
1192 energetic_end_min(kint64min),
1193 residual_energetic_end_min(kint64min) {}
1194
1195 // Constructor for a single cumulative task in the Theta set.
1196 DualCapacityThetaNode(int64 capacity, int64 residual_capacity,
1197 const CumulativeTask& task)
1198 : energy(task.EnergyMin()),
1199 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
1200 residual_energetic_end_min(
1201 CapAdd(residual_capacity * task.interval->StartMin(), energy)) {}
1202
1203 // Constructor for a single variable cumulative task in the Theta set.
1204 DualCapacityThetaNode(int64 capacity, int64 residual_capacity,
1205 const VariableCumulativeTask& task)
1206 : energy(task.EnergyMin()),
1207 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
1208 residual_energetic_end_min(
1209 CapAdd(residual_capacity * task.interval->StartMin(), energy)) {}
1210
1211 // Sets this DualCapacityThetaNode to the result of the natural binary
1212 // operation over the two given operands, corresponding to the following set
1213 // operation: Theta = left.Theta union right.Theta
1214 //
1215// No set operation actually occurs: we only maintain the relevant quantities
1216 // associated with such sets.
1217 void Compute(const DualCapacityThetaNode& left,
1218 const DualCapacityThetaNode& right) {
1219 energy = CapAdd(left.energy, right.energy);
1220 energetic_end_min = std::max(CapAdd(left.energetic_end_min, right.energy),
1221 right.energetic_end_min);
1222 residual_energetic_end_min =
1223 std::max(CapAdd(left.residual_energetic_end_min, right.energy),
1224 right.residual_energetic_end_min);
1225 }
1226
1227 // Amount of resource consumed by the Theta set, in units of demand X time.
1228 // This is energy(Theta).
1229 int64 energy;
1230
1231 // Max_{subset S of Theta} (capacity * start_min(S) + energy(S))
1232 int64 energetic_end_min;
1233
1234 // Max_{subset S of Theta} (residual_capacity * start_min(S) + energy(S))
1235 int64 residual_energetic_end_min;
1236};
1237
1238const int DualCapacityThetaNode::kNone = -1;
1239
1240// A tree for dual capacity theta nodes
1241class DualCapacityThetaTree
1242 : public MonoidOperationTree<DualCapacityThetaNode> {
1243 public:
1245
1246 explicit DualCapacityThetaTree(int size)
1247 : MonoidOperationTree<DualCapacityThetaNode>(size),
1248 capacity_max_(-1),
1249 residual_capacity_(-1) {}
1250
1251 virtual ~DualCapacityThetaTree() {}
1252
1253 void Init(int64 capacity_max, int64 residual_capacity) {
1254 DCHECK_LE(0, residual_capacity);
1255 DCHECK_LE(residual_capacity, capacity_max);
1256 Clear();
1257 capacity_max_ = capacity_max;
1258 residual_capacity_ = residual_capacity;
1259 }
1260
1261 void Insert(const CumulativeTask* task) {
1262 Set(task->index,
1263 DualCapacityThetaNode(capacity_max_, residual_capacity_, *task));
1264 }
1265
1266 void Insert(const VariableCumulativeTask* task) {
1267 Set(task->index,
1268 DualCapacityThetaNode(capacity_max_, residual_capacity_, *task));
1269 }
1270
1271 private:
1272 int64 capacity_max_;
1273 int64 residual_capacity_;
1274 DISALLOW_COPY_AND_ASSIGN(DualCapacityThetaTree);
1275};
1276
1278
1279// An object that can dive down a branch of a DualCapacityThetaTree to compute
1280// Env(j, c) in Petr Vilim's notations.
1281//
1282// In 'Edge finding filtering algorithm for discrete cumulative resources in
1283// O(kn log n)' by Petr Vilim, this corresponds to line 6--8 in algorithm 1.3,
1284// plus all of algorithm 1.2.
1285//
1286// http://vilim.eu/petr/cp2009.pdf
1287// Note: use the version at the URL above, not the version from the
1288// conference proceedings, which has a few errors.
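// Informal summary of the code below: the dive splits Theta into an 'alpha'
// part (tasks with small enough start min, reached on the left of the dive)
// and the complementary 'beta' part; GetEnvJC() then returns
// energetic_end_min(alpha) + energy(beta).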
1289class EnvJCComputeDiver {
1290 public:
1291 static const int64 kNotAvailable;
1292 explicit EnvJCComputeDiver(int energy_threshold)
1293 : energy_threshold_(energy_threshold),
1294 energy_alpha_(kNotAvailable),
1295 energetic_end_min_alpha_(kNotAvailable) {}
1296 void OnArgumentReached(int index, const DualCapacityThetaNode& argument) {
1297 energy_alpha_ = argument.energy;
1298 energetic_end_min_alpha_ = argument.energetic_end_min;
1299 // We should reach a leaf that is not the identity
1300 // DCHECK_GT(energetic_end_min_alpha_, kint64min);
1301 // TODO(user): Check me.
1302 }
1303 bool ChooseGoLeft(const DualCapacityThetaNode& current,
1304 const DualCapacityThetaNode& left_child,
1305 const DualCapacityThetaNode& right_child) {
1306 if (right_child.residual_energetic_end_min > energy_threshold_) {
1307 return false; // enough energy on right
1308 } else {
1309 energy_threshold_ -= right_child.energy;
1310 return true;
1311 }
1312 }
1313 void OnComeBackFromLeft(const DualCapacityThetaNode& current,
1314 const DualCapacityThetaNode& left_child,
1315 const DualCapacityThetaNode& right_child) {
1316 // The left subtree intersects the alpha set.
1317 // The right subtree does not intersect the alpha set.
1318 // The energy_alpha_ and energetic_end_min_alpha_ previously
1319 // computed are valid for this node too: there's nothing to do.
1320 }
1321 void OnComeBackFromRight(const DualCapacityThetaNode& current,
1322 const DualCapacityThetaNode& left_child,
1323 const DualCapacityThetaNode& right_child) {
1324 // The left subtree is included in the alpha set.
1325 // The right subtree intersects the alpha set.
1326 energetic_end_min_alpha_ =
1327 std::max(energetic_end_min_alpha_,
1328 CapAdd(left_child.energetic_end_min, energy_alpha_));
1329 energy_alpha_ += left_child.energy;
1330 }
1331 int64 GetEnvJC(const DualCapacityThetaNode& root) const {
1332 const int64 energy = root.energy;
1333 const int64 energy_beta = CapSub(energy, energy_alpha_);
1334 return CapAdd(energetic_end_min_alpha_, energy_beta);
1335 }
1336
1337 private:
1338 // Energy threshold such that if a set has an energetic_end_min greater than
1339 // the threshold, then it can push tasks that must end at or after the
1340 // currently considered end max.
1341 //
1342 // Used when diving down only.
1343 int64 energy_threshold_;
1344
1345 // Energy of the alpha set, that is, the set of tasks whose start min does not
1346 // exceed the max start min of a set with excess residual energy.
1347 //
1348 // Used when swimming up only.
1349 int64 energy_alpha_;
1350
1351 // Energetic end min of the alpha set.
1352 //
1353 // Used when swimming up only.
1354 int64 energetic_end_min_alpha_;
1355};
1356
1358
1359// In all the following, the term 'update' means 'a potential new start min for
1360// a task'. The edge-finding algorithm is in two phases: one computes potential
1361// new start mins, the other detects whether they are applicable to each
1362// task.
1363
1364// Collection of all updates (i.e., potential new start mins) for a given value
1365// of the demand.
1366class UpdatesForADemand {
1367 public:
1368 explicit UpdatesForADemand(int size)
1369 : updates_(size, 0), up_to_date_(false) {}
1370
1371 const int64 Update(int index) { return updates_[index]; }
1372 void Reset() { up_to_date_ = false; }
1373 void SetUpdate(int index, int64 update) {
1374 DCHECK(!up_to_date_);
1375 DCHECK_LT(index, updates_.size());
1376 updates_[index] = update;
1377 }
1378 bool up_to_date() const { return up_to_date_; }
1379 void set_up_to_date() { up_to_date_ = true; }
1380
1381 private:
1382 std::vector<int64> updates_;
1383 bool up_to_date_;
1384 DISALLOW_COPY_AND_ASSIGN(UpdatesForADemand);
1385};
1386
1387// One-sided cumulative edge finder.
1388template <class Task>
1389class EdgeFinder : public Constraint {
1390 public:
1391 EdgeFinder(Solver* const solver, const std::vector<Task*>& tasks,
1392 IntVar* const capacity)
1393 : Constraint(solver),
1394 capacity_(capacity),
1395 tasks_(tasks),
1396 by_start_min_(tasks.size()),
1397 by_end_max_(tasks.size()),
1398 by_end_min_(tasks.size()),
1399 lt_tree_(tasks.size(), capacity_->Max()),
1400 dual_capacity_tree_(tasks.size()),
1401 has_zero_demand_tasks_(true) {}
1402
1403 ~EdgeFinder() override {
1404 gtl::STLDeleteElements(&tasks_);
1405 gtl::STLDeleteValues(&update_map_);
1406 }
1407
1408 void Post() override {
1409 // Add the demons
1410 Demon* const demon = MakeDelayedConstraintDemon0(
1411 solver(), this, &EdgeFinder::InitialPropagate, "RangeChanged");
1412 for (Task* const task : tasks_) {
1413 // Delay propagation, as this constraint is not incremental: we pay
1414 // O(n log n) each time the constraint is awakened.
1415 task->WhenAnything(demon);
1416 }
1417 capacity_->WhenRange(demon);
1418 }
1419
1420 // The propagation algorithm: checks for overloading, computes new start mins
1421 // according to the edge-finding rules, and applies them.
1422 void InitialPropagate() override {
1423 InitPropagation();
1424 PropagateBasedOnEndMinGreaterThanEndMax();
1425 FillInTree();
1426 PropagateBasedOnEnergy();
1427 ApplyNewBounds();
1428 }
1429
1430 void Accept(ModelVisitor* const visitor) const override {
1431 LOG(FATAL) << "Should Not Be Visited";
1432 }
1433
1434 std::string DebugString() const override { return "EdgeFinder"; }
1435
1436 private:
1437 UpdatesForADemand* GetOrMakeUpdate(int64 demand_min) {
1438 UpdatesForADemand* update = gtl::FindPtrOrNull(update_map_, demand_min);
1439 if (update == nullptr) {
1440 update = new UpdatesForADemand(tasks_.size());
1441 update_map_[demand_min] = update;
1442 }
1443 return update;
1444 }
1445
1446 // Sets the fields in a proper state to run the propagation algorithm.
1447 void InitPropagation() {
1448 // Clear the update stack
1449 start_min_update_.clear();
1450 // Re-init vectors if has_zero_demand_tasks_ is true
1451 if (has_zero_demand_tasks_.Value()) {
1452 by_start_min_.clear();
1453 by_end_min_.clear();
1454 by_end_max_.clear();
1455 // Only populate tasks with demand_min > 0.
1456 bool zero_demand = false;
1457 for (Task* const task : tasks_) {
1458 if (task->DemandMin() > 0) {
1459 by_start_min_.push_back(task);
1460 by_end_min_.push_back(task);
1461 by_end_max_.push_back(task);
1462 } else {
1463 zero_demand = true;
1464 }
1465 }
1466 if (!zero_demand) {
1467 has_zero_demand_tasks_.SetValue(solver(), false);
1468 }
1469 }
1470
1471 // sort by start min.
1472 std::sort(by_start_min_.begin(), by_start_min_.end(),
1473 StartMinLessThan<Task>);
1474 for (int i = 0; i < by_start_min_.size(); ++i) {
1475 by_start_min_[i]->index = i;
1476 }
1477 // Sort by end max.
1478 std::sort(by_end_max_.begin(), by_end_max_.end(), EndMaxLessThan<Task>);
1479 // Sort by end min.
1480 std::sort(by_end_min_.begin(), by_end_min_.end(), EndMinLessThan<Task>);
1481 // Initialize the tree with the new capacity.
1482 lt_tree_.Init(capacity_->Max());
1483 // Clear updates
1484 for (const auto& entry : update_map_) {
1485 entry.second->Reset();
1486 }
1487 }
1488
1489 // Computes all possible update values for tasks of given demand, and stores
1490 // these values in update_map_[demand].
1491 // Runs in O(n log n).
1492 // This corresponds to lines 2--13 in algorithm 1.3 in Petr Vilim's paper.
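// Concretely, in the code below, for the task of end max lct at position i in
// the end-max ordering, the candidate update is
// ceil((Env(j, c) - (capacity_max - demand_min) * lct) / demand_min),
// computed with the saturated helpers, and a running maximum over the end-max
// ordering is stored at every position.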
1493 void ComputeConditionalStartMins(UpdatesForADemand* updates,
1494 int64 demand_min) {
1495 DCHECK_GT(demand_min, 0);
1496 DCHECK(updates != nullptr);
1497 const int64 capacity_max = capacity_->Max();
1498 const int64 residual_capacity = CapSub(capacity_max, demand_min);
1499 dual_capacity_tree_.Init(capacity_max, residual_capacity);
1500 // It's important to initialize the update at IntervalVar::kMinValidValue
1501 // rather than at kInt64min, because its opposite may be used if it's a
1502 // mirror variable, and
1503 // -kInt64min = -(-kInt64max - 1) = kInt64max + 1, which overflows.
1504 int64 update = IntervalVar::kMinValidValue;
1505 for (int i = 0; i < by_end_max_.size(); ++i) {
1506 Task* const task = by_end_max_[i];
1507 if (task->EnergyMin() == 0) continue;
1508 const int64 current_end_max = task->interval->EndMax();
1509 dual_capacity_tree_.Insert(task);
1510 const int64 energy_threshold = residual_capacity * current_end_max;
1511 const DualCapacityThetaNode& root = dual_capacity_tree_.result();
1512 const int64 res_energetic_end_min = root.residual_energetic_end_min;
1513 if (res_energetic_end_min > energy_threshold) {
1514 EnvJCComputeDiver diver(energy_threshold);
1515 dual_capacity_tree_.DiveInTree(&diver);
1516 const int64 enjv = diver.GetEnvJC(dual_capacity_tree_.result());
1517 const int64 numerator = CapSub(enjv, energy_threshold);
1518 const int64 diff = MathUtil::CeilOfRatio(numerator, demand_min);
1519 update = std::max(update, diff);
1520 }
1521 updates->SetUpdate(i, update);
1522 }
1523 updates->set_up_to_date();
1524 }
1525
1526 // Returns the new start min that can be inferred for task_to_push if it is
1527 // proved that it cannot end before by_end_max[end_max_index] does.
1528 int64 ConditionalStartMin(const Task& task_to_push, int end_max_index) {
1529 if (task_to_push.EnergyMin() == 0) {
1530 return task_to_push.interval->StartMin();
1531 }
1532 const int64 demand_min = task_to_push.DemandMin();
1533 UpdatesForADemand* const updates = GetOrMakeUpdate(demand_min);
1534 if (!updates->up_to_date()) {
1535 ComputeConditionalStartMins(updates, demand_min);
1536 }
1537 DCHECK(updates->up_to_date());
1538 return updates->Update(end_max_index);
1539 }
1540
1541 // Propagates by discovering all end-after-end relationships purely based on
1542 // comparisons between end mins and end maxes: there is no energetic reasoning
1543// here, but this allows updates that the standard edge-finding detection rule
1544 // misses.
1545 // See paragraph 6.2 in http://vilim.eu/petr/cp2009.pdf.
1546 void PropagateBasedOnEndMinGreaterThanEndMax() {
1547 int end_max_index = 0;
1548 int64 max_start_min = kint64min;
1549 for (Task* const task : by_end_min_) {
1550 const int64 end_min = task->interval->EndMin();
1551 while (end_max_index < by_start_min_.size() &&
1552 by_end_max_[end_max_index]->interval->EndMax() <= end_min) {
1553 max_start_min = std::max(
1554 max_start_min, by_end_max_[end_max_index]->interval->StartMin());
1555 ++end_max_index;
1556 }
1557 if (end_max_index > 0 && task->interval->StartMin() <= max_start_min &&
1558 task->interval->EndMax() > task->interval->EndMin()) {
1559 DCHECK_LE(by_end_max_[end_max_index - 1]->interval->EndMax(), end_min);
1560 // The update is valid and may be interesting:
1561 // * If task->StartMin() > max_start_min, then all tasks whose end_max
1562 // is less than or equal to end_min have a start min that is less
1563 // than task->StartMin(). In this case, any update we could
1564 // compute would also be computed by the standard edge-finding
1565 // rule. It's better not to compute it, then: it may not be
1566 // needed.
1567 // * If task->EndMax() <= task->EndMin(), that means the end max is
1568// bound. In that case, 'task' itself belongs to the set of tasks
1569 // that must end before end_min, which may cause the result of
1570 // ConditionalStartMin(task, end_max_index - 1) not to be a valid
1571 // update.
1572 const int64 update = ConditionalStartMin(*task, end_max_index - 1);
1573 start_min_update_.push_back(std::make_pair(task->interval, update));
1574 }
1575 }
1576 }
1577
1578 // Fill the theta-lambda-tree, and check for overloading.
1579 void FillInTree() {
1580 for (Task* const task : by_end_max_) {
1581 lt_tree_.Insert(*task);
1582 // Maximum energetic end min without overload.
1583 const int64 max_feasible =
1584 CapProd(capacity_->Max(), task->interval->EndMax());
1585 if (lt_tree_.energetic_end_min() > max_feasible) {
1586 solver()->Fail();
1587 }
1588 }
1589 }
1590
1591 // The heart of the propagation algorithm. Should be called with all tasks
1592 // being in the Theta set. It detects tasks that need to be pushed.
1593 void PropagateBasedOnEnergy() {
1594 for (int j = by_start_min_.size() - 2; j >= 0; --j) {
1595 lt_tree_.Grey(*by_end_max_[j + 1]);
1596 Task* const twj = by_end_max_[j];
1597 // We should have checked for overload earlier.
1598 const int64 max_feasible =
1599 CapProd(capacity_->Max(), twj->interval->EndMax());
1600 DCHECK_LE(lt_tree_.energetic_end_min(), max_feasible);
1601 while (lt_tree_.energetic_end_min_opt() > max_feasible) {
1602 const int i = lt_tree_.argmax_energetic_end_min_opt();
1603 DCHECK_GE(i, 0);
1604 PropagateTaskCannotEndBefore(i, j);
1605 lt_tree_.Reset(i);
1606 }
1607 }
1608 }
1609
1610 // Takes into account the fact that the task of given index cannot end before
1611 // the given new end min.
1612 void PropagateTaskCannotEndBefore(int index, int end_max_index) {
1613 Task* const task_to_push = by_start_min_[index];
1614 const int64 update = ConditionalStartMin(*task_to_push, end_max_index);
1615 start_min_update_.push_back(std::make_pair(task_to_push->interval, update));
1616 }
1617
1618 // Applies the previously computed updates.
1619 void ApplyNewBounds() {
1620 for (const std::pair<IntervalVar*, int64>& update : start_min_update_) {
1621 update.first->SetStartMin(update.second);
1622 }
1623 }
1624
1625 // Capacity of the cumulative resource.
1626 IntVar* const capacity_;
1627
1628 // Initial vector of tasks
1629 std::vector<Task*> tasks_;
1630
1631 // Cumulative tasks, ordered by non-decreasing start min.
1632 std::vector<Task*> by_start_min_;
1633
1634 // Cumulative tasks, ordered by non-decreasing end max.
1635 std::vector<Task*> by_end_max_;
1636
1637 // Cumulative tasks, ordered by non-decreasing end min.
1638 std::vector<Task*> by_end_min_;
1639
1640 // Cumulative theta-lambda tree.
1641 CumulativeLambdaThetaTree lt_tree_;
1642
1643 // Needed by ComputeConditionalStartMins.
1644 DualCapacityThetaTree dual_capacity_tree_;
1645
1646 // Stack of updates to the new start min to do.
1647 std::vector<std::pair<IntervalVar*, int64>> start_min_update_;
1648
1649 // update_map_[d][i] is an integer such that if a task
1650 // whose demand is d cannot end before by_end_max_[i], then it cannot start
1651 // before update_map_[d][i].
1652 absl::flat_hash_map<int64, UpdatesForADemand*> update_map_;
1653
1654 // Whether at least one task has a demand min == 0.
1655 Rev<bool> has_zero_demand_tasks_;
1656
1657 DISALLOW_COPY_AND_ASSIGN(EdgeFinder);
1658};
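// Energy argument behind the overload check in FillInTree() above (an
// illustrative sketch with made-up numbers): with capacity max 2, three tasks
// that each have demand 1 and duration 4, cannot start before 0, and must end
// by 5 require 3 * 1 * 4 = 12 units of energy, while the window [0, 5) only
// offers 2 * 5 = 10. The energetic end min of the theta set then exceeds
// capacity * end max, and solver()->Fail() is called.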
1659
1660// A point in time where the usage profile changes.
1661// Starting from time (included), the usage is what it was immediately before
1662// time, plus the delta.
1663//
1664// Example:
1665// Consider the following vector of ProfileDelta's:
1666// { t=1, d=+3}, { t=4, d=+1 }, { t=5, d=-2}, { t=8, d=-1}
1667// This represents the following usage profile:
1668//
1669// usage
1670// 4 | ****.
1671// 3 | ************. .
1672// 2 | . . ************.
1673// 1 | . . . .
1674// 0 |*******----------------------------*******************-> time
1675// 0 1 2 3 4 5 6 7 8 9
1676//
1677// Note that the usage profile is right-continuous (see
1678// http://en.wikipedia.org/wiki/Left-continuous#Directional_continuity).
1679// This is because intervals for tasks are always closed on the start side
1680// and open on the end side.
1681struct ProfileDelta {
1682 ProfileDelta(int64 _time, int64 _delta) : time(_time), delta(_delta) {}
1683 int64 time;
1684 int64 delta;
1685};
1686
1687bool TimeLessThan(const ProfileDelta& delta1, const ProfileDelta& delta2) {
1688 return delta1.time < delta2.time;
1689}
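// A minimal sketch of how such a delta vector is turned into usage values,
// mirroring what CumulativeTimeTable::BuildProfile() below does after sorting
// with TimeLessThan (the concrete numbers are the ones from the example
// above):
//
//   std::vector<ProfileDelta> deltas = {{1, +3}, {4, +1}, {5, -2}, {8, -1}};
//   std::sort(deltas.begin(), deltas.end(), TimeLessThan);
//   int64 usage = 0;
//   for (const ProfileDelta& d : deltas) {
//     usage += d.delta;  // usage becomes 3, 4, 2 and 1 at t = 1, 4, 5 and 8.
//   }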
1690
1691// Cumulative time-table.
1692//
1693// This class implements a propagator for the CumulativeConstraint which is not
1694// incremental, and where a call to InitialPropagate() takes time which is
1695// O(n^2) and Omega(n log n) with n the number of cumulative tasks.
1696//
1697// Despite the high complexity, this propagator is needed: of those
1698// implemented, it is the only one that guarantees that, once all variables are
1699// instantiated, no contradiction is detected if and only if the constraint is satisfied.
1700//
1701// The implementation is quite naive, and could certainly be improved, for
1702// example by maintaining the profile incrementally.
1703template <class Task>
1704class CumulativeTimeTable : public Constraint {
1705 public:
1706 CumulativeTimeTable(Solver* const solver, const std::vector<Task*>& tasks,
1707 IntVar* const capacity)
1708 : Constraint(solver), by_start_min_(tasks), capacity_(capacity) {
1709 // There may be up to 2 deltas per interval (one on each side),
1710 // plus two sentinels
1711 const int profile_max_size = 2 * by_start_min_.size() + 2;
1712 profile_non_unique_time_.reserve(profile_max_size);
1713 profile_unique_time_.reserve(profile_max_size);
1714 }
1715
1716 ~CumulativeTimeTable() override { gtl::STLDeleteElements(&by_start_min_); }
1717
1718 void InitialPropagate() override {
1719 BuildProfile();
1720 PushTasks();
1721 // TODO(user): When a task has a fixed part, we could propagate
1722 // max_demand from its current location.
1723 }
1724
1725 void Post() override {
1726 Demon* demon = MakeDelayedConstraintDemon0(
1727 solver(), this, &CumulativeTimeTable::InitialPropagate,
1728 "InitialPropagate");
1729 for (Task* const task : by_start_min_) {
1730 task->WhenAnything(demon);
1731 }
1732 capacity_->WhenRange(demon);
1733 }
1734
1735 void Accept(ModelVisitor* const visitor) const override {
1736 LOG(FATAL) << "Should not be visited";
1737 }
1738
1739 std::string DebugString() const override { return "CumulativeTimeTable"; }
1740
1741 private:
1742 // Build the usage profile. Runs in O(n log n).
1743 void BuildProfile() {
1744 // Build the profile with non-unique times.
1745 profile_non_unique_time_.clear();
1746 for (const Task* const task : by_start_min_) {
1747 const IntervalVar* const interval = task->interval;
1748 const int64 start_max = interval->StartMax();
1749 const int64 end_min = interval->EndMin();
1750 if (interval->MustBePerformed() && start_max < end_min) {
1751 const int64 demand_min = task->DemandMin();
1752 if (demand_min > 0) {
1753 profile_non_unique_time_.emplace_back(start_max, +demand_min);
1754 profile_non_unique_time_.emplace_back(end_min, -demand_min);
1755 }
1756 }
1757 }
1758 // Sort
1759 std::sort(profile_non_unique_time_.begin(), profile_non_unique_time_.end(),
1760 TimeLessThan);
1761 // Build profile with unique times
1762 profile_unique_time_.clear();
1763 profile_unique_time_.emplace_back(kint64min, 0);
1764 int64 usage = 0;
1765 for (const ProfileDelta& step : profile_non_unique_time_) {
1766 if (step.time == profile_unique_time_.back().time) {
1767 profile_unique_time_.back().delta += step.delta;
1768 } else {
1769 profile_unique_time_.push_back(step);
1770 }
1771 // Update usage.
1772 usage += step.delta;
1773 }
1774 // Check that the final usage is 0.
1775 DCHECK_EQ(0, usage);
1776 // Scan to find max usage.
1777 int64 max_usage = 0;
1778 for (const ProfileDelta& step : profile_unique_time_) {
1779 usage += step.delta;
1780 if (usage > max_usage) {
1781 max_usage = usage;
1782 }
1783 }
1784 DCHECK_EQ(0, usage);
1785 capacity_->SetMin(max_usage);
1786 // Add a sentinel.
1787 profile_unique_time_.emplace_back(kint64max, 0);
1788 }
1789
1790 // Update the start min for all tasks. Runs in O(n^2) and Omega(n).
1791 void PushTasks() {
1792 std::sort(by_start_min_.begin(), by_start_min_.end(),
1793 StartMinLessThan<Task>);
1794 int64 usage = 0;
1795 int profile_index = 0;
1796 for (const Task* const task : by_start_min_) {
1797 const IntervalVar* const interval = task->interval;
1798 if (interval->StartMin() == interval->StartMax() &&
1799 interval->EndMin() == interval->EndMax()) {
1800 continue;
1801 }
1802 while (interval->StartMin() > profile_unique_time_[profile_index].time) {
1803 DCHECK(profile_index < profile_unique_time_.size());
1804 ++profile_index;
1805 usage += profile_unique_time_[profile_index].delta;
1806 }
1807 PushTask(task, profile_index, usage);
1808 }
1809 }
1810
1811 // Push the given task to new_start_min, defined as the smallest integer such
1812 // that the profile usage for all tasks, excluding the current one, does not
1813 // exceed capacity_ - task->demand on the interval
1814 // [new_start_min, new_start_min + task->interval->DurationMin() ).
1815 void PushTask(const Task* const task, int profile_index, int64 usage) {
1816 // Init
1817 const IntervalVar* const interval = task->interval;
1818 const int64 demand_min = task->DemandMin();
1819 if (demand_min == 0) { // The demand can be zero; nothing to propagate.
1820 return;
1821 }
1822 const int64 residual_capacity = CapSub(capacity_->Max(), demand_min);
1823 const int64 duration = task->interval->DurationMin();
1824 const ProfileDelta& first_prof_delta = profile_unique_time_[profile_index];
1825
1826 int64 new_start_min = interval->StartMin();
1827
1828 DCHECK_GE(first_prof_delta.time, interval->StartMin());
1829 // The check above is with a '>='. Let's first treat the '>' case
1830 if (first_prof_delta.time > interval->StartMin()) {
1831 // There was no profile delta at a time between interval->StartMin()
1832 // (included) and the current one.
1833 // As we don't delete deltas of 0 value, this means the current task
1834 // does not contribute to the usage before:
1835 DCHECK((interval->StartMax() >= first_prof_delta.time) ||
1836 (interval->StartMax() >= interval->EndMin()));
1837 // The 'usage' given in argument is valid at first_prof_delta.time. To
1838 // compute the usage at the start min, we need to remove the last delta.
1839 const int64 usage_at_start_min = CapSub(usage, first_prof_delta.delta);
1840 if (usage_at_start_min > residual_capacity) {
1841 new_start_min = profile_unique_time_[profile_index].time;
1842 }
1843 }
1844
1845 // Influence of current task
1846 const int64 start_max = interval->StartMax();
1847 const int64 end_min = interval->EndMin();
1848 ProfileDelta delta_start(start_max, 0);
1849 ProfileDelta delta_end(end_min, 0);
1850 if (interval->MustBePerformed() && start_max < end_min) {
1851 delta_start.delta = +demand_min;
1852 delta_end.delta = -demand_min;
1853 }
1854 while (profile_unique_time_[profile_index].time <
1855 CapAdd(duration, new_start_min)) {
1856 const ProfileDelta& profile_delta = profile_unique_time_[profile_index];
1857 DCHECK(profile_index < profile_unique_time_.size());
1858 // Compensate for current task
1859 if (profile_delta.time == delta_start.time) {
1860 usage -= delta_start.delta;
1861 }
1862 if (profile_delta.time == delta_end.time) {
1863 usage -= delta_end.delta;
1864 }
1865 // Increment time
1866 ++profile_index;
1867 DCHECK(profile_index < profile_unique_time_.size());
1868 // Does it fit?
1869 if (usage > residual_capacity) {
1870 new_start_min = profile_unique_time_[profile_index].time;
1871 }
1872 usage += profile_unique_time_[profile_index].delta;
1873 }
1874 task->interval->SetStartMin(new_start_min);
1875 }
1876
1877 typedef std::vector<ProfileDelta> Profile;
1878
1879 Profile profile_unique_time_;
1880 Profile profile_non_unique_time_;
1881 std::vector<Task*> by_start_min_;
1882 IntVar* const capacity_;
1883
1884 DISALLOW_COPY_AND_ASSIGN(CumulativeTimeTable);
1885};
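// A small worked example of the time-table filtering above (a sketch; the
// numbers are made up): with capacity 2, a performed task with demand 2 and a
// compulsory part on [2, 5) contributes the profile deltas {2, +2} and
// {5, -2}. For a task with demand 1, duration 3 and start min 0, the residual
// capacity is 2 - 1 = 1, and the profile usage on [2, 5) is 2 > 1, so
// PushTask() raises that task's start min to 5.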
1886
1887// Cumulative idempotent Time-Table.
1888//
1889// This propagator is based on Letort et al. 2012 and Gay et al. 2015.
1890//
1891// TODO(user): fill the description once the incremental aspects are
1892// implemented.
1893//
1894// Worst case: O(n^2 log n) -- really unlikely in practice.
1895// Best case: Omega(1).
1896// Practical: Almost linear in the number of unfixed tasks.
1897template <class Task>
1898class TimeTableSync : public Constraint {
1899 public:
1900 TimeTableSync(Solver* const solver, const std::vector<Task*>& tasks,
1901 IntVar* const capacity)
1902 : Constraint(solver), tasks_(tasks), capacity_(capacity) {
1903 num_tasks_ = tasks_.size();
1904 gap_ = 0;
1905 prev_gap_ = 0;
1906 pos_ = kint64min;
1907 next_pos_ = kint64min;
1908 // Allocate vectors to contain no more than num_tasks_ elements.
1909 start_min_.reserve(num_tasks_);
1910 start_max_.reserve(num_tasks_);
1911 end_min_.reserve(num_tasks_);
1912 durations_.reserve(num_tasks_);
1913 demands_.reserve(num_tasks_);
1914 }
1915
1916 ~TimeTableSync() override { gtl::STLDeleteElements(&tasks_); }
1917
1918 void InitialPropagate() override {
1919 // Reset data structures.
1920 BuildEvents();
1921 while (!events_scp_.empty() && !events_ecp_.empty()) {
1922 // Move the sweep line.
1923 pos_ = NextEventTime();
1924 // Update the profile with compulsory part events.
1925 ProcessEventsScp();
1926 ProcessEventsEcp();
1927 // Update minimum capacity (may fail)
1928 capacity_->SetMin(capacity_->Max() - gap_);
1929 // Time to the next possible profile increase.
1930 next_pos_ = NextScpTime();
1931 // Consider new task to schedule.
1932 ProcessEventsPr();
1933 // Filter.
1934 FilterMin();
1935 }
1936 }
1937
1938 void Post() override {
1939 Demon* demon = MakeDelayedConstraintDemon0(
1940 solver(), this, &TimeTableSync::InitialPropagate, "InitialPropagate");
1941 for (Task* const task : tasks_) {
1942 task->WhenAnything(demon);
1943 }
1944 capacity_->WhenRange(demon);
1945 }
1946
1947 void Accept(ModelVisitor* const visitor) const override {
1948 LOG(FATAL) << "Should not be visited";
1949 }
1950
1951 std::string DebugString() const override { return "TimeTableSync"; }
1952
1953 private:
1954 // Task state.
1955 enum State { NONE, READY, CHECK, CONFLICT };
1956
1957 inline int64 NextScpTime() {
1958 return !events_scp_.empty() ? events_scp_.top().first : kint64max;
1959 }
1960
1961 inline int64 NextEventTime() {
1962 int64 time = kint64max;
1963 if (!events_pr_.empty()) {
1964 time = events_pr_.top().first;
1965 }
1966 if (!events_scp_.empty()) {
1967 int64 t = events_scp_.top().first;
1968 time = t < time ? t : time;
1969 }
1970 if (!events_ecp_.empty()) {
1971 int64 t = events_ecp_.top().first;
1972 time = t < time ? t : time;
1973 }
1974 return time;
1975 }
1976
1977 void ProcessEventsScp() {
1978 while (!events_scp_.empty() && events_scp_.top().first == pos_) {
1979 const int64 task_id = events_scp_.top().second;
1980 events_scp_.pop();
1981 const int64 old_end_min = end_min_[task_id];
1982 if (states_[task_id] == State::CONFLICT) {
1983 // Update cached values.
1984 const int64 new_end_min = pos_ + durations_[task_id];
1985 start_min_[task_id] = pos_;
1986 end_min_[task_id] = new_end_min;
1987 // Filter the domain
1988 tasks_[task_id]->interval->SetStartMin(pos_);
1989 }
1990 // The task is scheduled.
1991 states_[task_id] = State::READY;
1992 // Update the profile if the task has a compulsory part.
1993 if (pos_ < end_min_[task_id]) {
1994 gap_ -= demands_[task_id];
1995 if (old_end_min <= pos_) {
1996 events_ecp_.push(kv(end_min_[task_id], task_id));
1997 }
1998 }
1999 }
2000 }
2001
2002 void ProcessEventsEcp() {
2003 while (!events_ecp_.empty() && events_ecp_.top().first == pos_) {
2004 const int64 task_id = events_ecp_.top().second;
2005 events_ecp_.pop();
2006 // Update the event if it is not up to date.
2007 if (pos_ < end_min_[task_id]) {
2008 events_ecp_.push(kv(end_min_[task_id], task_id));
2009 } else {
2010 gap_ += demands_[task_id];
2011 }
2012 }
2013 }
2014
2015 void ProcessEventsPr() {
2016 while (!events_pr_.empty() && events_pr_.top().first == pos_) {
2017 const int64 task_id = events_pr_.top().second;
2018 events_pr_.pop();
2019 // The task is in conflict with the current profile.
2020 if (demands_[task_id] > gap_) {
2021 states_[task_id] = State::CONFLICT;
2022 conflict_.push(kv(demands_[task_id], task_id));
2023 continue;
2024 }
2025 // The task is not in conflict for the moment.
2026 if (next_pos_ < end_min_[task_id]) {
2027 states_[task_id] = State::CHECK;
2028 check_.push(kv(demands_[task_id], task_id));
2029 continue;
2030 }
2031 // The task is not in conflict and can be scheduled.
2032 states_[task_id] = State::READY;
2033 }
2034 }
2035
2036 void FilterMin() {
2037 // The profile exceeds the capacity.
2038 capacity_->SetMin(capacity_->Max() - gap_);
2039 // The profile has increased.
2040 if (gap_ < prev_gap_) {
2041 // Reconsider the task in check state.
2042 while (!check_.empty() && demands_[check_.top().second] > gap_) {
2043 const int64 task_id = check_.top().second;
2044 check_.pop();
2045 if (states_[task_id] == State::CHECK && pos_ < end_min_[task_id]) {
2046 states_[task_id] = State::CONFLICT;
2047 conflict_.push(kv(demands_[task_id], task_id));
2048 continue;
2049 }
2050 states_[task_id] = State::READY;
2051 }
2052 prev_gap_ = gap_;
2053 }
2054 // The profile has decreased.
2055 if (gap_ > prev_gap_) {
2056 // Reconsider the tasks in conflict.
2057 while (!conflict_.empty() && demands_[conflict_.top().second] <= gap_) {
2058 const int64 task_id = conflict_.top().second;
2059 conflict_.pop();
2060 if (states_[task_id] != State::CONFLICT) {
2061 continue;
2062 }
2063 const int64 old_end_min = end_min_[task_id];
2064 // Update the cache.
2065 start_min_[task_id] = pos_;
2066 end_min_[task_id] = pos_ + durations_[task_id];
2067 // Filter the domain.
2068 tasks_[task_id]->interval->SetStartMin(pos_); // should not fail.
2069 // The task still has to be checked.
2070 if (next_pos_ < end_min_[task_id]) {
2071 states_[task_id] = State::CHECK;
2072 check_.push(kv(demands_[task_id], task_id));
2073 } else {
2074 states_[task_id] = State::READY;
2075 }
2076 // Update possible compulsory part.
2077 const int64 start_max = start_max_[task_id];
2078 if (start_max >= old_end_min && start_max < end_min_[task_id]) {
2079 events_ecp_.push(kv(end_min_[task_id], task_id));
2080 }
2081 }
2082 }
2083 prev_gap_ = gap_;
2084 }
2085
2086 void BuildEvents() {
2087 // Reset the sweep line.
2088 pos_ = kint64min;
2089 next_pos_ = kint64min;
2090 gap_ = capacity_->Max();
2091 prev_gap_ = capacity_->Max();
2092 // Reset dynamic states.
2093 conflict_ = min_heap();
2094 check_ = max_heap();
2095 // Reset profile events.
2096 events_pr_ = min_heap();
2097 events_scp_ = min_heap();
2098 events_ecp_ = min_heap();
2099 // Reset cache.
2100 start_min_.clear();
2101 start_max_.clear();
2102 end_min_.clear();
2103 durations_.clear();
2104 demands_.clear();
2105 states_.clear();
2106 // Build events.
2107 for (int i = 0; i < num_tasks_; i++) {
2108 const int64 s_min = tasks_[i]->interval->StartMin();
2109 const int64 s_max = tasks_[i]->interval->StartMax();
2110 const int64 e_min = tasks_[i]->interval->EndMin();
2111 // Cache the values.
2112 start_min_.push_back(s_min);
2113 start_max_.push_back(s_max);
2114 end_min_.push_back(e_min);
2115 durations_.push_back(tasks_[i]->interval->DurationMin());
2116 demands_.push_back(tasks_[i]->DemandMin());
2117 // Reset task state.
2118 states_.push_back(State::NONE);
2119 // Start compulsory part event.
2120 events_scp_.push(kv(s_max, i));
2121 // Pruning event only if the start time of the task is not fixed.
2122 if (s_min != s_max) {
2123 events_pr_.push(kv(s_min, i));
2124 }
2125 // End of compulsory part only if the task has a compulsory part.
2126 if (s_max < e_min) {
2127 events_ecp_.push(kv(e_min, i));
2128 }
2129 }
2130 }
2131
2132 int64 num_tasks_;
2133 std::vector<Task*> tasks_;
2134 IntVar* const capacity_;
2135
2136 std::vector<int64> start_min_;
2137 std::vector<int64> start_max_;
2138 std::vector<int64> end_min_;
2139 std::vector<int64> end_max_;
2140 std::vector<int64> durations_;
2141 std::vector<int64> demands_;
2142
2143 // Key-value pair (key, task id) used by the event and state heaps.
2144 typedef std::pair<int64, int64> kv;
2145 typedef std::priority_queue<kv, std::vector<kv>, std::greater<kv>> min_heap;
2146 typedef std::priority_queue<kv, std::vector<kv>, std::less<kv>> max_heap;
2147
2148 // Profile events.
2149 min_heap events_pr_;
2150 min_heap events_scp_;
2151 min_heap events_ecp_;
2152
2153 // Task state.
2154 std::vector<State> states_;
2155 min_heap conflict_;
2156 max_heap check_;
2157
2158 // Sweep line state.
2159 int64 pos_;
2160 int64 next_pos_;
2161 int64 gap_;
2162 int64 prev_gap_;
2163};
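// A small worked example of the sweep above (a sketch; the numbers are made
// up): with capacity 4, a task with demand 3 and a compulsory part on [2, 6)
// lowers gap_ from 4 to 1 when its start-of-compulsory-part event is
// processed at t = 2. A second task with demand 2 whose pruning event also
// fires at t = 2 then has demand > gap_, so it is marked CONFLICT; when the
// end-of-compulsory-part event at t = 6 restores gap_ to 4, FilterMin()
// reconsiders the conflict and pushes the second task's start min to 6.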
2164
2165class CumulativeConstraint : public Constraint {
2166 public:
2167 CumulativeConstraint(Solver* const s,
2168 const std::vector<IntervalVar*>& intervals,
2169 const std::vector<int64>& demands,
2170 IntVar* const capacity, const std::string& name)
2171 : Constraint(s),
2172 capacity_(capacity),
2173 intervals_(intervals),
2174 demands_(demands) {
2175 tasks_.reserve(intervals.size());
2176 for (int i = 0; i < intervals.size(); ++i) {
2177 tasks_.push_back(CumulativeTask(intervals[i], demands[i]));
2178 }
2179 }
2180
2181 void Post() override {
2182 // For the cumulative constraint, there are many propagators, and they
2183 // don't dominate each other. So the strongest propagation is obtained
2184 // by posting a bunch of different propagators.
2185 const ConstraintSolverParameters& params = solver()->parameters();
2186 if (params.use_cumulative_time_table()) {
2187 if (params.use_cumulative_time_table_sync()) {
2188 PostOneSidedConstraint(false, false, true);
2189 PostOneSidedConstraint(true, false, true);
2190 } else {
2191 PostOneSidedConstraint(false, false, false);
2192 PostOneSidedConstraint(true, false, false);
2193 }
2194 }
2195 if (params.use_cumulative_edge_finder()) {
2196 PostOneSidedConstraint(false, true, false);
2197 PostOneSidedConstraint(true, true, false);
2198 }
2199 if (params.use_sequence_high_demand_tasks()) {
2200 PostHighDemandSequenceConstraint();
2201 }
2202 if (params.use_all_possible_disjunctions()) {
2203 PostAllDisjunctions();
2204 }
2205 }
2206
2207 void InitialPropagate() override {
2208 // Nothing to do: this constraint delegates all the work to other classes
2209 }
2210
2211 void Accept(ModelVisitor* const visitor) const override {
2212 // TODO(user): Build arrays on demand?
2213 visitor->BeginVisitConstraint(ModelVisitor::kCumulative, this);
2214 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
2215 intervals_);
2216 visitor->VisitIntegerArrayArgument(ModelVisitor::kDemandsArgument,
2217 demands_);
2218 visitor->VisitIntegerExpressionArgument(ModelVisitor::kCapacityArgument,
2219 capacity_);
2220 visitor->EndVisitConstraint(ModelVisitor::kCumulative, this);
2221 }
2222
2223 std::string DebugString() const override {
2224 return absl::StrFormat("CumulativeConstraint([%s], %s)",
2225 JoinDebugString(tasks_, ", "),
2226 capacity_->DebugString());
2227 }
2228
2229 private:
2230 // Post temporal disjunctions for tasks that cannot overlap.
2231 void PostAllDisjunctions() {
2232 for (int i = 0; i < intervals_.size(); ++i) {
2233 IntervalVar* const interval_i = intervals_[i];
2234 if (interval_i->MayBePerformed()) {
2235 for (int j = i + 1; j < intervals_.size(); ++j) {
2236 IntervalVar* const interval_j = intervals_[j];
2237 if (interval_j->MayBePerformed()) {
2238 if (CapAdd(tasks_[i].demand, tasks_[j].demand) > capacity_->Max()) {
2239 Constraint* const constraint =
2240 solver()->MakeTemporalDisjunction(interval_i, interval_j);
2241 solver()->AddConstraint(constraint);
2242 }
2243 }
2244 }
2245 }
2246 }
2247 }
2248
2249 // Post a Sequence constraint for tasks that require strictly more than half
2250 // of the resource.
2251 void PostHighDemandSequenceConstraint() {
2252 Constraint* constraint = nullptr;
2253 { // Need a block to avoid memory leaks in case the AddConstraint fails
2254 std::vector<IntervalVar*> high_demand_intervals;
2255 high_demand_intervals.reserve(intervals_.size());
2256 for (int i = 0; i < demands_.size(); ++i) {
2257 const int64 demand = tasks_[i].demand;
2258 // Consider two tasks with demand d1 and d2 such that
2259 // d1 * 2 > capacity_ and d2 * 2 > capacity_.
2260 // Then d1 + d2 = 1/2 (d1 * 2 + d2 * 2)
2261 // > 1/2 (capacity_ + capacity_)
2262 // = capacity_.
2263 // Therefore these two tasks cannot overlap.
2264 if (demand * 2 > capacity_->Max() &&
2265 tasks_[i].interval->MayBePerformed()) {
2266 high_demand_intervals.push_back(tasks_[i].interval);
2267 }
2268 }
2269 if (high_demand_intervals.size() >= 2) {
2270 // If there are less than 2 such intervals, the constraint would do
2271 // nothing
2272 std::string seq_name = absl::StrCat(name(), "-HighDemandSequence");
2273 constraint = solver()->MakeDisjunctiveConstraint(high_demand_intervals,
2274 seq_name);
2275 }
2276 }
2277 if (constraint != nullptr) {
2278 solver()->AddConstraint(constraint);
2279 }
2280 }
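// Numeric instance of the rule above (an illustrative sketch): with
// capacity_->Max() == 5, two tasks with demands 3 and 3 both satisfy
// demand * 2 > 5, and indeed 3 + 3 = 6 > 5, so they end up in the same
// disjunctive constraint.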
2281
2282 // Populate the given vector with useful tasks, meaning the ones on which
2283 // some propagation can be done
2284 void PopulateVectorUsefulTasks(
2285 bool mirror, std::vector<CumulativeTask*>* const useful_tasks) {
2286 DCHECK(useful_tasks->empty());
2287 for (int i = 0; i < tasks_.size(); ++i) {
2288 const CumulativeTask& original_task = tasks_[i];
2289 IntervalVar* const interval = original_task.interval;
2290 // Check if the demand exceeds the capacity.
2291 if (original_task.demand > capacity_->Max()) {
2292 interval->SetPerformed(false);
2293 }
2294 // Add to the useful_tasks vector if it may be performed and it
2295 // actually consumes some of the resource.
2296 if (interval->MayBePerformed() && original_task.demand > 0) {
2297 Solver* const s = solver();
2298 IntervalVar* const original_interval = original_task.interval;
2299 IntervalVar* const interval =
2300 mirror ? s->MakeMirrorInterval(original_interval)
2301 : original_interval;
2302 IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(interval);
2303 useful_tasks->push_back(
2304 new CumulativeTask(relaxed_max, original_task.demand));
2305 }
2306 }
2307 }
2308
2309 // Makes and returns an edge-finder or a time-table, or nullptr if it is not
2310 // necessary.
2311 Constraint* MakeOneSidedConstraint(bool mirror, bool edge_finder,
2312 bool tt_sync) {
2313 std::vector<CumulativeTask*> useful_tasks;
2314 PopulateVectorUsefulTasks(mirror, &useful_tasks);
2315 if (useful_tasks.empty()) {
2316 return nullptr;
2317 } else {
2318 Solver* const s = solver();
2319 if (edge_finder) {
2320 const ConstraintSolverParameters& params = solver()->parameters();
2321 return useful_tasks.size() < params.max_edge_finder_size()
2322 ? s->RevAlloc(new EdgeFinder<CumulativeTask>(s, useful_tasks,
2323 capacity_))
2324 : nullptr;
2325 }
2326 if (tt_sync) {
2327 return s->RevAlloc(
2328 new TimeTableSync<CumulativeTask>(s, useful_tasks, capacity_));
2329 }
2330 return s->RevAlloc(
2331 new CumulativeTimeTable<CumulativeTask>(s, useful_tasks, capacity_));
2332 }
2333 }
2334
2335 // Post a straight or mirrored one-sided propagator, if needed.
2336 void PostOneSidedConstraint(bool mirror, bool edge_finder, bool tt_sync) {
2337 Constraint* const constraint =
2338 MakeOneSidedConstraint(mirror, edge_finder, tt_sync);
2339 if (constraint != nullptr) {
2340 solver()->AddConstraint(constraint);
2341 }
2342 }
2343
2344 // Capacity of the cumulative resource
2345 IntVar* const capacity_;
2346
2347 // The tasks that share the cumulative resource
2348 std::vector<CumulativeTask> tasks_;
2349
2350 // Array of intervals for the visitor.
2351 const std::vector<IntervalVar*> intervals_;
2352 // Array of demands for the visitor.
2353 const std::vector<int64> demands_;
2354
2355 DISALLOW_COPY_AND_ASSIGN(CumulativeConstraint);
2356};
2357
2358class VariableDemandCumulativeConstraint : public Constraint {
2359 public:
2360 VariableDemandCumulativeConstraint(Solver* const s,
2361 const std::vector<IntervalVar*>& intervals,
2362 const std::vector<IntVar*>& demands,
2363 IntVar* const capacity,
2364 const std::string& name)
2365 : Constraint(s),
2366 capacity_(capacity),
2367 intervals_(intervals),
2368 demands_(demands) {
2369 tasks_.reserve(intervals.size());
2370 for (int i = 0; i < intervals.size(); ++i) {
2371 tasks_.push_back(VariableCumulativeTask(intervals[i], demands[i]));
2372 }
2373 }
2374
2375 void Post() override {
2376 // For the cumulative constraint, there are many propagators, and they
2377 // don't dominate each other. So the strongest propagation is obtained
2378 // by posting a bunch of different propagators.
2379 const ConstraintSolverParameters& params = solver()->parameters();
2380 if (params.use_cumulative_time_table()) {
2381 PostOneSidedConstraint(false, false, false);
2382 PostOneSidedConstraint(true, false, false);
2383 }
2384 if (params.use_cumulative_edge_finder()) {
2385 PostOneSidedConstraint(false, true, false);
2386 PostOneSidedConstraint(true, true, false);
2387 }
2388 if (params.use_sequence_high_demand_tasks()) {
2389 PostHighDemandSequenceConstraint();
2390 }
2391 if (params.use_all_possible_disjunctions()) {
2392 PostAllDisjunctions();
2393 }
2394 }
2395
2396 void InitialPropagate() override {
2397 // Nothing to do: this constraint delegates all the work to other classes
2398 }
2399
2400 void Accept(ModelVisitor* const visitor) const override {
2401 // TODO(user): Build arrays on demand?
2402 visitor->BeginVisitConstraint(ModelVisitor::kCumulative, this);
2403 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
2404 intervals_);
2405 visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kDemandsArgument,
2406 demands_);
2407 visitor->VisitIntegerExpressionArgument(ModelVisitor::kCapacityArgument,
2408 capacity_);
2409 visitor->EndVisitConstraint(ModelVisitor::kCumulative, this);
2410 }
2411
2412 std::string DebugString() const override {
2413 return absl::StrFormat("VariableDemandCumulativeConstraint([%s], %s)",
2414 JoinDebugString(tasks_, ", "),
2415 capacity_->DebugString());
2416 }
2417
2418 private:
2419 // Post temporal disjunctions for tasks that cannot overlap.
2420 void PostAllDisjunctions() {
2421 for (int i = 0; i < intervals_.size(); ++i) {
2422 IntervalVar* const interval_i = intervals_[i];
2423 if (interval_i->MayBePerformed()) {
2424 for (int j = i + 1; j < intervals_.size(); ++j) {
2425 IntervalVar* const interval_j = intervals_[j];
2426 if (interval_j->MayBePerformed()) {
2427 if (CapAdd(tasks_[i].demand->Min(), tasks_[j].demand->Min()) >
2428 capacity_->Max()) {
2429 Constraint* const constraint =
2430 solver()->MakeTemporalDisjunction(interval_i, interval_j);
2431 solver()->AddConstraint(constraint);
2432 }
2433 }
2434 }
2435 }
2436 }
2437 }
2438
2439 // Post a Sequence constraint for tasks that require strictly more than half
2440 // of the resource.
2441 void PostHighDemandSequenceConstraint() {
2442 Constraint* constraint = nullptr;
2443 { // Need a block to avoid memory leaks in case the AddConstraint fails
2444 std::vector<IntervalVar*> high_demand_intervals;
2445 high_demand_intervals.reserve(intervals_.size());
2446 for (int i = 0; i < demands_.size(); ++i) {
2447 const int64 demand = tasks_[i].demand->Min();
2448 // Consider two tasks with demand d1 and d2 such that
2449 // d1 * 2 > capacity_ and d2 * 2 > capacity_.
2450 // Then d1 + d2 = 1/2 (d1 * 2 + d2 * 2)
2451 // > 1/2 (capacity_ + capacity_)
2452 // = capacity_.
2453 // Therefore these two tasks cannot overlap.
2454 if (demand * 2 > capacity_->Max() &&
2455 tasks_[i].interval->MayBePerformed()) {
2456 high_demand_intervals.push_back(tasks_[i].interval);
2457 }
2458 }
2459 if (high_demand_intervals.size() >= 2) {
2460 // If there are less than 2 such intervals, the constraint would do
2461 // nothing
2462 const std::string seq_name =
2463 absl::StrCat(name(), "-HighDemandSequence");
2464 constraint = solver()->MakeStrictDisjunctiveConstraint(
2465 high_demand_intervals, seq_name);
2466 }
2467 }
2468 if (constraint != nullptr) {
2469 solver()->AddConstraint(constraint);
2470 }
2471 }
2472
2473 // Populates the given vector with useful tasks, meaning the ones on which
2474 // some propagation can be done
2475 void PopulateVectorUsefulTasks(
2476 bool mirror, std::vector<VariableCumulativeTask*>* const useful_tasks) {
2477 DCHECK(useful_tasks->empty());
2478 for (int i = 0; i < tasks_.size(); ++i) {
2479 const VariableCumulativeTask& original_task = tasks_[i];
2480 IntervalVar* const interval = original_task.interval;
2481 // Check if the minimum demand exceeds the capacity.
2482 if (original_task.demand->Min() > capacity_->Max()) {
2483 interval->SetPerformed(false);
2484 }
2485 // Add to the useful_tasks vector if it may be performed and it
2486 // may actually consume some of the resource.
2487 if (interval->MayBePerformed() && original_task.demand->Max() > 0) {
2488 Solver* const s = solver();
2489 IntervalVar* const original_interval = original_task.interval;
2490 IntervalVar* const interval =
2491 mirror ? s->MakeMirrorInterval(original_interval)
2492 : original_interval;
2493 IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(interval);
2494 useful_tasks->push_back(
2495 new VariableCumulativeTask(relaxed_max, original_task.demand));
2496 }
2497 }
2498 }
2499
2500 // Makes and returns an edge-finder or a time table, or nullptr if it is not
2501 // necessary.
2502 Constraint* MakeOneSidedConstraint(bool mirror, bool edge_finder,
2503 bool tt_sync) {
2504 std::vector<VariableCumulativeTask*> useful_tasks;
2505 PopulateVectorUsefulTasks(mirror, &useful_tasks);
2506 if (useful_tasks.empty()) {
2507 return nullptr;
2508 } else {
2509 Solver* const s = solver();
2510 if (edge_finder) {
2511 return s->RevAlloc(
2512 new EdgeFinder<VariableCumulativeTask>(s, useful_tasks, capacity_));
2513 }
2514 if (tt_sync) {
2515 return s->RevAlloc(new TimeTableSync<VariableCumulativeTask>(
2516 s, useful_tasks, capacity_));
2517 }
2518 return s->RevAlloc(new CumulativeTimeTable<VariableCumulativeTask>(
2519 s, useful_tasks, capacity_));
2520 }
2521 }
2522
2523 // Post a straight or mirrored one-sided propagator, if needed.
2524 void PostOneSidedConstraint(bool mirror, bool edge_finder, bool tt_sync) {
2525 Constraint* const constraint =
2526 MakeOneSidedConstraint(mirror, edge_finder, tt_sync);
2527 if (constraint != nullptr) {
2528 solver()->AddConstraint(constraint);
2529 }
2530 }
2531
2532 // Capacity of the cumulative resource
2533 IntVar* const capacity_;
2534
2535 // The tasks that share the cumulative resource
2536 std::vector<VariableCumulativeTask> tasks_;
2537
2538 // Array of intervals for the visitor.
2539 const std::vector<IntervalVar*> intervals_;
2540 // Array of demands for the visitor.
2541 const std::vector<IntVar*> demands_;
2542
2543 DISALLOW_COPY_AND_ASSIGN(VariableDemandCumulativeConstraint);
2544};
2545} // namespace
2546
2547// Sequence Constraint
2548
2549// ----- Public class -----
2550
2551DisjunctiveConstraint::DisjunctiveConstraint(
2552 Solver* const s, const std::vector<IntervalVar*>& intervals,
2553 const std::string& name)
2554 : Constraint(s), intervals_(intervals) {
2555 if (!name.empty()) {
2556 set_name(name);
2557 }
2558 transition_time_ = [](int64 x, int64 y) { return 0; };
2559}
2560
2562
2563void DisjunctiveConstraint::SetTransitionTime(
2564 std::function<int64(int64, int64)> transition_time) {
2565 if (transition_time != nullptr) {
2566 transition_time_ = transition_time;
2567 } else {
2568 transition_time_ = [](int64 x, int64 y) { return 0; };
2569 }
2570}
2571
2572// ---------- Factory methods ----------
2573
2574DisjunctiveConstraint* Solver::MakeDisjunctiveConstraint(
2575 const std::vector<IntervalVar*>& intervals, const std::string& name) {
2576 return RevAlloc(new FullDisjunctiveConstraint(this, intervals, name, false));
2577}
2578
2579DisjunctiveConstraint* Solver::MakeStrictDisjunctiveConstraint(
2580 const std::vector<IntervalVar*>& intervals, const std::string& name) {
2581 return RevAlloc(new FullDisjunctiveConstraint(this, intervals, name, true));
2582}
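// A minimal usage sketch for the two factories above together with
// SetTransitionTime() (illustrative; the transition callback and its values
// are made up):
//
//   DisjunctiveConstraint* const ct =
//       solver.MakeDisjunctiveConstraint(intervals, "disjunctive");
//   // At least 5 time units between two different tasks ranked consecutively.
//   ct->SetTransitionTime([](int64 i, int64 j) { return i == j ? 0 : 5; });
//   solver.AddConstraint(ct);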
2583
2584// Demands are constant
2585
2586Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2587 const std::vector<int64>& demands,
2588 int64 capacity, const std::string& name) {
2589 CHECK_EQ(intervals.size(), demands.size());
2590 for (int i = 0; i < intervals.size(); ++i) {
2591 CHECK_GE(demands[i], 0);
2592 }
2593 if (capacity == 1 && AreAllOnes(demands)) {
2594 return MakeDisjunctiveConstraint(intervals, name);
2595 }
2596 return RevAlloc(new CumulativeConstraint(this, intervals, demands,
2597 MakeIntConst(capacity), name));
2598}
2599
2600Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2601 const std::vector<int>& demands,
2602 int64 capacity, const std::string& name) {
2603 return MakeCumulative(intervals, ToInt64Vector(demands), capacity, name);
2604}
2605
2606Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2607 const std::vector<int64>& demands,
2608 IntVar* const capacity,
2609 const std::string& name) {
2610 CHECK_EQ(intervals.size(), demands.size());
2611 for (int i = 0; i < intervals.size(); ++i) {
2612 CHECK_GE(demands[i], 0);
2613 }
2614 return RevAlloc(
2615 new CumulativeConstraint(this, intervals, demands, capacity, name));
2616}
2617
2618Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2619 const std::vector<int>& demands,
2620 IntVar* const capacity,
2621 const std::string& name) {
2622 return MakeCumulative(intervals, ToInt64Vector(demands), capacity, name);
2623}
2624
2625// Demands are variable
2626
2627Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2628 const std::vector<IntVar*>& demands,
2629 int64 capacity, const std::string& name) {
2630 CHECK_EQ(intervals.size(), demands.size());
2631 for (int i = 0; i < intervals.size(); ++i) {
2632 CHECK_GE(demands[i]->Min(), 0);
2633 }
2634 if (AreAllBound(demands)) {
2635 std::vector<int64> fixed_demands(demands.size());
2636 for (int i = 0; i < demands.size(); ++i) {
2637 fixed_demands[i] = demands[i]->Value();
2638 }
2639 return MakeCumulative(intervals, fixed_demands, capacity, name);
2640 }
2641 return RevAlloc(new VariableDemandCumulativeConstraint(
2642 this, intervals, demands, MakeIntConst(capacity), name));
2643}
2644
2645Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2646 const std::vector<IntVar*>& demands,
2647 IntVar* const capacity,
2648 const std::string& name) {
2649 CHECK_EQ(intervals.size(), demands.size());
2650 for (int i = 0; i < intervals.size(); ++i) {
2651 CHECK_GE(demands[i]->Min(), 0);
2652 }
2653 if (AreAllBound(demands)) {
2654 std::vector<int64> fixed_demands(demands.size());
2655 for (int i = 0; i < demands.size(); ++i) {
2656 fixed_demands[i] = demands[i]->Value();
2657 }
2658 return MakeCumulative(intervals, fixed_demands, capacity, name);
2659 }
2660 return RevAlloc(new VariableDemandCumulativeConstraint(
2661 this, intervals, demands, capacity, name));
2662}
2663} // namespace operations_research
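// Example use of the cumulative factory methods above (an illustrative
// sketch; the horizon, durations and demands are made up):
//
//   Solver solver("cumulative_example");
//   std::vector<IntervalVar*> intervals;
//   for (int i = 0; i < 3; ++i) {
//     intervals.push_back(solver.MakeFixedDurationIntervalVar(
//         /*start_min=*/0, /*start_max=*/10, /*duration=*/3,
//         /*optional=*/false, absl::StrCat("task_", i)));
//   }
//   const std::vector<int64> demands = {1, 2, 2};
//   solver.AddConstraint(
//       solver.MakeCumulative(intervals, demands, /*capacity=*/3, "cumul"));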