Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
resource.cc
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// This file contains implementations of several resource constraints.
15// The implemented constraints are:
16// * Disjunctive: forces a set of intervals to be non-overlapping
17// * Cumulative: forces a set of intervals with associated demands to be such
18// that the sum of demands of the intervals containing any given integer
19// does not exceed a capacity.
20// In addition, it implements the SequenceVar that allows ranking decisions
21// on a set of interval variables.
22
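// A minimal usage sketch of the public entry points that these constraints
// back (illustration only; it assumes the usual factory methods declared in
// constraint_solver.h, e.g. Solver::MakeDisjunctiveConstraint() and
// Solver::MakeCumulative()):
//
//   Solver solver("jobshop");
//   std::vector<IntervalVar*> tasks = ...;   // one IntervalVar per activity
//   // Unary resource: no two tasks may overlap in time.
//   solver.AddConstraint(solver.MakeDisjunctiveConstraint(tasks, "machine"));
//   // Cumulative resource: at any time point, the summed demand of the tasks
//   // running at that time must not exceed the capacity (here 5).
//   std::vector<int64_t> demands = ...;      // same size as 'tasks'
//   solver.AddConstraint(solver.MakeCumulative(tasks, demands, 5, "workers"));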
23#include <algorithm>
24#include <cstdint>
25#include <functional>
26#include <limits>
27#include <queue>
28#include <string>
29#include <utility>
30#include <vector>
31
32#include "absl/container/flat_hash_map.h"
33#include "absl/strings/str_cat.h"
34#include "absl/strings/str_format.h"
35#include "absl/strings/str_join.h"
36#include "absl/strings/string_view.h"
41#include "ortools/base/types.h"
44#include "ortools/util/bitset.h"
48
49namespace operations_research {
50namespace {
51// ----- Comparison functions -----
52
53// TODO(user): Tie breaking.
54
55// Comparison methods, used by the STL sort.
56template <class Task>
57bool StartMinLessThan(Task* const w1, Task* const w2) {
58 return (w1->interval->StartMin() < w2->interval->StartMin());
59}
60
61// A comparator that sorts the tasks by their effective earliest start time when
62// using the shortest duration possible. This comparator can be used when
63// sorting the tasks before they are inserted to a Theta-tree.
64template <class Task>
65bool ShortestDurationStartMinLessThan(Task* const w1, Task* const w2) {
66 return w1->interval->EndMin() - w1->interval->DurationMin() <
67 w2->interval->EndMin() - w2->interval->DurationMin();
68}
69
70template <class Task>
71bool StartMaxLessThan(Task* const w1, Task* const w2) {
72 return (w1->interval->StartMax() < w2->interval->StartMax());
73}
74
75template <class Task>
76bool EndMinLessThan(Task* const w1, Task* const w2) {
77 return (w1->interval->EndMin() < w2->interval->EndMin());
78}
79
80template <class Task>
81bool EndMaxLessThan(Task* const w1, Task* const w2) {
82 return (w1->interval->EndMax() < w2->interval->EndMax());
83}
84
85bool IntervalStartMinLessThan(IntervalVar* i1, IntervalVar* i2) {
86 return i1->StartMin() < i2->StartMin();
87}
88
89// ----- Wrappers around intervals -----
90
91// A DisjunctiveTask is a non-preemptive task sharing a disjunctive resource.
92// That is, it corresponds to an interval, and this interval cannot overlap with
93// any other interval of a DisjunctiveTask sharing the same resource.
94// It is indexed, that is it is aware of its position in a reference array.
95struct DisjunctiveTask {
96 explicit DisjunctiveTask(IntervalVar* const interval_)
97 : interval(interval_), index(-1) {}
98
99 std::string DebugString() const { return interval->DebugString(); }
100
101 IntervalVar* interval;
102 int index;
103};
104
105// A CumulativeTask is a non-preemptive task sharing a cumulative resource.
106// That is, it corresponds to an interval and a demand. The sum of demands of
107// all CumulativeTasks sharing a resource of capacity c whose intervals
108// contain any integer t cannot exceed c.
109// It is indexed, that is it is aware of its position in a reference array.
110struct CumulativeTask {
111 CumulativeTask(IntervalVar* const interval_, int64_t demand_)
112 : interval(interval_), demand(demand_), index(-1) {}
113
114 int64_t EnergyMin() const { return interval->DurationMin() * demand; }
115
116 int64_t DemandMin() const { return demand; }
117
118 void WhenAnything(Demon* const demon) { interval->WhenAnything(demon); }
119
120 std::string DebugString() const {
121 return absl::StrFormat("Task{ %s, demand: %d }", interval->DebugString(),
122 demand);
123 }
124
125 IntervalVar* interval;
126 int64_t demand;
127 int index;
128};
129
130// A VariableCumulativeTask is a non-preemptive task sharing a
131// cumulative resource. That is, it corresponds to an interval and a
132// demand. The sum of demands of all VariableCumulativeTasks sharing a
133// resource of capacity c whose
134// intervals contain any integer t cannot exceed c. It is indexed,
135// that is it is aware of its position in a reference array.
136struct VariableCumulativeTask {
137 VariableCumulativeTask(IntervalVar* const interval_, IntVar* demand_)
138 : interval(interval_), demand(demand_), index(-1) {}
139
140 int64_t EnergyMin() const { return interval->DurationMin() * demand->Min(); }
141
142 int64_t DemandMin() const { return demand->Min(); }
143
144 void WhenAnything(Demon* const demon) {
145 interval->WhenAnything(demon);
146 demand->WhenRange(demon);
147 }
148
149 std::string DebugString() const {
150 return absl::StrFormat("Task{ %s, demand: %s }", interval->DebugString(),
151 demand->DebugString());
152 }
153
154 IntervalVar* const interval;
155 IntVar* const demand;
156 int index;
157};
158
159// ---------- Theta-Trees ----------
160
161// This is based on Petr Vilim's (public) PhD work.
162// All names come from his work. See http://vilim.eu/petr.
163
164// Node of a Theta-tree
165struct ThetaNode {
166 // Identity element
167 ThetaNode()
168 : total_processing(0), total_ect(std::numeric_limits<int64_t>::min()) {}
169
170 // Single interval element
171 explicit ThetaNode(const IntervalVar* const interval)
172 : total_processing(interval->DurationMin()),
173 total_ect(interval->EndMin()) {
174 // NOTE(user): Petr Vilim's thesis assumes that all tasks in the
175 // scheduling problem have fixed duration and that propagation already
176 // updated the bounds of the start/end times accordingly.
177 // The problem in this case is that the recursive formula for computing
178 // total_ect was only proved for the case where the duration is fixed; in
179 // our case, we use StartMin() + DurationMin() for the earliest completion
180 // time of a task, which should not break any assumptions, but may give
181 // bounds that are too loose.
182 }
183
184 void Compute(const ThetaNode& left, const ThetaNode& right) {
185 total_processing = CapAdd(left.total_processing, right.total_processing);
186 total_ect = std::max(CapAdd(left.total_ect, right.total_processing),
187 right.total_ect);
188 }
189
190 bool IsIdentity() const {
191 return total_processing == 0LL &&
192 total_ect == std::numeric_limits<int64_t>::min();
193 }
194
195 std::string DebugString() const {
196 return absl::StrCat("ThetaNode{ p = ", total_processing,
197 ", e = ", total_ect < 0LL ? -1LL : total_ect, " }");
198 }
199
200 int64_t total_processing;
201 int64_t total_ect;
202};
203
204// A theta-tree is a container for a set of intervals supporting the following
205// operations:
206// * Insertion and deletion in O(log size_), with size_ the maximal number of
207// tasks the tree may contain;
208// * Querying the following quantity in O(1):
209// Max_{subset S of the set of contained intervals} (
210// Min_{i in S}(i.StartMin) + Sum_{i in S}(i.DurationMin) )
211class ThetaTree : public MonoidOperationTree<ThetaNode> {
212 public:
213 explicit ThetaTree(int size) : MonoidOperationTree<ThetaNode>(size) {}
214
215 int64_t Ect() const { return result().total_ect; }
216
217 void Insert(const DisjunctiveTask* const task) {
218 Set(task->index, ThetaNode(task->interval));
219 }
220
221 void Remove(const DisjunctiveTask* const task) { Reset(task->index); }
222
223 bool IsInserted(const DisjunctiveTask* const task) const {
224 return !GetOperand(task->index).IsIdentity();
225 }
226};
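// Minimal usage sketch of the ThetaTree above (illustration only; the real
// propagators below do the same with extra bookkeeping). Tasks must be
// indexed by their position in a start-min-sorted vector before insertion.
// For example, with propagated bounds and (start_min, duration) = (0, 3),
// (1, 4), (2, 2), Ect() = max(0 + 9, 1 + 6, 2 + 2) = 9.
inline int64_t EctOfTaskSetSketch(
    const std::vector<DisjunctiveTask*>& by_start_min, ThetaTree* theta_tree) {
  theta_tree->Clear();
  for (DisjunctiveTask* const task : by_start_min) {
    theta_tree->Insert(task);  // Stored at leaf position task->index.
  }
  // Max over subsets S of (min start_min in S + sum of duration_min over S).
  return theta_tree->Ect();
}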
227
228// ----------------- Lambda Theta Tree -----------------------
229
230// Lambda-theta-node
231// These nodes are cumulative lambda theta-nodes. This is reflected in the
232// terminology. They can also be used in the disjunctive case, and this incurs
233// no performance penalty.
234struct LambdaThetaNode {
235 // Special value for task indices meaning 'no such task'.
236 static const int kNone;
237
238 // Identity constructor
239 LambdaThetaNode()
240 : energy(0LL),
241 energetic_end_min(std::numeric_limits<int64_t>::min()),
242 energy_opt(0LL),
243 argmax_energy_opt(kNone),
244 energetic_end_min_opt(std::numeric_limits<int64_t>::min()),
245 argmax_energetic_end_min_opt(kNone) {}
246
247 // Constructor for a single cumulative task in the Theta set
248 LambdaThetaNode(int64_t capacity, const CumulativeTask& task)
249 : energy(task.EnergyMin()),
250 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
251 energy_opt(energy),
252 argmax_energy_opt(kNone),
253 energetic_end_min_opt(energetic_end_min),
254 argmax_energetic_end_min_opt(kNone) {}
255
256 // Constructor for a single cumulative task in the Lambda set
257 LambdaThetaNode(int64_t capacity, const CumulativeTask& task, int index)
258 : energy(0LL),
259 energetic_end_min(std::numeric_limits<int64_t>::min()),
260 energy_opt(task.EnergyMin()),
261 argmax_energy_opt(index),
262 energetic_end_min_opt(capacity * task.interval->StartMin() +
263 energy_opt),
264 argmax_energetic_end_min_opt(index) {
265 DCHECK_GE(index, 0);
266 }
267
268 // Constructor for a single cumulative task in the Theta set
269 LambdaThetaNode(int64_t capacity, const VariableCumulativeTask& task)
270 : energy(task.EnergyMin()),
271 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
272 energy_opt(energy),
273 argmax_energy_opt(kNone),
274 energetic_end_min_opt(energetic_end_min),
275 argmax_energetic_end_min_opt(kNone) {}
276
277 // Constructor for a single cumulative task in the Lambda set
278 LambdaThetaNode(int64_t capacity, const VariableCumulativeTask& task,
279 int index)
280 : energy(0LL),
281 energetic_end_min(std::numeric_limits<int64_t>::min()),
282 energy_opt(task.EnergyMin()),
283 argmax_energy_opt(index),
284 energetic_end_min_opt(capacity * task.interval->StartMin() +
285 energy_opt),
286 argmax_energetic_end_min_opt(index) {
287 DCHECK_GE(index, 0);
288 }
289
290 // Constructor for a single interval in the Theta set
291 explicit LambdaThetaNode(const IntervalVar* const interval)
292 : energy(interval->DurationMin()),
293 energetic_end_min(interval->EndMin()),
294 energy_opt(interval->DurationMin()),
295 argmax_energy_opt(kNone),
296 energetic_end_min_opt(interval->EndMin()),
297 argmax_energetic_end_min_opt(kNone) {}
298
299 // Constructor for a single interval in the Lambda set
300 // 'index' is the index of the given interval in the est vector
301 LambdaThetaNode(const IntervalVar* const interval, int index)
302 : energy(0LL),
303 energetic_end_min(std::numeric_limits<int64_t>::min()),
304 energy_opt(interval->DurationMin()),
305 argmax_energy_opt(index),
306 energetic_end_min_opt(interval->EndMin()),
307 argmax_energetic_end_min_opt(index) {
308 DCHECK_GE(index, 0);
309 }
310
311 // Sets this LambdaThetaNode to the result of the natural binary operations
312 // over the two given operands, corresponding to the following set operations:
313 // Theta = left.Theta union right.Theta
314 // Lambda = left.Lambda union right.Lambda
315 //
316// No set operation actually occurs: we only maintain the relevant quantities
317 // associated with such sets.
318 void Compute(const LambdaThetaNode& left, const LambdaThetaNode& right) {
319 energy = CapAdd(left.energy, right.energy);
320 energetic_end_min = std::max(right.energetic_end_min,
321 CapAdd(left.energetic_end_min, right.energy));
322 const int64_t energy_left_opt = CapAdd(left.energy_opt, right.energy);
323 const int64_t energy_right_opt = CapAdd(left.energy, right.energy_opt);
324 if (energy_left_opt > energy_right_opt) {
325 energy_opt = energy_left_opt;
326 argmax_energy_opt = left.argmax_energy_opt;
327 } else {
328 energy_opt = energy_right_opt;
329 argmax_energy_opt = right.argmax_energy_opt;
330 }
331 const int64_t ect1 = right.energetic_end_min_opt;
332 const int64_t ect2 = CapAdd(left.energetic_end_min, right.energy_opt);
333 const int64_t ect3 = CapAdd(left.energetic_end_min_opt, right.energy);
334 if (ect1 >= ect2 && ect1 >= ect3) { // ect1 max
335 energetic_end_min_opt = ect1;
336 argmax_energetic_end_min_opt = right.argmax_energetic_end_min_opt;
337 } else if (ect2 >= ect1 && ect2 >= ect3) { // ect2 max
338 energetic_end_min_opt = ect2;
339 argmax_energetic_end_min_opt = right.argmax_energy_opt;
340 } else { // ect3 max
341 energetic_end_min_opt = ect3;
342 argmax_energetic_end_min_opt = left.argmax_energetic_end_min_opt;
343 }
344 // The processing time, with one grey interval, should be no less than
345 // without any grey interval.
346 DCHECK(energy_opt >= energy);
347 // If there is no responsible grey interval for the processing time,
348 // the processing time with a grey interval should equal the one
349 // without.
350 DCHECK((argmax_energy_opt != kNone) || (energy_opt == energy));
351 }
352
353 // Amount of resource consumed by the Theta set, in units of demand X time.
354 // This is energy(Theta).
355 int64_t energy;
356
357 // Max_{subset S of Theta} (capacity * start_min(S) + energy(S))
358 int64_t energetic_end_min;
359
360 // Max_{i in Lambda} (energy(Theta union {i}))
361 int64_t energy_opt;
362
363 // The argmax in energy_opt_. It is the index of the chosen task in the Lambda
364 // set, if any, or kNone if none.
365 int argmax_energy_opt;
366
367 // Max_{subset S of Theta, i in Lambda}
368 // (capacity * start_min(S union {i}) + energy(S union {i}))
369 int64_t energetic_end_min_opt;
370
371 // The argmax in energetic_end_min_opt_. It is the index of the chosen task in
372 // the Lambda set, if any, or kNone if none.
373 int argmax_energetic_end_min_opt;
374};
375
376const int LambdaThetaNode::kNone = -1;
377
378// Disjunctive Lambda-Theta tree
379class DisjunctiveLambdaThetaTree : public MonoidOperationTree<LambdaThetaNode> {
380 public:
381 explicit DisjunctiveLambdaThetaTree(int size)
382 : MonoidOperationTree<LambdaThetaNode>(size) {}
383
384 void Insert(const DisjunctiveTask& task) {
385 Set(task.index, LambdaThetaNode(task.interval));
386 }
387
388 void Grey(const DisjunctiveTask& task) {
389 const int index = task.index;
390 Set(index, LambdaThetaNode(task.interval, index));
391 }
392
393 int64_t Ect() const { return result().energetic_end_min; }
394 int64_t EctOpt() const { return result().energetic_end_min_opt; }
395 int ResponsibleOpt() const { return result().argmax_energetic_end_min_opt; }
396};
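// Sketch of the core edge-finding step driven by the lambda-theta tree
// (illustration only; the full algorithm, with sorting and bound application,
// is EdgeFinderAndDetectablePrecedences::EdgeFinder() below). A grey (Lambda)
// task that makes the optional earliest completion time exceed the white
// (Theta) set's end max must be scheduled after the whole white set.
inline void EdgeFindingStepSketch(DisjunctiveLambdaThetaTree* lt_tree,
                                  int64_t white_end_max,
                                  std::vector<int64_t>* new_start_min) {
  while (lt_tree->EctOpt() > white_end_max) {
    const int i = lt_tree->ResponsibleOpt();  // Grey task causing the excess.
    // Its start min can be raised to the white set's earliest completion time.
    (*new_start_min)[i] = std::max((*new_start_min)[i], lt_tree->Ect());
    lt_tree->Reset(i);  // Drop it and look for further culprits.
  }
}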
397
398// A cumulative lambda-theta tree
399class CumulativeLambdaThetaTree : public MonoidOperationTree<LambdaThetaNode> {
400 public:
401 CumulativeLambdaThetaTree(int size, int64_t capacity_max)
402 : MonoidOperationTree<LambdaThetaNode>(size),
403 capacity_max_(capacity_max) {}
404
405 void Init(int64_t capacity_max) {
406 Clear();
407 capacity_max_ = capacity_max;
408 }
409
410 void Insert(const CumulativeTask& task) {
411 Set(task.index, LambdaThetaNode(capacity_max_, task));
412 }
413
414 void Grey(const CumulativeTask& task) {
415 const int index = task.index;
416 Set(index, LambdaThetaNode(capacity_max_, task, index));
417 }
418
419 void Insert(const VariableCumulativeTask& task) {
420 Set(task.index, LambdaThetaNode(capacity_max_, task));
421 }
422
423 void Grey(const VariableCumulativeTask& task) {
424 const int index = task.index;
425 Set(index, LambdaThetaNode(capacity_max_, task, index));
426 }
427
428 int64_t energetic_end_min() const { return result().energetic_end_min; }
429 int64_t energetic_end_min_opt() const {
430 return result().energetic_end_min_opt;
431 }
432 int64_t Ect() const {
433 return MathUtil::CeilOfRatio(energetic_end_min(), capacity_max_);
434 }
435 int64_t EctOpt() const {
436 return MathUtil::CeilOfRatio(result().energetic_end_min_opt, capacity_max_);
437 }
438 int argmax_energetic_end_min_opt() const {
439 return result().argmax_energetic_end_min_opt;
440 }
441
442 private:
443 int64_t capacity_max_;
444};
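// Minimal sketch of the cumulative earliest-completion-time bound computed by
// the tree above (illustration only). With capacity 2 and two tasks of demand
// 1, duration 4 and start min 0, the energetic end min is
// 2 * 0 + (1 * 4 + 1 * 4) = 8, hence Ect() = ceil(8 / 2) = 4.
inline int64_t CumulativeEctSketch(const std::vector<CumulativeTask*>& tasks,
                                   int64_t capacity_max,
                                   CumulativeLambdaThetaTree* tree) {
  tree->Init(capacity_max);
  for (CumulativeTask* const task : tasks) {
    tree->Insert(*task);  // Assumes task->index is set to its leaf position.
  }
  return tree->Ect();  // ceil(energetic_end_min / capacity_max).
}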
445
446// -------------- Not Last -----------------------------------------
447
448// A class that implements the 'Not-Last' propagation algorithm for the unary
449// resource constraint.
450class NotLast {
451 public:
452 NotLast(Solver* solver, const std::vector<IntervalVar*>& intervals,
453 bool mirror, bool strict);
454
455 ~NotLast() { gtl::STLDeleteElements(&by_start_min_); }
456
457 bool Propagate();
458
459 private:
460 ThetaTree theta_tree_;
461 std::vector<DisjunctiveTask*> by_start_min_;
462 std::vector<DisjunctiveTask*> by_end_max_;
463 std::vector<DisjunctiveTask*> by_start_max_;
464 std::vector<int64_t> new_lct_;
465 const bool strict_;
466};
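// Worked example of the Not-Last rule implemented below (illustration only):
// take A fixed to start at 4 with duration 5 (so A = [4, 9]), and B with
// start in [0, 8] and duration 3 (so B may end as late as 11). The earliest
// completion time of {A} is 9, which exceeds StartMax(B) = 8, so B cannot be
// scheduled last: some task must follow it, and B therefore has to end no
// later than StartMax(A) = 4. Propagate() records this as new_lct_[B] = 4 and
// then applies it with SetEndMax().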
467
468NotLast::NotLast(Solver* const solver,
469 const std::vector<IntervalVar*>& intervals, bool mirror,
470 bool strict)
471 : theta_tree_(intervals.size()),
472 by_start_min_(intervals.size()),
473 by_end_max_(intervals.size()),
474 by_start_max_(intervals.size()),
475 new_lct_(intervals.size(), -1LL),
476 strict_(strict) {
477 // Populate the different vectors.
478 for (int i = 0; i < intervals.size(); ++i) {
479 IntervalVar* const underlying =
480 mirror ? solver->MakeMirrorInterval(intervals[i]) : intervals[i];
481 IntervalVar* const relaxed = solver->MakeIntervalRelaxedMin(underlying);
482 by_start_min_[i] = new DisjunctiveTask(relaxed);
483 by_end_max_[i] = by_start_min_[i];
484 by_start_max_[i] = by_start_min_[i];
485 }
486}
487
488bool NotLast::Propagate() {
489 // ---- Init ----
490 std::sort(by_start_max_.begin(), by_start_max_.end(),
491 StartMaxLessThan<DisjunctiveTask>);
492 std::sort(by_end_max_.begin(), by_end_max_.end(),
493 EndMaxLessThan<DisjunctiveTask>);
494 // Update start min positions
495 std::sort(by_start_min_.begin(), by_start_min_.end(),
496 StartMinLessThan<DisjunctiveTask>);
497 for (int i = 0; i < by_start_min_.size(); ++i) {
498 by_start_min_[i]->index = i;
499 }
500 theta_tree_.Clear();
501 for (int i = 0; i < by_start_min_.size(); ++i) {
502 new_lct_[i] = by_start_min_[i]->interval->EndMax();
503 }
504
505 // --- Execute ----
506 int j = 0;
507 for (DisjunctiveTask* const twi : by_end_max_) {
508 while (j < by_start_max_.size() &&
509 twi->interval->EndMax() > by_start_max_[j]->interval->StartMax()) {
510 if (j > 0 && theta_tree_.Ect() > by_start_max_[j]->interval->StartMax()) {
511 const int64_t new_end_max = by_start_max_[j - 1]->interval->StartMax();
512 new_lct_[by_start_max_[j]->index] =
513 std::min(new_lct_[by_start_max_[j]->index], new_end_max);
514 }
515 theta_tree_.Insert(by_start_max_[j]);
516 j++;
517 }
518 const bool inserted = theta_tree_.IsInserted(twi);
519 if (inserted) {
520 theta_tree_.Remove(twi);
521 }
522 const int64_t ect_theta_less_i = theta_tree_.Ect();
523 if (inserted) {
524 theta_tree_.Insert(twi);
525 }
526
527 if (ect_theta_less_i > twi->interval->StartMax() && j > 0) {
528 const int64_t new_end_max = by_start_max_[j - 1]->interval->StartMax();
529 if (new_end_max < new_lct_[twi->index]) {
530 new_lct_[twi->index] = new_end_max;
531 }
532 }
533 }
534
535 // Apply modifications
536 bool modified = false;
537 for (int i = 0; i < by_start_min_.size(); ++i) {
538 IntervalVar* const var = by_start_min_[i]->interval;
539 if ((strict_ || var->DurationMin() > 0) && var->EndMax() > new_lct_[i]) {
540 modified = true;
541 var->SetEndMax(new_lct_[i]);
542 }
543 }
544 return modified;
545}
546
547// ------ Edge finder + detectable precedences -------------
548
549// A class that implements two propagation algorithms: edge finding and
550// detectable precedences. These algorithms both push intervals to the right,
551// which is why they are grouped together.
552class EdgeFinderAndDetectablePrecedences {
553 public:
554 EdgeFinderAndDetectablePrecedences(Solver* solver,
555 const std::vector<IntervalVar*>& intervals,
556 bool mirror, bool strict);
557 ~EdgeFinderAndDetectablePrecedences() {
558 gtl::STLDeleteElements(&by_start_min_);
559 }
560 int64_t size() const { return by_start_min_.size(); }
561 IntervalVar* interval(int index) { return by_start_min_[index]->interval; }
562 void UpdateEst();
563 void OverloadChecking();
564 bool DetectablePrecedences();
565 bool EdgeFinder();
566
567 private:
568 Solver* const solver_;
569
570 // --- All the following member variables are essentially used as local ones:
571 // no invariant is maintained about them, except for the fact that the vectors
572 // always contain all the considered intervals, so any function that wants to
573 // use them must first sort them in the right order.
574
575 // All of these vectors store the same set of objects. Therefore, at
576 // destruction time, STLDeleteElements should be called on only one of them.
577 // It does not matter which one.
578
579 ThetaTree theta_tree_;
580 std::vector<DisjunctiveTask*> by_end_min_;
581 std::vector<DisjunctiveTask*> by_start_min_;
582 std::vector<DisjunctiveTask*> by_end_max_;
583 std::vector<DisjunctiveTask*> by_start_max_;
584 // new_est_[i] is the new start min for interval est_[i]->interval.
585 std::vector<int64_t> new_est_;
586 // new_lct_[i] is the new end max for interval est_[i]->interval.
587 std::vector<int64_t> new_lct_;
588 DisjunctiveLambdaThetaTree lt_tree_;
589 const bool strict_;
590};
591
592EdgeFinderAndDetectablePrecedences::EdgeFinderAndDetectablePrecedences(
593 Solver* const solver, const std::vector<IntervalVar*>& intervals,
594 bool mirror, bool strict)
595 : solver_(solver),
596 theta_tree_(intervals.size()),
597 lt_tree_(intervals.size()),
598 strict_(strict) {
599 // Populate the array of intervals.
600 for (IntervalVar* const interval : intervals) {
601 IntervalVar* const underlying =
602 mirror ? solver->MakeMirrorInterval(interval) : interval;
603 IntervalVar* const relaxed = solver->MakeIntervalRelaxedMax(underlying);
604 DisjunctiveTask* const task = new DisjunctiveTask(relaxed);
605 by_end_min_.push_back(task);
606 by_start_min_.push_back(task);
607 by_end_max_.push_back(task);
608 by_start_max_.push_back(task);
609 new_est_.push_back(std::numeric_limits<int64_t>::min());
610 }
611}
612
613void EdgeFinderAndDetectablePrecedences::UpdateEst() {
614 std::sort(by_start_min_.begin(), by_start_min_.end(),
615 ShortestDurationStartMinLessThan<DisjunctiveTask>);
616 for (int i = 0; i < size(); ++i) {
617 by_start_min_[i]->index = i;
618 }
619}
620
621void EdgeFinderAndDetectablePrecedences::OverloadChecking() {
622 // Initialization.
623 UpdateEst();
624 std::sort(by_end_max_.begin(), by_end_max_.end(),
625 EndMaxLessThan<DisjunctiveTask>);
626 theta_tree_.Clear();
627
628 for (DisjunctiveTask* const task : by_end_max_) {
629 theta_tree_.Insert(task);
630 if (theta_tree_.Ect() > task->interval->EndMax()) {
631 solver_->Fail();
632 }
633 }
634}
635
636bool EdgeFinderAndDetectablePrecedences::DetectablePrecedences() {
637 // Initialization.
638 UpdateEst();
639 new_est_.assign(size(), std::numeric_limits<int64_t>::min());
640
641 // Propagate in one direction
642 std::sort(by_end_min_.begin(), by_end_min_.end(),
643 EndMinLessThan<DisjunctiveTask>);
644 std::sort(by_start_max_.begin(), by_start_max_.end(),
645 StartMaxLessThan<DisjunctiveTask>);
646 theta_tree_.Clear();
647 int j = 0;
648 for (DisjunctiveTask* const task_i : by_end_min_) {
649 if (j < size()) {
650 DisjunctiveTask* task_j = by_start_max_[j];
651 while (task_i->interval->EndMin() > task_j->interval->StartMax()) {
652 theta_tree_.Insert(task_j);
653 j++;
654 if (j == size()) break;
655 task_j = by_start_max_[j];
656 }
657 }
658 const int64_t esti = task_i->interval->StartMin();
659 bool inserted = theta_tree_.IsInserted(task_i);
660 if (inserted) {
661 theta_tree_.Remove(task_i);
662 }
663 const int64_t oesti = theta_tree_.Ect();
664 if (inserted) {
665 theta_tree_.Insert(task_i);
666 }
667 if (oesti > esti) {
668 new_est_[task_i->index] = oesti;
669 } else {
670 new_est_[task_i->index] = std::numeric_limits<int64_t>::min();
671 }
672 }
673
674 // Apply modifications
675 bool modified = false;
676 for (int i = 0; i < size(); ++i) {
677 IntervalVar* const var = by_start_min_[i]->interval;
678 if (new_est_[i] != std::numeric_limits<int64_t>::min() &&
679 (strict_ || var->DurationMin() > 0)) {
680 modified = true;
681 by_start_min_[i]->interval->SetStartMin(new_est_[i]);
682 }
683 }
684 return modified;
685}
686
687bool EdgeFinderAndDetectablePrecedences::EdgeFinder() {
688 // Initialization.
689 UpdateEst();
690 for (int i = 0; i < size(); ++i) {
691 new_est_[i] = by_start_min_[i]->interval->StartMin();
692 }
693
694 // Push in one direction.
695 std::sort(by_end_max_.begin(), by_end_max_.end(),
696 EndMaxLessThan<DisjunctiveTask>);
697 lt_tree_.Clear();
698 for (int i = 0; i < size(); ++i) {
699 lt_tree_.Insert(*by_start_min_[i]);
700 DCHECK_EQ(i, by_start_min_[i]->index);
701 }
702 for (int j = size() - 2; j >= 0; --j) {
703 lt_tree_.Grey(*by_end_max_[j + 1]);
704 DisjunctiveTask* const twj = by_end_max_[j];
705 // We should have checked for overloading earlier.
706 DCHECK_LE(lt_tree_.Ect(), twj->interval->EndMax());
707 while (lt_tree_.EctOpt() > twj->interval->EndMax()) {
708 const int i = lt_tree_.ResponsibleOpt();
709 DCHECK_GE(i, 0);
710 if (lt_tree_.Ect() > new_est_[i]) {
711 new_est_[i] = lt_tree_.Ect();
712 }
713 lt_tree_.Reset(i);
714 }
715 }
716
717 // Apply modifications.
718 bool modified = false;
719 for (int i = 0; i < size(); ++i) {
720 IntervalVar* const var = by_start_min_[i]->interval;
721 if (var->StartMin() < new_est_[i] && (strict_ || var->DurationMin() > 0)) {
722 modified = true;
723 var->SetStartMin(new_est_[i]);
724 }
725 }
726 return modified;
727}
728
729// --------- Disjunctive Constraint ----------
730
731// ----- Propagation on ranked activities -----
732
733class RankedPropagator : public Constraint {
734 public:
735 RankedPropagator(Solver* const solver, const std::vector<IntVar*>& nexts,
736 const std::vector<IntervalVar*>& intervals,
737 const std::vector<IntVar*>& slacks,
738 DisjunctiveConstraint* const disjunctive)
739 : Constraint(solver),
740 nexts_(nexts),
741 intervals_(intervals),
742 slacks_(slacks),
743 disjunctive_(disjunctive),
744 partial_sequence_(intervals.size()),
745 previous_(intervals.size() + 2, 0) {}
746
747 ~RankedPropagator() override {}
748
749 void Post() override {
750 Demon* const delayed =
751 solver()->MakeDelayedConstraintInitialPropagateCallback(this);
752 for (int i = 0; i < intervals_.size(); ++i) {
753 nexts_[i]->WhenBound(delayed);
754 intervals_[i]->WhenAnything(delayed);
755 slacks_[i]->WhenRange(delayed);
756 }
757 nexts_.back()->WhenBound(delayed);
758 }
759
760 void InitialPropagate() override {
761 PropagateNexts();
762 PropagateSequence();
763 }
764
765 void PropagateNexts() {
766 Solver* const s = solver();
767 const int ranked_first = partial_sequence_.NumFirstRanked();
768 const int ranked_last = partial_sequence_.NumLastRanked();
769 const int sentinel =
770 ranked_last == 0
771 ? nexts_.size()
772 : partial_sequence_[intervals_.size() - ranked_last] + 1;
773 int first = 0;
774 int counter = 0;
775 while (nexts_[first]->Bound()) {
776 DCHECK_NE(first, nexts_[first]->Min());
777 first = nexts_[first]->Min();
778 if (first == sentinel) {
779 return;
780 }
781 if (++counter > ranked_first) {
782 DCHECK(intervals_[first - 1]->MayBePerformed());
783 partial_sequence_.RankFirst(s, first - 1);
784 VLOG(2) << "RankFirst " << first - 1 << " -> "
785 << partial_sequence_.DebugString();
786 }
787 }
788 previous_.assign(previous_.size(), -1);
789 for (int i = 0; i < nexts_.size(); ++i) {
790 if (nexts_[i]->Bound()) {
791 previous_[nexts_[i]->Min()] = i;
792 }
793 }
794 int last = previous_.size() - 1;
795 counter = 0;
796 while (previous_[last] != -1) {
797 last = previous_[last];
798 if (++counter > ranked_last) {
799 partial_sequence_.RankLast(s, last - 1);
800 VLOG(2) << "RankLast " << last - 1 << " -> "
801 << partial_sequence_.DebugString();
802 }
803 }
804 }
805
806 void PropagateSequence() {
807 const int last_position = intervals_.size() - 1;
808 const int first_sentinel = partial_sequence_.NumFirstRanked();
809 const int last_sentinel = last_position - partial_sequence_.NumLastRanked();
810 // Propagates on ranked first from left to right.
811 for (int i = 0; i < first_sentinel - 1; ++i) {
812 IntervalVar* const interval = RankedInterval(i);
813 IntervalVar* const next_interval = RankedInterval(i + 1);
814 IntVar* const slack = RankedSlack(i);
815 const int64_t transition_time = RankedTransitionTime(i, i + 1);
816 next_interval->SetStartRange(
817 CapAdd(interval->StartMin(), CapAdd(slack->Min(), transition_time)),
818 CapAdd(interval->StartMax(), CapAdd(slack->Max(), transition_time)));
819 }
820 // Propagates on ranked last from right to left.
821 for (int i = last_position; i > last_sentinel + 1; --i) {
822 IntervalVar* const interval = RankedInterval(i - 1);
823 IntervalVar* const next_interval = RankedInterval(i);
824 IntVar* const slack = RankedSlack(i - 1);
825 const int64_t transition_time = RankedTransitionTime(i - 1, i);
826 interval->SetStartRange(CapSub(next_interval->StartMin(),
827 CapAdd(slack->Max(), transition_time)),
828 CapSub(next_interval->StartMax(),
829 CapAdd(slack->Min(), transition_time)));
830 }
831 // Propagate across.
832 IntervalVar* const first_interval =
833 first_sentinel > 0 ? RankedInterval(first_sentinel - 1) : nullptr;
834 IntVar* const first_slack =
835 first_sentinel > 0 ? RankedSlack(first_sentinel - 1) : nullptr;
836 IntervalVar* const last_interval = last_sentinel < last_position
837 ? RankedInterval(last_sentinel + 1)
838 : nullptr;
839
840 // Nothing to do afterwards, exiting.
841 if (first_interval == nullptr && last_interval == nullptr) {
842 return;
843 }
844 // Propagates to the middle part.
845 // This assumes triangular inequality in the transition times.
846 for (int i = first_sentinel; i <= last_sentinel; ++i) {
847 IntervalVar* const interval = RankedInterval(i);
848 IntVar* const slack = RankedSlack(i);
849 if (interval->MayBePerformed()) {
850 const bool performed = interval->MustBePerformed();
851 if (first_interval != nullptr) {
852 const int64_t transition_time =
853 RankedTransitionTime(first_sentinel - 1, i);
854 interval->SetStartRange(
855 CapAdd(first_interval->StartMin(),
856 CapAdd(first_slack->Min(), transition_time)),
857 CapAdd(first_interval->StartMax(),
858 CapAdd(first_slack->Max(), transition_time)));
859 if (performed) {
860 first_interval->SetStartRange(
861 CapSub(interval->StartMin(),
862 CapAdd(first_slack->Max(), transition_time)),
863 CapSub(interval->StartMax(),
864 CapAdd(first_slack->Min(), transition_time)));
865 }
866 }
867 if (last_interval != nullptr) {
868 const int64_t transition_time =
869 RankedTransitionTime(i, last_sentinel + 1);
870 interval->SetStartRange(
871 CapSub(last_interval->StartMin(),
872 CapAdd(slack->Max(), transition_time)),
873 CapSub(last_interval->StartMax(),
874 CapAdd(slack->Min(), transition_time)));
875 if (performed) {
876 last_interval->SetStartRange(
877 CapAdd(interval->StartMin(),
878 CapAdd(slack->Min(), transition_time)),
879 CapAdd(interval->StartMax(),
880 CapAdd(slack->Max(), transition_time)));
881 }
882 }
883 }
884 }
885 // TODO(user): cache transition on ranked intervals in a vector.
886 // Propagates on ranked first from right to left.
887 for (int i = std::min(first_sentinel - 2, last_position - 1); i >= 0; --i) {
888 IntervalVar* const interval = RankedInterval(i);
889 IntervalVar* const next_interval = RankedInterval(i + 1);
890 IntVar* const slack = RankedSlack(i);
891 const int64_t transition_time = RankedTransitionTime(i, i + 1);
892 interval->SetStartRange(CapSub(next_interval->StartMin(),
893 CapAdd(slack->Max(), transition_time)),
894 CapSub(next_interval->StartMax(),
895 CapAdd(slack->Min(), transition_time)));
896 }
897 // Propagates on ranked last from left to right.
898 for (int i = last_sentinel + 1; i < last_position - 1; ++i) {
899 IntervalVar* const interval = RankedInterval(i);
900 IntervalVar* const next_interval = RankedInterval(i + 1);
901 IntVar* const slack = RankedSlack(i);
902 const int64_t transition_time = RankedTransitionTime(i, i + 1);
903 next_interval->SetStartRange(
904 CapAdd(interval->StartMin(), CapAdd(slack->Min(), transition_time)),
905 CapAdd(interval->StartMax(), CapAdd(slack->Max(), transition_time)));
906 }
907 // TODO(user) : Propagate on slacks.
908 }
909
910 IntervalVar* RankedInterval(int i) const {
911 const int index = partial_sequence_[i];
912 return intervals_[index];
913 }
914
915 IntVar* RankedSlack(int i) const {
916 const int index = partial_sequence_[i];
917 return slacks_[index];
918 }
919
920 int64_t RankedTransitionTime(int before, int after) const {
921 const int before_index = partial_sequence_[before];
922 const int after_index = partial_sequence_[after];
923
924 return disjunctive_->TransitionTime(before_index, after_index);
925 }
926
927 std::string DebugString() const override {
928 return absl::StrFormat(
929 "RankedPropagator([%s], nexts = [%s], intervals = [%s])",
930 partial_sequence_.DebugString(), JoinDebugStringPtr(nexts_, ", "),
931 JoinDebugStringPtr(intervals_, ", "));
932 }
933
934 void Accept(ModelVisitor* const visitor) const override {
935 LOG(FATAL) << "Not yet implemented";
936 // TODO(user): IMPLEMENT ME.
937 }
938
939 private:
940 std::vector<IntVar*> nexts_;
941 std::vector<IntervalVar*> intervals_;
942 std::vector<IntVar*> slacks_;
943 DisjunctiveConstraint* const disjunctive_;
944 RevPartialSequence partial_sequence_;
945 std::vector<int> previous_;
946};
947
948// A class that stores several propagators for the sequence constraint, and
949// calls them until a fixpoint is reached.
950
951class FullDisjunctiveConstraint : public DisjunctiveConstraint {
952 public:
953 FullDisjunctiveConstraint(Solver* const s,
954 const std::vector<IntervalVar*>& intervals,
955 const std::string& name, bool strict)
956 : DisjunctiveConstraint(s, intervals, name),
957 sequence_var_(nullptr),
958 straight_(s, intervals, false, strict),
959 mirror_(s, intervals, true, strict),
960 straight_not_last_(s, intervals, false, strict),
961 mirror_not_last_(s, intervals, true, strict),
962 strict_(strict) {}
963
964 // This type is neither copyable nor movable.
965 FullDisjunctiveConstraint(const FullDisjunctiveConstraint&) = delete;
966 FullDisjunctiveConstraint& operator=(const FullDisjunctiveConstraint&) =
967 delete;
968
969 ~FullDisjunctiveConstraint() override {}
970
971 void Post() override {
972 Demon* const d = MakeDelayedConstraintDemon0(
973 solver(), this, &FullDisjunctiveConstraint::InitialPropagate,
974 "InitialPropagate");
975 for (int32_t i = 0; i < straight_.size(); ++i) {
976 straight_.interval(i)->WhenAnything(d);
977 }
978 }
979
980 void InitialPropagate() override {
981 bool all_optional_or_unperformed = true;
982 for (const IntervalVar* const interval : intervals_) {
983 if (interval->MustBePerformed()) {
984 all_optional_or_unperformed = false;
985 break;
986 }
987 }
988 if (all_optional_or_unperformed) { // Nothing to deduce
989 return;
990 }
991
992 bool all_times_fixed = true;
993 for (const IntervalVar* const interval : intervals_) {
994 if (interval->MayBePerformed() &&
995 (interval->StartMin() != interval->StartMax() ||
996 interval->DurationMin() != interval->DurationMax() ||
997 interval->EndMin() != interval->EndMax())) {
998 all_times_fixed = false;
999 break;
1000 }
1001 }
1002
1003 if (all_times_fixed) {
1004 PropagatePerformed();
1005 } else {
1006 do {
1007 do {
1008 do {
1009 // OverloadChecking is symmetrical. It has the same effect on the
1010 // straight and the mirrored version.
1011 straight_.OverloadChecking();
1012 } while (straight_.DetectablePrecedences() ||
1013 mirror_.DetectablePrecedences());
1014 } while (straight_not_last_.Propagate() ||
1015 mirror_not_last_.Propagate());
1016 } while (straight_.EdgeFinder() || mirror_.EdgeFinder());
1017 }
1018 }
1019
1020 bool Intersect(IntervalVar* const i1, IntervalVar* const i2) const {
1021 return i1->StartMin() < i2->EndMax() && i2->StartMin() < i1->EndMax();
1022 }
1023
1024 void PropagatePerformed() {
1025 performed_.clear();
1026 optional_.clear();
1027 for (IntervalVar* const interval : intervals_) {
1028 if (interval->MustBePerformed()) {
1029 performed_.push_back(interval);
1030 } else if (interval->MayBePerformed()) {
1031 optional_.push_back(interval);
1032 }
1033 }
1034 // Checks feasibility of the performed intervals.
1035 if (performed_.empty()) return;
1036 std::sort(performed_.begin(), performed_.end(), IntervalStartMinLessThan);
1037 for (int i = 0; i < performed_.size() - 1; ++i) {
1038 if (performed_[i]->EndMax() > performed_[i + 1]->StartMin()) {
1039 solver()->Fail();
1040 }
1041 }
1042
1043 // Checks if optional intervals can be inserted.
1044 if (optional_.empty()) return;
1045 int index = 0;
1046 const int num_performed = performed_.size();
1047 std::sort(optional_.begin(), optional_.end(), IntervalStartMinLessThan);
1048 for (IntervalVar* const candidate : optional_) {
1049 const int64_t start = candidate->StartMin();
1050 while (index < num_performed && start >= performed_[index]->EndMax()) {
1051 index++;
1052 }
1053 if (index == num_performed) return;
1054 if (Intersect(candidate, performed_[index]) ||
1055 (index < num_performed - 1 &&
1056 Intersect(candidate, performed_[index + 1]))) {
1057 candidate->SetPerformed(false);
1058 }
1059 }
1060 }
1061
1062 void Accept(ModelVisitor* const visitor) const override {
1063 visitor->BeginVisitConstraint(ModelVisitor::kDisjunctive, this);
1064 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
1065 intervals_);
1066 if (sequence_var_ != nullptr) {
1067 visitor->VisitSequenceArgument(ModelVisitor::kSequenceArgument,
1068 sequence_var_);
1069 }
1070 visitor->EndVisitConstraint(ModelVisitor::kDisjunctive, this);
1071 }
1072
1073 SequenceVar* MakeSequenceVar() override {
1074 BuildNextModelIfNeeded();
1075 if (sequence_var_ == nullptr) {
1076 solver()->SaveValue(reinterpret_cast<void**>(&sequence_var_));
1077 sequence_var_ = solver()->RevAlloc(
1078 new SequenceVar(solver(), intervals_, nexts_, name()));
1079 }
1080 return sequence_var_;
1081 }
1082
1083 std::string DebugString() const override {
1084 return absl::StrFormat("FullDisjunctiveConstraint([%s], %i)",
1085 JoinDebugStringPtr(intervals_, ", "), strict_);
1086 }
1087
1088 const std::vector<IntVar*>& nexts() const override { return nexts_; }
1089
1090 const std::vector<IntVar*>& actives() const override { return actives_; }
1091
1092 const std::vector<IntVar*>& time_cumuls() const override {
1093 return time_cumuls_;
1094 }
1095
1096 const std::vector<IntVar*>& time_slacks() const override {
1097 return time_slacks_;
1098 }
1099
1100 private:
1101 int64_t Distance(int64_t activity_plus_one, int64_t next_activity_plus_one) {
1102 return (activity_plus_one == 0 ||
1103 next_activity_plus_one > intervals_.size())
1104 ? 0
1105 : transition_time_(activity_plus_one - 1,
1106 next_activity_plus_one - 1);
1107 }
1108
1109 void BuildNextModelIfNeeded() {
1110 if (!nexts_.empty()) {
1111 return;
1112 }
1113 Solver* const s = solver();
1114 const std::string& ct_name = name();
1115 const int num_intervals = intervals_.size();
1116 const int num_nodes = intervals_.size() + 1;
1117 int64_t horizon = 0;
1118 for (int i = 0; i < intervals_.size(); ++i) {
1119 if (intervals_[i]->MayBePerformed()) {
1120 horizon = std::max(horizon, intervals_[i]->EndMax());
1121 }
1122 }
1123
1124 // Create the next model.
1125 s->MakeIntVarArray(num_nodes, 1, num_nodes, ct_name + "_nexts", &nexts_);
1126 // Alldifferent on the nexts variables (the equivalent problem is a TSP).
1127 s->AddConstraint(s->MakeAllDifferent(nexts_));
1128
1129 actives_.resize(num_nodes);
1130 for (int i = 0; i < num_intervals; ++i) {
1131 actives_[i + 1] = intervals_[i]->PerformedExpr()->Var();
1132 s->AddConstraint(
1133 s->MakeIsDifferentCstCt(nexts_[i + 1], i + 1, actives_[i + 1]));
1134 }
1135 std::vector<IntVar*> short_actives(actives_.begin() + 1, actives_.end());
1136 actives_[0] = s->MakeMax(short_actives)->Var();
1137
1138 // No Cycle on the corresponding tsp.
1139 s->AddConstraint(s->MakeNoCycle(nexts_, actives_));
1140
1141 // Cumul on time.
1142 time_cumuls_.resize(num_nodes + 1);
1143 // Slacks between activities.
1144 time_slacks_.resize(num_nodes);
1145
1146 time_slacks_[0] = s->MakeIntVar(0, horizon, "initial_slack");
1147 // TODO(user): check this.
1148 time_cumuls_[0] = s->MakeIntConst(0);
1149
1150 for (int64_t i = 0; i < num_intervals; ++i) {
1151 IntervalVar* const var = intervals_[i];
1152 if (var->MayBePerformed()) {
1153 const int64_t duration_min = var->DurationMin();
1154 time_slacks_[i + 1] = s->MakeIntVar(
1155 duration_min, horizon, absl::StrFormat("time_slacks(%d)", i + 1));
1156 // TODO(user): Check SafeStartExpr();
1157 time_cumuls_[i + 1] = var->SafeStartExpr(var->StartMin())->Var();
1158 if (var->DurationMax() != duration_min) {
1159 s->AddConstraint(s->MakeGreaterOrEqual(
1160 time_slacks_[i + 1], var->SafeDurationExpr(duration_min)));
1161 }
1162 } else {
1163 time_slacks_[i + 1] = s->MakeIntVar(
1164 0, horizon, absl::StrFormat("time_slacks(%d)", i + 1));
1165 time_cumuls_[i + 1] = s->MakeIntConst(horizon);
1166 }
1167 }
1168 // TODO(user): Find a better UB for the last time cumul.
1169 time_cumuls_[num_nodes] = s->MakeIntVar(0, 2 * horizon, ct_name + "_ect");
1170 s->AddConstraint(s->MakePathCumul(
1171 nexts_, actives_, time_cumuls_, time_slacks_,
1172 [this](int64_t x, int64_t y) { return Distance(x, y); }));
1173
1174 std::vector<IntVar*> short_slacks(time_slacks_.begin() + 1,
1175 time_slacks_.end());
1176 s->AddConstraint(s->RevAlloc(
1177 new RankedPropagator(s, nexts_, intervals_, short_slacks, this)));
1178 }
1179
1180 SequenceVar* sequence_var_;
1181 EdgeFinderAndDetectablePrecedences straight_;
1182 EdgeFinderAndDetectablePrecedences mirror_;
1183 NotLast straight_not_last_;
1184 NotLast mirror_not_last_;
1185 std::vector<IntVar*> nexts_;
1186 std::vector<IntVar*> actives_;
1187 std::vector<IntVar*> time_cumuls_;
1188 std::vector<IntVar*> time_slacks_;
1189 std::vector<IntervalVar*> performed_;
1190 std::vector<IntervalVar*> optional_;
1191 const bool strict_;
1192};
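// Minimal sketch of how the constraint above is typically reached from user
// code (illustration only, assuming the usual Solver factory methods):
// Solver::MakeDisjunctiveConstraint() hands back a DisjunctiveConstraint of
// this kind, and MakeSequenceVar() exposes the ranking model built by
// BuildNextModelIfNeeded() to the search.
inline SequenceVar* SequenceVarSketch(Solver* solver,
                                      const std::vector<IntervalVar*>& tasks) {
  DisjunctiveConstraint* const disjunctive =
      solver->MakeDisjunctiveConstraint(tasks, "machine");
  solver->AddConstraint(disjunctive);
  // The SequenceVar can then be ranked by a decision builder, which triggers
  // the RankedPropagator defined earlier.
  return disjunctive->MakeSequenceVar();
}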
1193
1194// =====================================================================
1195// Cumulative
1196// =====================================================================
1197
1198// A cumulative Theta node, where two energies, corresponding to 2 capacities,
1199// are stored.
1200struct DualCapacityThetaNode {
1201 // Special value for task indices meaning 'no such task'.
1202 static const int kNone;
1203
1204 // Identity constructor
1205 DualCapacityThetaNode()
1206 : energy(0LL),
1207 energetic_end_min(std::numeric_limits<int64_t>::min()),
1208 residual_energetic_end_min(std::numeric_limits<int64_t>::min()) {}
1209
1210 // Constructor for a single cumulative task in the Theta set.
1211 DualCapacityThetaNode(int64_t capacity, int64_t residual_capacity,
1212 const CumulativeTask& task)
1213 : energy(task.EnergyMin()),
1214 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
1215 residual_energetic_end_min(
1216 CapAdd(residual_capacity * task.interval->StartMin(), energy)) {}
1217
1218 // Constructor for a single variable cumulative task in the Theta set.
1219 DualCapacityThetaNode(int64_t capacity, int64_t residual_capacity,
1220 const VariableCumulativeTask& task)
1221 : energy(task.EnergyMin()),
1222 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
1223 residual_energetic_end_min(
1224 CapAdd(residual_capacity * task.interval->StartMin(), energy)) {}
1225
1226 // Sets this DualCapacityThetaNode to the result of the natural binary
1227 // operation over the two given operands, corresponding to the following set
1228 // operation: Theta = left.Theta union right.Theta
1229 //
1230// No set operation actually occurs: we only maintain the relevant quantities
1231 // associated with such sets.
1232 void Compute(const DualCapacityThetaNode& left,
1233 const DualCapacityThetaNode& right) {
1234 energy = CapAdd(left.energy, right.energy);
1235 energetic_end_min = std::max(CapAdd(left.energetic_end_min, right.energy),
1236 right.energetic_end_min);
1237 residual_energetic_end_min =
1238 std::max(CapAdd(left.residual_energetic_end_min, right.energy),
1239 right.residual_energetic_end_min);
1240 }
1241
1242 // Amount of resource consumed by the Theta set, in units of demand X time.
1243 // This is energy(Theta).
1244 int64_t energy;
1245
1246 // Max_{subset S of Theta} (capacity * start_min(S) + energy(S))
1247 int64_t energetic_end_min;
1248
1249 // Max_{subset S of Theta} (residual_capacity * start_min(S) + energy(S))
1250 int64_t residual_energetic_end_min;
1251};
1252
1253const int DualCapacityThetaNode::kNone = -1;
1254
1255// A tree for dual capacity theta nodes
1256class DualCapacityThetaTree
1257 : public MonoidOperationTree<DualCapacityThetaNode> {
1258 public:
1259 static const int64_t kNotInitialized;
1260
1261 explicit DualCapacityThetaTree(int size)
1262 : MonoidOperationTree<DualCapacityThetaNode>(size),
1263 capacity_max_(-1),
1264 residual_capacity_(-1) {}
1265
1266 // This type is neither copyable nor movable.
1267 DualCapacityThetaTree(const DualCapacityThetaTree&) = delete;
1268 DualCapacityThetaTree& operator=(const DualCapacityThetaTree&) = delete;
1269
1270 virtual ~DualCapacityThetaTree() {}
1271
1272 void Init(int64_t capacity_max, int64_t residual_capacity) {
1273 DCHECK_LE(0, residual_capacity);
1274 DCHECK_LE(residual_capacity, capacity_max);
1275 Clear();
1276 capacity_max_ = capacity_max;
1277 residual_capacity_ = residual_capacity;
1278 }
1279
1280 void Insert(const CumulativeTask* task) {
1281 Set(task->index,
1282 DualCapacityThetaNode(capacity_max_, residual_capacity_, *task));
1283 }
1284
1285 void Insert(const VariableCumulativeTask* task) {
1286 Set(task->index,
1287 DualCapacityThetaNode(capacity_max_, residual_capacity_, *task));
1288 }
1289
1290 private:
1291 int64_t capacity_max_;
1292 int64_t residual_capacity_;
1293};
1294
1295const int64_t DualCapacityThetaTree::kNotInitialized = -1LL;
1296
1297// An object that can dive down a branch of a DualCapacityThetaTree to compute
1298// Env(j, c) in Petr Vilim's notation.
1299//
1300// In 'Edge finding filtering algorithm for discrete cumulative resources in
1301// O(kn log n)' by Petr Vilim, this corresponds to line 6--8 in algorithm 1.3,
1302// plus all of algorithm 1.2.
1303//
1304// http://vilim.eu/petr/cp2009.pdf
1305// Note: use the version pointed to by this URL, not the version from the
1306// conference proceedings, which has a few errors.
1307class EnvJCComputeDiver {
1308 public:
1309 static const int64_t kNotAvailable;
1310 explicit EnvJCComputeDiver(int64_t energy_threshold)
1311 : energy_threshold_(energy_threshold),
1312 energy_alpha_(kNotAvailable),
1313 energetic_end_min_alpha_(kNotAvailable) {}
1314 void OnArgumentReached(int index, const DualCapacityThetaNode& argument) {
1315 energy_alpha_ = argument.energy;
1316 energetic_end_min_alpha_ = argument.energetic_end_min;
1317 // We should reach a leaf that is not the identity
1318 // DCHECK_GT(energetic_end_min_alpha_, kint64min);
1319 // TODO(user): Check me.
1320 }
1321 bool ChooseGoLeft(const DualCapacityThetaNode& current,
1322 const DualCapacityThetaNode& left_child,
1323 const DualCapacityThetaNode& right_child) {
1324 if (right_child.residual_energetic_end_min > energy_threshold_) {
1325 return false; // enough energy on right
1326 } else {
1327 energy_threshold_ -= right_child.energy;
1328 return true;
1329 }
1330 }
1331 void OnComeBackFromLeft(const DualCapacityThetaNode& current,
1332 const DualCapacityThetaNode& left_child,
1333 const DualCapacityThetaNode& right_child) {
1334 // The left subtree intersects the alpha set.
1335 // The right subtree does not intersect the alpha set.
1336 // The energy_alpha_ and energetic_end_min_alpha_ previously
1337 // computed are valid for this node too: there's nothing to do.
1338 }
1339 void OnComeBackFromRight(const DualCapacityThetaNode& current,
1340 const DualCapacityThetaNode& left_child,
1341 const DualCapacityThetaNode& right_child) {
1342 // The left subtree is included in the alpha set.
1343 // The right subtree intersects the alpha set.
1344 energetic_end_min_alpha_ =
1345 std::max(energetic_end_min_alpha_,
1346 CapAdd(left_child.energetic_end_min, energy_alpha_));
1347 energy_alpha_ += left_child.energy;
1348 }
1349 int64_t GetEnvJC(const DualCapacityThetaNode& root) const {
1350 const int64_t energy = root.energy;
1351 const int64_t energy_beta = CapSub(energy, energy_alpha_);
1352 return CapAdd(energetic_end_min_alpha_, energy_beta);
1353 }
1354
1355 private:
1356 // Energy threshold such that if a set has an energetic_end_min greater than
1357 // the threshold, then it can push tasks that must end at or after the
1358 // currently considered end max.
1359 //
1360 // Used when diving down only.
1361 int64_t energy_threshold_;
1362
1363 // Energy of the alpha set, that is, the set of tasks whose start min does not
1364 // exceed the max start min of a set with excess residual energy.
1365 //
1366 // Used when swimming up only.
1367 int64_t energy_alpha_;
1368
1369 // Energetic end min of the alpha set.
1370 //
1371 // Used when swimming up only.
1372 int64_t energetic_end_min_alpha_;
1373};
1374
1375const int64_t EnvJCComputeDiver::kNotAvailable = -1LL;
1376
1377// In all the following, the term 'update' means 'a potential new start min for
1378// a task'. The edge-finding algorithm is in two phases: one computes potential
1379// new start mins, the other detects whether they are applicable for each
1380// task.
1381
1382// Collection of all updates (i.e., potential new start mins) for a given value
1383// of the demand.
1384class UpdatesForADemand {
1385 public:
1386 explicit UpdatesForADemand(int size)
1387 : updates_(size, 0), up_to_date_(false) {}
1388
1389 // This type is neither copyable nor movable.
1390 UpdatesForADemand(const UpdatesForADemand&) = delete;
1391 UpdatesForADemand& operator=(const UpdatesForADemand&) = delete;
1392
1393 int64_t Update(int index) { return updates_[index]; }
1394 void Reset() { up_to_date_ = false; }
1395 void SetUpdate(int index, int64_t update) {
1396 DCHECK(!up_to_date_);
1397 DCHECK_LT(index, updates_.size());
1398 updates_[index] = update;
1399 }
1400 bool up_to_date() const { return up_to_date_; }
1401 void set_up_to_date() { up_to_date_ = true; }
1402
1403 private:
1404 std::vector<int64_t> updates_;
1405 bool up_to_date_;
1406};
1407
1408// One-sided cumulative edge finder.
1409template <class Task>
1410class EdgeFinder : public Constraint {
1411 public:
1412 EdgeFinder(Solver* const solver, const std::vector<Task*>& tasks,
1413 IntVar* const capacity)
1414 : Constraint(solver),
1415 capacity_(capacity),
1416 tasks_(tasks),
1417 by_start_min_(tasks.size()),
1418 by_end_max_(tasks.size()),
1419 by_end_min_(tasks.size()),
1420 lt_tree_(tasks.size(), capacity_->Max()),
1421 dual_capacity_tree_(tasks.size()),
1422 has_zero_demand_tasks_(true) {}
1423
1424 // This type is neither copyable nor movable.
1425 EdgeFinder(const EdgeFinder&) = delete;
1426 EdgeFinder& operator=(const EdgeFinder&) = delete;
1427
1428 ~EdgeFinder() override {
1429 gtl::STLDeleteElements(&tasks_);
1430 gtl::STLDeleteValues(&update_map_);
1431 }
1432
1433 void Post() override {
1434 // Add the demons
1435 Demon* const demon = MakeDelayedConstraintDemon0(
1436 solver(), this, &EdgeFinder::InitialPropagate, "RangeChanged");
1437 for (Task* const task : tasks_) {
1438 // Delay propagation, as this constraint is not incremental: we pay
1439 // O(n log n) each time the constraint is awakened.
1440 task->WhenAnything(demon);
1441 }
1442 capacity_->WhenRange(demon);
1443 }
1444
1445 // The propagation algorithms: checks for overloading, computes new start mins
1446 // according to the edge-finding rules, and applies them.
1447 void InitialPropagate() override {
1448 InitPropagation();
1449 PropagateBasedOnEndMinGreaterThanEndMax();
1450 FillInTree();
1451 PropagateBasedOnEnergy();
1452 ApplyNewBounds();
1453 }
1454
1455 void Accept(ModelVisitor* const visitor) const override {
1456 LOG(FATAL) << "Should Not Be Visited";
1457 }
1458
1459 std::string DebugString() const override { return "EdgeFinder"; }
1460
1461 private:
1462 UpdatesForADemand* GetOrMakeUpdate(int64_t demand_min) {
1463 UpdatesForADemand* update = gtl::FindPtrOrNull(update_map_, demand_min);
1464 if (update == nullptr) {
1465 update = new UpdatesForADemand(tasks_.size());
1466 update_map_[demand_min] = update;
1467 }
1468 return update;
1469 }
1470
1471 // Sets the fields in a proper state to run the propagation algorithm.
1472 void InitPropagation() {
1473 // Clear the update stack
1474 start_min_update_.clear();
1475 // Re-init the vectors if has_zero_demand_tasks_ is true.
1476 if (has_zero_demand_tasks_.Value()) {
1477 by_start_min_.clear();
1478 by_end_min_.clear();
1479 by_end_max_.clear();
1480 // Only populate tasks with demand_min > 0.
1481 bool zero_demand = false;
1482 for (Task* const task : tasks_) {
1483 if (task->DemandMin() > 0) {
1484 by_start_min_.push_back(task);
1485 by_end_min_.push_back(task);
1486 by_end_max_.push_back(task);
1487 } else {
1488 zero_demand = true;
1489 }
1490 }
1491 if (!zero_demand) {
1492 has_zero_demand_tasks_.SetValue(solver(), false);
1493 }
1494 }
1495
1496 // sort by start min.
1497 std::sort(by_start_min_.begin(), by_start_min_.end(),
1498 StartMinLessThan<Task>);
1499 for (int i = 0; i < by_start_min_.size(); ++i) {
1500 by_start_min_[i]->index = i;
1501 }
1502 // Sort by end max.
1503 std::sort(by_end_max_.begin(), by_end_max_.end(), EndMaxLessThan<Task>);
1504 // Sort by end min.
1505 std::sort(by_end_min_.begin(), by_end_min_.end(), EndMinLessThan<Task>);
1506 // Initialize the tree with the new capacity.
1507 lt_tree_.Init(capacity_->Max());
1508 // Clear updates
1509 for (const auto& entry : update_map_) {
1510 entry.second->Reset();
1511 }
1512 }
1513
1514 // Computes all possible update values for tasks of given demand, and stores
1515 // these values in update_map_[demand].
1516 // Runs in O(n log n).
1517 // This corresponds to lines 2--13 in algorithm 1.3 in Petr Vilim's paper.
1518 void ComputeConditionalStartMins(UpdatesForADemand* updates,
1519 int64_t demand_min) {
1520 DCHECK_GT(demand_min, 0);
1521 DCHECK(updates != nullptr);
1522 const int64_t capacity_max = capacity_->Max();
1523 const int64_t residual_capacity = CapSub(capacity_max, demand_min);
1524 dual_capacity_tree_.Init(capacity_max, residual_capacity);
1525 // It's important to initialize the update at IntervalVar::kMinValidValue
1526 // rather than at kInt64min, because its opposite may be used if it's a
1527 // mirror variable, and
1528 // -kInt64min = -(-kInt64max - 1) = kInt64max + 1 = -kInt64min
1529 int64_t update = IntervalVar::kMinValidValue;
1530 for (int i = 0; i < by_end_max_.size(); ++i) {
1531 Task* const task = by_end_max_[i];
1532 if (task->EnergyMin() == 0) continue;
1533 const int64_t current_end_max = task->interval->EndMax();
1534 dual_capacity_tree_.Insert(task);
1535 const int64_t energy_threshold = residual_capacity * current_end_max;
1536 const DualCapacityThetaNode& root = dual_capacity_tree_.result();
1537 const int64_t res_energetic_end_min = root.residual_energetic_end_min;
1538 if (res_energetic_end_min > energy_threshold) {
1539 EnvJCComputeDiver diver(energy_threshold);
1540 dual_capacity_tree_.DiveInTree(&diver);
1541 const int64_t enjv = diver.GetEnvJC(dual_capacity_tree_.result());
1542 const int64_t numerator = CapSub(enjv, energy_threshold);
1543 const int64_t diff = MathUtil::CeilOfRatio(numerator, demand_min);
1544 update = std::max(update, diff);
1545 }
1546 updates->SetUpdate(i, update);
1547 }
1548 updates->set_up_to_date();
1549 }
1550
1551 // Returns the new start min that can be inferred for task_to_push if it is
1552 // proved that it cannot end before by_end_max[end_max_index] does.
1553 int64_t ConditionalStartMin(const Task& task_to_push, int end_max_index) {
1554 if (task_to_push.EnergyMin() == 0) {
1555 return task_to_push.interval->StartMin();
1556 }
1557 const int64_t demand_min = task_to_push.DemandMin();
1558 UpdatesForADemand* const updates = GetOrMakeUpdate(demand_min);
1559 if (!updates->up_to_date()) {
1560 ComputeConditionalStartMins(updates, demand_min);
1561 }
1562 DCHECK(updates->up_to_date());
1563 return updates->Update(end_max_index);
1564 }
1565
1566 // Propagates by discovering all end-after-end relationships purely based on
1567 // comparisons between end mins and end maxes: there is no energetic reasoning
1568// here, but this allows updates that the standard edge-finding detection rule
1569 // misses.
1570 // See paragraph 6.2 in http://vilim.eu/petr/cp2009.pdf.
1571 void PropagateBasedOnEndMinGreaterThanEndMax() {
1572 int end_max_index = 0;
1573 int64_t max_start_min = std::numeric_limits<int64_t>::min();
1574 for (Task* const task : by_end_min_) {
1575 const int64_t end_min = task->interval->EndMin();
1576 while (end_max_index < by_start_min_.size() &&
1577 by_end_max_[end_max_index]->interval->EndMax() <= end_min) {
1578 max_start_min = std::max(
1579 max_start_min, by_end_max_[end_max_index]->interval->StartMin());
1580 ++end_max_index;
1581 }
1582 if (end_max_index > 0 && task->interval->StartMin() <= max_start_min &&
1583 task->interval->EndMax() > task->interval->EndMin()) {
1584 DCHECK_LE(by_end_max_[end_max_index - 1]->interval->EndMax(), end_min);
1585 // The update is valid and may be interesting:
1586 // * If task->StartMin() > max_start_min, then all tasks whose end_max
1587 // is less than or equal to end_min have a start min that is less
1588 // than task->StartMin(). In this case, any update we could
1589 // compute would also be computed by the standard edge-finding
1590 // rule. It's better not to compute it, then: it may not be
1591 // needed.
1592 // * If task->EndMax() <= task->EndMin(), that means the end max is
1593 // bound. In that case, 'task' itself belongs to the set of tasks
1594 // that must end before end_min, which may cause the result of
1595 // ConditionalStartMin(task, end_max_index - 1) not to be a valid
1596 // update.
1597 const int64_t update = ConditionalStartMin(*task, end_max_index - 1);
1598 start_min_update_.push_back(std::make_pair(task->interval, update));
1599 }
1600 }
1601 }
1602
1603 // Fill the theta-lambda-tree, and check for overloading.
1604 void FillInTree() {
1605 for (Task* const task : by_end_max_) {
1606 lt_tree_.Insert(*task);
1607 // Maximum energetic end min without overload.
1608 const int64_t max_feasible =
1609 CapProd(capacity_->Max(), task->interval->EndMax());
1610 if (lt_tree_.energetic_end_min() > max_feasible) {
1611 solver()->Fail();
1612 }
1613 }
1614 }
1615
1616 // The heart of the propagation algorithm. Should be called with all tasks
1617 // being in the Theta set. It detects tasks that need to be pushed.
1618 void PropagateBasedOnEnergy() {
1619 for (int j = by_start_min_.size() - 2; j >= 0; --j) {
1620 lt_tree_.Grey(*by_end_max_[j + 1]);
1621 Task* const twj = by_end_max_[j];
1622 // We should have checked for overload earlier.
1623 const int64_t max_feasible =
1624 CapProd(capacity_->Max(), twj->interval->EndMax());
1625 DCHECK_LE(lt_tree_.energetic_end_min(), max_feasible);
1626 while (lt_tree_.energetic_end_min_opt() > max_feasible) {
1627 const int i = lt_tree_.argmax_energetic_end_min_opt();
1628 DCHECK_GE(i, 0);
1629 PropagateTaskCannotEndBefore(i, j);
1630 lt_tree_.Reset(i);
1631 }
1632 }
1633 }
1634
1635 // Takes into account the fact that the task of given index cannot end before
1636 // the given new end min.
1637 void PropagateTaskCannotEndBefore(int index, int end_max_index) {
1638 Task* const task_to_push = by_start_min_[index];
1639 const int64_t update = ConditionalStartMin(*task_to_push, end_max_index);
1640 start_min_update_.push_back(std::make_pair(task_to_push->interval, update));
1641 }
1642
1643 // Applies the previously computed updates.
1644 void ApplyNewBounds() {
1645 for (const std::pair<IntervalVar*, int64_t>& update : start_min_update_) {
1646 update.first->SetStartMin(update.second);
1647 }
1648 }
1649
1650 // Capacity of the cumulative resource.
1651 IntVar* const capacity_;
1652
1653 // Initial vector of tasks
1654 std::vector<Task*> tasks_;
1655
1656 // Cumulative tasks, ordered by non-decreasing start min.
1657 std::vector<Task*> by_start_min_;
1658
1659 // Cumulative tasks, ordered by non-decreasing end max.
1660 std::vector<Task*> by_end_max_;
1661
1662 // Cumulative tasks, ordered by non-decreasing end min.
1663 std::vector<Task*> by_end_min_;
1664
1665 // Cumulative theta-lambda tree.
1666 CumulativeLambdaThetaTree lt_tree_;
1667
1668 // Needed by ComputeConditionalStartMins.
1669 DualCapacityThetaTree dual_capacity_tree_;
1670
1671 // Stack of updates to the new start min to do.
1672 std::vector<std::pair<IntervalVar*, int64_t>> start_min_update_;
1673
1674 // update_map_[d][i] is an integer such that if a task
1675 // whose demand is d cannot end before by_end_max_[i], then it cannot start
1676 // before update_map_[d][i].
1677 absl::flat_hash_map<int64_t, UpdatesForADemand*> update_map_;
1678
1679 // Whether at least one task has a demand min equal to 0.
1680 Rev<bool> has_zero_demand_tasks_;
1681};
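
// Illustrative sketch, not part of this file: the arithmetic behind the
// ComputeConditionalStartMins() update above, on hand-picked numbers. With
// capacity C, a pushed task of demand c and residual r = C - c, if the
// dual-capacity envelope Env(Theta, r) of the tasks that must end before E
// exceeds r * E, the pushed task cannot start before ceil((Env - r * E) / c).
// Only headers already included at the top of this file are used.
namespace conditional_start_min_sketch {

struct SimpleTask {
  int64_t start_min;
  int64_t energy;  // duration_min * demand_min
};

// Env(Theta, r) = max over subsets S of (r * start_min(S) + energy(S)). The
// maximum is reached by a subset of the form {j : start_min(j) >= s}, so a
// quadratic scan is enough for an illustration.
int64_t DualCapacityEnvelope(const std::vector<SimpleTask>& tasks, int64_t r) {
  int64_t env = std::numeric_limits<int64_t>::min();
  for (const SimpleTask& anchor : tasks) {
    int64_t value = r * anchor.start_min;
    for (const SimpleTask& task : tasks) {
      if (task.start_min >= anchor.start_min) value += task.energy;
    }
    env = std::max(env, value);
  }
  return env;
}

// With C = 3, c = 2 (so r = 1), E = 10 and Theta = {(start 0, energy 5),
// (start 2, energy 6)}: Env = 11 > r * E = 10, so the pushed task cannot
// start before ceil((11 - 10) / 2) = 1.
int64_t ExampleUpdate() {
  const int64_t capacity = 3;
  const int64_t demand = 2;
  const int64_t end_max = 10;
  const int64_t residual = capacity - demand;
  const std::vector<SimpleTask> theta = {{0, 5}, {2, 6}};
  const int64_t env = DualCapacityEnvelope(theta, residual);
  const int64_t threshold = residual * end_max;
  if (env <= threshold) return std::numeric_limits<int64_t>::min();
  return (env - threshold + demand - 1) / demand;  // Ceil of the ratio.
}

}  // namespace conditional_start_min_sketch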
1682
1683// A point in time where the usage profile changes.
1684// Starting from time (included), the usage is what it was immediately before
1685// time, plus the delta.
1686//
1687// Example:
1688// Consider the following vector of ProfileDelta's:
1689// { t=1, d=+3}, { t=4, d=+1 }, { t=5, d=-2}, { t=8, d=-1}
1690// This represents the following usage profile:
1691//
1692// usage
1693// 4 | ****.
1694// 3 | ************. .
1695// 2 | . . ************.
1696// 1 | . . . .
1697// 0 |*******----------------------------*******************-> time
1698// 0 1 2 3 4 5 6 7 8 9
1699//
1700// Note that the usage profile is right-continuous (see
1701// http://en.wikipedia.org/wiki/Left-continuous#Directional_continuity).
1702// This is because intervals for tasks are always closed on the start side
1703// and open on the end side.
1704struct ProfileDelta {
1705 ProfileDelta(int64_t _time, int64_t _delta) : time(_time), delta(_delta) {}
1706 int64_t time;
1707 int64_t delta;
1708};
1709
1710bool TimeLessThan(const ProfileDelta& delta1, const ProfileDelta& delta2) {
1711 return delta1.time < delta2.time;
1712}
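
// Illustrative sketch, not part of this file: accumulating the ProfileDelta
// example from the comment above into its piecewise-constant usage profile.
// With the deltas {t=1,+3}, {t=4,+1}, {t=5,-2}, {t=8,-1}, the running sum
// gives a usage of 3 on [1,4), 4 on [4,5), 2 on [5,8) and 1 from t=8 on.
namespace profile_delta_sketch {

// Returns (time, usage from that time on) pairs for a time-sorted vector of
// deltas, i.e. the step function that the deltas encode.
std::vector<std::pair<int64_t, int64_t>> AccumulateProfile(
    const std::vector<ProfileDelta>& sorted_deltas) {
  std::vector<std::pair<int64_t, int64_t>> profile;
  int64_t usage = 0;
  for (const ProfileDelta& d : sorted_deltas) {
    usage += d.delta;
    profile.emplace_back(d.time, usage);
  }
  return profile;
}

// AccumulateProfile({{1, +3}, {4, +1}, {5, -2}, {8, -1}}) returns
// {{1, 3}, {4, 4}, {5, 2}, {8, 1}}.

}  // namespace profile_delta_sketch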
1713
1714// Cumulative time-table.
1715//
1716// This class implements a propagator for the CumulativeConstraint which is not
1717// incremental, and where a call to InitialPropagate() takes time which is
1718// O(n^2) and Omega(n log n) with n the number of cumulative tasks.
1719//
1720// Despite the high complexity, this propagator is needed: of those
1721// implemented, it is the only one guaranteeing that, once all variables are
1722// instantiated, it fails if and only if the constraint is violated.
1723//
1724// The implementation is quite naive, and could certainly be improved, for
1725// example by maintaining the profile incrementally.
1726template <class Task>
1727class CumulativeTimeTable : public Constraint {
1728 public:
1729 CumulativeTimeTable(Solver* const solver, const std::vector<Task*>& tasks,
1730 IntVar* const capacity)
1731 : Constraint(solver), by_start_min_(tasks), capacity_(capacity) {
1732 // There may be up to 2 deltas per interval (one on each side),
1733 // plus two sentinels.
1734 const int profile_max_size = 2 * by_start_min_.size() + 2;
1735 profile_non_unique_time_.reserve(profile_max_size);
1736 profile_unique_time_.reserve(profile_max_size);
1737 }
1738
1739 // This type is neither copyable nor movable.
1740 CumulativeTimeTable(const CumulativeTimeTable&) = delete;
1741 CumulativeTimeTable& operator=(const CumulativeTimeTable&) = delete;
1742
1743 ~CumulativeTimeTable() override { gtl::STLDeleteElements(&by_start_min_); }
1744
1745 void InitialPropagate() override {
1746 BuildProfile();
1747 PushTasks();
1748 // TODO(user): When a task has a fixed part, we could propagate
1749 // max_demand from its current location.
1750 }
1751
1752 void Post() override {
1753 Demon* demon = MakeDelayedConstraintDemon0(
1754 solver(), this, &CumulativeTimeTable::InitialPropagate,
1755 "InitialPropagate");
1756 for (Task* const task : by_start_min_) {
1757 task->WhenAnything(demon);
1758 }
1759 capacity_->WhenRange(demon);
1760 }
1761
1762 void Accept(ModelVisitor* const visitor) const override {
1763 LOG(FATAL) << "Should not be visited";
1764 }
1765
1766 std::string DebugString() const override { return "CumulativeTimeTable"; }
1767
1768 private:
1769 // Build the usage profile. Runs in O(n log n).
1770 void BuildProfile() {
1771 // Build profile with non-unique times
1772 profile_non_unique_time_.clear();
1773 for (const Task* const task : by_start_min_) {
1774 const IntervalVar* const interval = task->interval;
1775 const int64_t start_max = interval->StartMax();
1776 const int64_t end_min = interval->EndMin();
1777 if (interval->MustBePerformed() && start_max < end_min) {
1778 const int64_t demand_min = task->DemandMin();
1779 if (demand_min > 0) {
1780 profile_non_unique_time_.emplace_back(start_max, +demand_min);
1781 profile_non_unique_time_.emplace_back(end_min, -demand_min);
1782 }
1783 }
1784 }
1785 // Sort
1786 std::sort(profile_non_unique_time_.begin(), profile_non_unique_time_.end(),
1787 TimeLessThan);
1788 // Build profile with unique times
1789 profile_unique_time_.clear();
1790 profile_unique_time_.emplace_back(std::numeric_limits<int64_t>::min(), 0);
1791 int64_t usage = 0;
1792 for (const ProfileDelta& step : profile_non_unique_time_) {
1793 if (step.time == profile_unique_time_.back().time) {
1794 profile_unique_time_.back().delta += step.delta;
1795 } else {
1796 profile_unique_time_.push_back(step);
1797 }
1798 // Update usage.
1799 usage += step.delta;
1800 }
1801 // Check final usage to be 0.
1802 DCHECK_EQ(0, usage);
1803 // Scan to find max usage.
1804 int64_t max_usage = 0;
1805 for (const ProfileDelta& step : profile_unique_time_) {
1806 usage += step.delta;
1807 if (usage > max_usage) {
1808 max_usage = usage;
1809 }
1810 }
1811 DCHECK_EQ(0, usage);
1812 capacity_->SetMin(max_usage);
1813 // Add a sentinel.
1814 profile_unique_time_.emplace_back(std::numeric_limits<int64_t>::max(), 0);
1815 }
1816
1817 // Update the start min for all tasks. Runs in O(n^2) and Omega(n).
1818 void PushTasks() {
1819 std::sort(by_start_min_.begin(), by_start_min_.end(),
1820 StartMinLessThan<Task>);
1821 int64_t usage = 0;
1822 int profile_index = 0;
1823 for (const Task* const task : by_start_min_) {
1824 const IntervalVar* const interval = task->interval;
1825 if (interval->StartMin() == interval->StartMax() &&
1826 interval->EndMin() == interval->EndMax()) {
1827 continue;
1828 }
1829 while (interval->StartMin() > profile_unique_time_[profile_index].time) {
1830 DCHECK(profile_index < profile_unique_time_.size());
1831 ++profile_index;
1832 usage += profile_unique_time_[profile_index].delta;
1833 }
1834 PushTask(task, profile_index, usage);
1835 }
1836 }
1837
1838 // Push the given task to new_start_min, defined as the smallest integer such
1839 // that the profile usage for all tasks, excluding the current one, does not
1840 // exceed capacity_ - task->demand on the interval
1841 // [new_start_min, new_start_min + task->interval->DurationMin() ).
1842 void PushTask(const Task* const task, int profile_index, int64_t usage) {
1843 // Init
1844 const IntervalVar* const interval = task->interval;
1845 const int64_t demand_min = task->DemandMin();
1846 if (demand_min == 0) { // Demand can be zero; nothing to propagate.
1847 return;
1848 }
1849 const int64_t residual_capacity = CapSub(capacity_->Max(), demand_min);
1850 const int64_t duration = task->interval->DurationMin();
1851 const ProfileDelta& first_prof_delta = profile_unique_time_[profile_index];
1852
1853 int64_t new_start_min = interval->StartMin();
1854
1855 DCHECK_GE(first_prof_delta.time, interval->StartMin());
1856 // The check above is with a '>='. Let's first treat the '>' case
1857 if (first_prof_delta.time > interval->StartMin()) {
1858 // There was no profile delta at a time between interval->StartMin()
1859 // (included) and the current one.
1860 // As we don't delete deltas of value 0, this means the current task
1861 // does not contribute to the usage before:
1862 DCHECK((interval->StartMax() >= first_prof_delta.time) ||
1863 (interval->StartMax() >= interval->EndMin()));
1864 // The 'usage' given in argument is valid at first_prof_delta.time. To
1865 // compute the usage at the start min, we need to remove the last delta.
1866 const int64_t usage_at_start_min = CapSub(usage, first_prof_delta.delta);
1867 if (usage_at_start_min > residual_capacity) {
1868 new_start_min = profile_unique_time_[profile_index].time;
1869 }
1870 }
1871
1872 // Influence of current task
1873 const int64_t start_max = interval->StartMax();
1874 const int64_t end_min = interval->EndMin();
1875 ProfileDelta delta_start(start_max, 0);
1876 ProfileDelta delta_end(end_min, 0);
1877 if (interval->MustBePerformed() && start_max < end_min) {
1878 delta_start.delta = +demand_min;
1879 delta_end.delta = -demand_min;
1880 }
1881 while (profile_unique_time_[profile_index].time <
1882 CapAdd(duration, new_start_min)) {
1883 const ProfileDelta& profile_delta = profile_unique_time_[profile_index];
1884 DCHECK(profile_index < profile_unique_time_.size());
1885 // Compensate for current task
1886 if (profile_delta.time == delta_start.time) {
1887 usage -= delta_start.delta;
1888 }
1889 if (profile_delta.time == delta_end.time) {
1890 usage -= delta_end.delta;
1891 }
1892 // Increment time
1893 ++profile_index;
1894 DCHECK(profile_index < profile_unique_time_.size());
1895 // Does it fit?
1896 if (usage > residual_capacity) {
1897 new_start_min = profile_unique_time_[profile_index].time;
1898 }
1899 usage += profile_unique_time_[profile_index].delta;
1900 }
1901 task->interval->SetStartMin(new_start_min);
1902 }
1903
1904 typedef std::vector<ProfileDelta> Profile;
1905
1906 Profile profile_unique_time_;
1907 Profile profile_non_unique_time_;
1908 std::vector<Task*> by_start_min_;
1909 IntVar* const capacity_;
1910};
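
// Illustrative sketch, not part of this file: a deliberately naive version of
// the PushTask() rule above, on hand-picked numbers. It brute-forces the
// smallest start s such that the usage of the other tasks stays at or below
// capacity - demand on [s, s + duration); the real propagator walks the
// sorted ProfileDelta vector instead of testing every integer start.
namespace push_task_sketch {

// Usage of the other tasks at time t: level 2 on [2, 5), level 1 on [5, 7).
int64_t OtherTasksUsage(int64_t t) {
  if (t >= 2 && t < 5) return 2;
  if (t >= 5 && t < 7) return 1;
  return 0;
}

// With capacity 3, demand 2, duration 3 and start_min 0, the earliest
// feasible start is 5: any start in [0, 4] overlaps the level-2 segment.
int64_t NaiveNewStartMin(int64_t start_min, int64_t duration, int64_t capacity,
                         int64_t demand, int64_t horizon) {
  const int64_t residual = capacity - demand;
  for (int64_t start = start_min; start <= horizon; ++start) {
    bool fits = true;
    for (int64_t t = start; t < start + duration; ++t) {
      if (OtherTasksUsage(t) > residual) {
        fits = false;
        break;
      }
    }
    if (fits) return start;
  }
  return horizon;
}

}  // namespace push_task_sketch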
1911
1912// Cumulative idempotent Time-Table.
1913//
1914// This propagator is based on Letort et al. 2012 and Gay et al. 2015.
1915//
1916// TODO(user): fill the description once the incremental aspects are
1917// implemented.
1918//
1919// Worst case: O(n^2 log n) -- really unlikely in practice.
1920// Best case: Omega(1).
1921// Practical: Almost linear in the number of unfixed tasks.
1922template <class Task>
1923class TimeTableSync : public Constraint {
1924 public:
1925 TimeTableSync(Solver* const solver, const std::vector<Task*>& tasks,
1926 IntVar* const capacity)
1927 : Constraint(solver), tasks_(tasks), capacity_(capacity) {
1928 num_tasks_ = tasks_.size();
1929 gap_ = 0;
1930 prev_gap_ = 0;
1931 pos_ = std::numeric_limits<int64_t>::min();
1932 next_pos_ = std::numeric_limits<int64_t>::min();
1933 // Allocate vectors to contain no more than n_tasks.
1934 start_min_.reserve(num_tasks_);
1935 start_max_.reserve(num_tasks_);
1936 end_min_.reserve(num_tasks_);
1937 durations_.reserve(num_tasks_);
1938 demands_.reserve(num_tasks_);
1939 }
1940
1941 ~TimeTableSync() override { gtl::STLDeleteElements(&tasks_); }
1942
1943 void InitialPropagate() override {
1944 // Reset data structures.
1945 BuildEvents();
1946 while (!events_scp_.empty() && !events_ecp_.empty()) {
1947 // Move the sweep line.
1948 pos_ = NextEventTime();
1949 // Update the profile with compulsory part events.
1950 ProcessEventsScp();
1951 ProcessEventsEcp();
1952 // Update minimum capacity (may fail)
1953 capacity_->SetMin(capacity_->Max() - gap_);
1954 // Time to the next possible profile increase.
1955 next_pos_ = NextScpTime();
1956 // Consider new task to schedule.
1957 ProcessEventsPr();
1958 // Filter.
1959 FilterMin();
1960 }
1961 }
1962
1963 void Post() override {
1964 Demon* demon = MakeDelayedConstraintDemon0(
1965 solver(), this, &TimeTableSync::InitialPropagate, "InitialPropagate");
1966 for (Task* const task : tasks_) {
1967 task->WhenAnything(demon);
1968 }
1969 capacity_->WhenRange(demon);
1970 }
1971
1972 void Accept(ModelVisitor* const visitor) const override {
1973 LOG(FATAL) << "Should not be visited";
1974 }
1975
1976 std::string DebugString() const override { return "TimeTableSync"; }
1977
1978 private:
1979 // Task state.
1980 enum State { NONE, READY, CHECK, CONFLICT };
1981
1982 inline int64_t NextScpTime() {
1983 return !events_scp_.empty() ? events_scp_.top().first
1984 : std::numeric_limits<int64_t>::max();
1985 }
1986
1987 inline int64_t NextEventTime() {
1988 int64_t time = std::numeric_limits<int64_t>::max();
1989 if (!events_pr_.empty()) {
1990 time = events_pr_.top().first;
1991 }
1992 if (!events_scp_.empty()) {
1993 int64_t t = events_scp_.top().first;
1994 time = t < time ? t : time;
1995 }
1996 if (!events_ecp_.empty()) {
1997 int64_t t = events_ecp_.top().first;
1998 time = t < time ? t : time;
1999 }
2000 return time;
2001 }
2002
2003 void ProcessEventsScp() {
2004 while (!events_scp_.empty() && events_scp_.top().first == pos_) {
2005 const int64_t task_id = events_scp_.top().second;
2006 events_scp_.pop();
2007 const int64_t old_end_min = end_min_[task_id];
2008 if (states_[task_id] == State::CONFLICT) {
2009 // Update cached values.
2010 const int64_t new_end_min = pos_ + durations_[task_id];
2011 start_min_[task_id] = pos_;
2012 end_min_[task_id] = new_end_min;
2013 // Filter the domain
2014 tasks_[task_id]->interval->SetStartMin(pos_);
2015 }
2016 // The task is scheduled.
2017 states_[task_id] = State::READY;
2018 // Update the profile if the task has a compulsory part.
2019 if (pos_ < end_min_[task_id]) {
2020 gap_ -= demands_[task_id];
2021 if (old_end_min <= pos_) {
2022 events_ecp_.push(kv(end_min_[task_id], task_id));
2023 }
2024 }
2025 }
2026 }
2027
2028 void ProcessEventsEcp() {
2029 while (!events_ecp_.empty() && events_ecp_.top().first == pos_) {
2030 const int64_t task_id = events_ecp_.top().second;
2031 events_ecp_.pop();
2032 // Update the event if it is not up to date.
2033 if (pos_ < end_min_[task_id]) {
2034 events_ecp_.push(kv(end_min_[task_id], task_id));
2035 } else {
2036 gap_ += demands_[task_id];
2037 }
2038 }
2039 }
2040
2041 void ProcessEventsPr() {
2042 while (!events_pr_.empty() && events_pr_.top().first == pos_) {
2043 const int64_t task_id = events_pr_.top().second;
2044 events_pr_.pop();
2045 // The task is in conflict with the current profile.
2046 if (demands_[task_id] > gap_) {
2047 states_[task_id] = State::CONFLICT;
2048 conflict_.push(kv(demands_[task_id], task_id));
2049 continue;
2050 }
2051 // The task is not in conflict for the moment.
2052 if (next_pos_ < end_min_[task_id]) {
2053 states_[task_id] = State::CHECK;
2054 check_.push(kv(demands_[task_id], task_id));
2055 continue;
2056 }
2057 // The task is not in conflict and can be scheduled.
2058 states_[task_id] = State::READY;
2059 }
2060 }
2061
2062 void FilterMin() {
2063 // The profile exceeds the capacity.
2064 capacity_->SetMin(capacity_->Max() - gap_);
2065 // The profile has increased.
2066 if (gap_ < prev_gap_) {
2067 // Reconsider the task in check state.
2068 while (!check_.empty() && demands_[check_.top().second] > gap_) {
2069 const int64_t task_id = check_.top().second;
2070 check_.pop();
2071 if (states_[task_id] == State::CHECK && pos_ < end_min_[task_id]) {
2072 states_[task_id] = State::CONFLICT;
2073 conflict_.push(kv(demands_[task_id], task_id));
2074 continue;
2075 }
2076 states_[task_id] = State::READY;
2077 }
2078 prev_gap_ = gap_;
2079 }
2080 // The profile has decreased.
2081 if (gap_ > prev_gap_) {
2082 // Reconsider the tasks in conflict.
2083 while (!conflict_.empty() && demands_[conflict_.top().second] <= gap_) {
2084 const int64_t task_id = conflict_.top().second;
2085 conflict_.pop();
2086 if (states_[task_id] != State::CONFLICT) {
2087 continue;
2088 }
2089 const int64_t old_end_min = end_min_[task_id];
2090 // Update the cache.
2091 start_min_[task_id] = pos_;
2092 end_min_[task_id] = pos_ + durations_[task_id];
2093 // Filter the domain.
2094 tasks_[task_id]->interval->SetStartMin(pos_); // should not fail.
2095 // The task still has to be checked.
2096 if (next_pos_ < end_min_[task_id]) {
2097 states_[task_id] = State::CHECK;
2098 check_.push(kv(demands_[task_id], task_id));
2099 } else {
2100 states_[task_id] = State::READY;
2101 }
2102 // Update possible compulsory part.
2103 const int64_t start_max = start_max_[task_id];
2104 if (start_max >= old_end_min && start_max < end_min_[task_id]) {
2105 events_ecp_.push(kv(end_min_[task_id], task_id));
2106 }
2107 }
2108 }
2109 prev_gap_ = gap_;
2110 }
2111
2112 void BuildEvents() {
2113 // Reset the sweep line.
2114 pos_ = std::numeric_limits<int64_t>::min();
2115 next_pos_ = std::numeric_limits<int64_t>::min();
2116 gap_ = capacity_->Max();
2117 prev_gap_ = capacity_->Max();
2118 // Reset dynamic states.
2119 conflict_ = min_heap();
2120 check_ = max_heap();
2121 // Reset profile events.
2122 events_pr_ = min_heap();
2123 events_scp_ = min_heap();
2124 events_ecp_ = min_heap();
2125 // Reset cache.
2126 start_min_.clear();
2127 start_max_.clear();
2128 end_min_.clear();
2129 durations_.clear();
2130 demands_.clear();
2131 states_.clear();
2132 // Build events.
2133 for (int i = 0; i < num_tasks_; i++) {
2134 const int64_t s_min = tasks_[i]->interval->StartMin();
2135 const int64_t s_max = tasks_[i]->interval->StartMax();
2136 const int64_t e_min = tasks_[i]->interval->EndMin();
2137 // Cache the values.
2138 start_min_.push_back(s_min);
2139 start_max_.push_back(s_max);
2140 end_min_.push_back(e_min);
2141 durations_.push_back(tasks_[i]->interval->DurationMin());
2142 demands_.push_back(tasks_[i]->DemandMin());
2143 // Reset task state.
2144 states_.push_back(State::NONE);
2145 // Start compulsory part event.
2146 events_scp_.push(kv(s_max, i));
2147 // Pruning event only if the start time of the task is not fixed.
2148 if (s_min != s_max) {
2149 events_pr_.push(kv(s_min, i));
2150 }
2151 // End of compulsory part only if the task has a compulsory part.
2152 if (s_max < e_min) {
2153 events_ecp_.push(kv(e_min, i));
2154 }
2155 }
2156 }
2157
2158 int64_t num_tasks_;
2159 std::vector<Task*> tasks_;
2160 IntVar* const capacity_;
2161
2162 std::vector<int64_t> start_min_;
2163 std::vector<int64_t> start_max_;
2164 std::vector<int64_t> end_min_;
2165 std::vector<int64_t> end_max_;
2166 std::vector<int64_t> durations_;
2167 std::vector<int64_t> demands_;
2168
2169 // Pair key value.
2170 typedef std::pair<int64_t, int64_t> kv;
2171 typedef std::priority_queue<kv, std::vector<kv>, std::greater<kv>> min_heap;
2172 typedef std::priority_queue<kv, std::vector<kv>, std::less<kv>> max_heap;
2173
2174 // Profile events.
2175 min_heap events_pr_;
2176 min_heap events_scp_;
2177 min_heap events_ecp_;
2178
2179 // Task state.
2180 std::vector<State> states_;
2181 min_heap conflict_;
2182 max_heap check_;
2183
2184 // Sweep line state.
2185 int64_t pos_;
2186 int64_t next_pos_;
2187 int64_t gap_;
2188 int64_t prev_gap_;
2189};
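
// Illustrative sketch, not part of this file: the event queues used by
// TimeTableSync above store (time, task id) pairs in a std::priority_queue
// with std::greater, so top() always yields the event with the smallest time,
// ties broken by the smaller task id -- exactly what the sweep line consumes
// first.
namespace timetable_event_sketch {

typedef std::pair<int64_t, int64_t> Event;  // (time, task id)
typedef std::priority_queue<Event, std::vector<Event>, std::greater<Event>>
    MinHeap;

// Returns {{2, 0}, {2, 3}, {4, 1}} for the three events pushed below.
std::vector<Event> PopInSweepOrder() {
  MinHeap events;
  events.push(Event(4, 1));
  events.push(Event(2, 3));
  events.push(Event(2, 0));
  std::vector<Event> order;
  while (!events.empty()) {
    order.push_back(events.top());
    events.pop();
  }
  return order;
}

}  // namespace timetable_event_sketch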
2190
2191class CumulativeConstraint : public Constraint {
2192 public:
2193 CumulativeConstraint(Solver* const s,
2194 const std::vector<IntervalVar*>& intervals,
2195 const std::vector<int64_t>& demands,
2196 IntVar* const capacity, absl::string_view name)
2197 : Constraint(s),
2198 capacity_(capacity),
2199 intervals_(intervals),
2200 demands_(demands) {
2201 tasks_.reserve(intervals.size());
2202 for (int i = 0; i < intervals.size(); ++i) {
2203 tasks_.push_back(CumulativeTask(intervals[i], demands[i]));
2204 }
2205 }
2206
2207 // This type is neither copyable nor movable.
2208 CumulativeConstraint(const CumulativeConstraint&) = delete;
2209 CumulativeConstraint& operator=(const CumulativeConstraint&) = delete;
2210
2211 void Post() override {
2212 // For the cumulative constraint, there are many propagators, and they
2213 // don't dominate each other. So the strongest propagation is obtained
2214 // by posting a bunch of different propagators.
2215 const ConstraintSolverParameters& params = solver()->const_parameters();
2216 if (params.use_cumulative_time_table()) {
2217 if (params.use_cumulative_time_table_sync()) {
2218 PostOneSidedConstraint(false, false, true);
2219 PostOneSidedConstraint(true, false, true);
2220 } else {
2221 PostOneSidedConstraint(false, false, false);
2222 PostOneSidedConstraint(true, false, false);
2223 }
2224 }
2225 if (params.use_cumulative_edge_finder()) {
2226 PostOneSidedConstraint(false, true, false);
2227 PostOneSidedConstraint(true, true, false);
2228 }
2229 if (params.use_sequence_high_demand_tasks()) {
2230 PostHighDemandSequenceConstraint();
2231 }
2232 if (params.use_all_possible_disjunctions()) {
2233 PostAllDisjunctions();
2234 }
2235 }
2236
2237 void InitialPropagate() override {
2238 // Nothing to do: this constraint delegates all the work to other classes
2239 }
2240
2241 void Accept(ModelVisitor* const visitor) const override {
2242 // TODO(user): Build arrays on demand?
2243 visitor->BeginVisitConstraint(ModelVisitor::kCumulative, this);
2244 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
2245 intervals_);
2246 visitor->VisitIntegerArrayArgument(ModelVisitor::kDemandsArgument,
2247 demands_);
2248 visitor->VisitIntegerExpressionArgument(ModelVisitor::kCapacityArgument,
2249 capacity_);
2250 visitor->EndVisitConstraint(ModelVisitor::kCumulative, this);
2251 }
2252
2253 std::string DebugString() const override {
2254 return absl::StrFormat("CumulativeConstraint([%s], %s)",
2255 JoinDebugString(tasks_, ", "),
2256 capacity_->DebugString());
2257 }
2258
2259 private:
2260 // Post temporal disjunctions for tasks that cannot overlap.
2261 void PostAllDisjunctions() {
2262 for (int i = 0; i < intervals_.size(); ++i) {
2263 IntervalVar* const interval_i = intervals_[i];
2264 if (interval_i->MayBePerformed()) {
2265 for (int j = i + 1; j < intervals_.size(); ++j) {
2266 IntervalVar* const interval_j = intervals_[j];
2267 if (interval_j->MayBePerformed()) {
2268 if (CapAdd(tasks_[i].demand, tasks_[j].demand) > capacity_->Max()) {
2269 Constraint* const constraint =
2270 solver()->MakeTemporalDisjunction(interval_i, interval_j);
2271 solver()->AddConstraint(constraint);
2272 }
2273 }
2274 }
2275 }
2276 }
2277 }
2278
2279 // Post a Sequence constraint for tasks that require strictly more than half
2280 // of the resource.
2281 void PostHighDemandSequenceConstraint() {
2282 Constraint* constraint = nullptr;
2283 { // Need a block to avoid memory leaks in case the AddConstraint fails
2284 std::vector<IntervalVar*> high_demand_intervals;
2285 high_demand_intervals.reserve(intervals_.size());
2286 for (int i = 0; i < demands_.size(); ++i) {
2287 const int64_t demand = tasks_[i].demand;
2288 // Consider two tasks with demand d1 and d2 such that
2289 // d1 * 2 > capacity_ and d2 * 2 > capacity_.
2290 // Then d1 + d2 = 1/2 (d1 * 2 + d2 * 2)
2291 // > 1/2 (capacity_ + capacity_)
2292 // = capacity_.
2293 // Therefore these two tasks cannot overlap.
2294 if (demand * 2 > capacity_->Max() &&
2295 tasks_[i].interval->MayBePerformed()) {
2296 high_demand_intervals.push_back(tasks_[i].interval);
2297 }
2298 }
2299 if (high_demand_intervals.size() >= 2) {
2300 // If there are less than 2 such intervals, the constraint would do
2301 // nothing
2302 std::string seq_name = absl::StrCat(name(), "-HighDemandSequence");
2303 constraint = solver()->MakeDisjunctiveConstraint(high_demand_intervals,
2304 seq_name);
2305 }
2306 }
2307 if (constraint != nullptr) {
2308 solver()->AddConstraint(constraint);
2309 }
2310 }
2311
2312 // Populate the given vector with useful tasks, meaning the ones on which
2313 // some propagation can be done
2314 void PopulateVectorUsefulTasks(
2315 bool mirror, std::vector<CumulativeTask*>* const useful_tasks) {
2316 DCHECK(useful_tasks->empty());
2317 for (int i = 0; i < tasks_.size(); ++i) {
2318 const CumulativeTask& original_task = tasks_[i];
2319 IntervalVar* const interval = original_task.interval;
2320 // Check if the demand exceeds the capacity.
2321 if (original_task.demand > capacity_->Max()) {
2322 interval->SetPerformed(false);
2323 }
2324 // Add to the useful_task vector if it may be performed and if it
2325 // actually consumes some of the resource.
2326 if (interval->MayBePerformed() && original_task.demand > 0) {
2327 Solver* const s = solver();
2328 IntervalVar* const original_interval = original_task.interval;
2329 IntervalVar* const interval =
2330 mirror ? s->MakeMirrorInterval(original_interval)
2331 : original_interval;
2332 IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(interval);
2333 useful_tasks->push_back(
2334 new CumulativeTask(relaxed_max, original_task.demand));
2335 }
2336 }
2337 }
2338
2339 // Makes and returns an edge-finder or a time table, or nullptr if it is not
2340 // necessary.
2341 Constraint* MakeOneSidedConstraint(bool mirror, bool edge_finder,
2342 bool tt_sync) {
2343 std::vector<CumulativeTask*> useful_tasks;
2344 PopulateVectorUsefulTasks(mirror, &useful_tasks);
2345 if (useful_tasks.empty()) {
2346 return nullptr;
2347 } else {
2348 Solver* const s = solver();
2349 if (edge_finder) {
2350 const ConstraintSolverParameters& params = solver()->const_parameters();
2351 return useful_tasks.size() < params.max_edge_finder_size()
2352 ? s->RevAlloc(new EdgeFinder<CumulativeTask>(s, useful_tasks,
2353 capacity_))
2354 : nullptr;
2355 }
2356 if (tt_sync) {
2357 return s->RevAlloc(
2358 new TimeTableSync<CumulativeTask>(s, useful_tasks, capacity_));
2359 }
2360 return s->RevAlloc(
2361 new CumulativeTimeTable<CumulativeTask>(s, useful_tasks, capacity_));
2362 }
2363 }
2364
2365 // Post a straight or mirrored edge-finder, if needed
2366 void PostOneSidedConstraint(bool mirror, bool edge_finder, bool tt_sync) {
2367 Constraint* const constraint =
2368 MakeOneSidedConstraint(mirror, edge_finder, tt_sync);
2369 if (constraint != nullptr) {
2370 solver()->AddConstraint(constraint);
2371 }
2372 }
2373
2374 // Capacity of the cumulative resource
2375 IntVar* const capacity_;
2376
2377 // The tasks that share the cumulative resource
2378 std::vector<CumulativeTask> tasks_;
2379
2380 // Array of intervals for the visitor.
2381 const std::vector<IntervalVar*> intervals_;
2382 // Array of demands for the visitor.
2383 const std::vector<int64_t> demands_;
2384};
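
// Illustrative sketch, not part of this file: which of the propagators above
// CumulativeConstraint::Post() actually posts is controlled by the
// ConstraintSolverParameters fields it reads. The function below builds a
// solver that keeps only the TimeTableSync pair, assuming
// Solver::DefaultSolverParameters() and the standard protobuf setters for
// those fields.
void MakeSolverUsingTimeTableSyncOnly() {
  ConstraintSolverParameters parameters = Solver::DefaultSolverParameters();
  parameters.set_use_cumulative_time_table(true);
  parameters.set_use_cumulative_time_table_sync(true);  // Post TimeTableSync.
  parameters.set_use_cumulative_edge_finder(false);     // Skip EdgeFinder.
  parameters.set_use_sequence_high_demand_tasks(false);
  parameters.set_use_all_possible_disjunctions(false);
  Solver solver("cumulative_parameters_sketch", parameters);
  // Cumulative constraints created on 'solver' now post only the two
  // one-sided TimeTableSync propagators.
}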
2385
2386class VariableDemandCumulativeConstraint : public Constraint {
2387 public:
2388 VariableDemandCumulativeConstraint(Solver* const s,
2389 const std::vector<IntervalVar*>& intervals,
2390 const std::vector<IntVar*>& demands,
2391 IntVar* const capacity,
2392 absl::string_view name)
2393 : Constraint(s),
2394 capacity_(capacity),
2395 intervals_(intervals),
2396 demands_(demands) {
2397 tasks_.reserve(intervals.size());
2398 for (int i = 0; i < intervals.size(); ++i) {
2399 tasks_.push_back(VariableCumulativeTask(intervals[i], demands[i]));
2400 }
2401 }
2402
2403 // This type is neither copyable nor movable.
2404 VariableDemandCumulativeConstraint(
2405 const VariableDemandCumulativeConstraint&) = delete;
2406 VariableDemandCumulativeConstraint& operator=(
2407 const VariableDemandCumulativeConstraint&) = delete;
2408
2409 void Post() override {
2410 // For the cumulative constraint, there are many propagators, and they
2411 // don't dominate each other. So the strongest propagation is obtained
2412 // by posting a bunch of different propagators.
2413 const ConstraintSolverParameters& params = solver()->const_parameters();
2414 if (params.use_cumulative_time_table()) {
2415 PostOneSidedConstraint(false, false, false);
2416 PostOneSidedConstraint(true, false, false);
2417 }
2418 if (params.use_cumulative_edge_finder()) {
2419 PostOneSidedConstraint(false, true, false);
2420 PostOneSidedConstraint(true, true, false);
2421 }
2422 if (params.use_sequence_high_demand_tasks()) {
2423 PostHighDemandSequenceConstraint();
2424 }
2425 if (params.use_all_possible_disjunctions()) {
2426 PostAllDisjunctions();
2427 }
2428 }
2429
2430 void InitialPropagate() override {
2431 // Nothing to do: this constraint delegates all the work to other classes
2432 }
2433
2434 void Accept(ModelVisitor* const visitor) const override {
2435 // TODO(user): Build arrays on demand?
2436 visitor->BeginVisitConstraint(ModelVisitor::kCumulative, this);
2437 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
2438 intervals_);
2439 visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kDemandsArgument,
2440 demands_);
2441 visitor->VisitIntegerExpressionArgument(ModelVisitor::kCapacityArgument,
2442 capacity_);
2443 visitor->EndVisitConstraint(ModelVisitor::kCumulative, this);
2444 }
2445
2446 std::string DebugString() const override {
2447 return absl::StrFormat("VariableDemandCumulativeConstraint([%s], %s)",
2448 JoinDebugString(tasks_, ", "),
2449 capacity_->DebugString());
2450 }
2451
2452 private:
2453 // Post temporal disjunctions for tasks that cannot overlap.
2454 void PostAllDisjunctions() {
2455 for (int i = 0; i < intervals_.size(); ++i) {
2456 IntervalVar* const interval_i = intervals_[i];
2457 if (interval_i->MayBePerformed()) {
2458 for (int j = i + 1; j < intervals_.size(); ++j) {
2459 IntervalVar* const interval_j = intervals_[j];
2460 if (interval_j->MayBePerformed()) {
2461 if (CapAdd(tasks_[i].demand->Min(), tasks_[j].demand->Min()) >
2462 capacity_->Max()) {
2463 Constraint* const constraint =
2464 solver()->MakeTemporalDisjunction(interval_i, interval_j);
2465 solver()->AddConstraint(constraint);
2466 }
2467 }
2468 }
2469 }
2470 }
2471 }
2472
2473 // Post a Sequence constraint for tasks that require strictly more than half
2474 // of the resource.
2475 void PostHighDemandSequenceConstraint() {
2476 Constraint* constraint = nullptr;
2477 { // Need a block to avoid memory leaks in case the AddConstraint fails
2478 std::vector<IntervalVar*> high_demand_intervals;
2479 high_demand_intervals.reserve(intervals_.size());
2480 for (int i = 0; i < demands_.size(); ++i) {
2481 const int64_t demand = tasks_[i].demand->Min();
2482 // Consider two tasks with demand d1 and d2 such that
2483 // d1 * 2 > capacity_ and d2 * 2 > capacity_.
2484 // Then d1 + d2 = 1/2 (d1 * 2 + d2 * 2)
2485 // > 1/2 (capacity_ + capacity_)
2486 // = capacity_.
2487 // Therefore these two tasks cannot overlap.
2488 if (demand * 2 > capacity_->Max() &&
2489 tasks_[i].interval->MayBePerformed()) {
2490 high_demand_intervals.push_back(tasks_[i].interval);
2491 }
2492 }
2493 if (high_demand_intervals.size() >= 2) {
2494 // If there are less than 2 such intervals, the constraint would do
2495 // nothing
2496 const std::string seq_name =
2497 absl::StrCat(name(), "-HighDemandSequence");
2498 constraint = solver()->MakeStrictDisjunctiveConstraint(
2499 high_demand_intervals, seq_name);
2500 }
2501 }
2502 if (constraint != nullptr) {
2503 solver()->AddConstraint(constraint);
2504 }
2505 }
2506
2507 // Populates the given vector with useful tasks, meaning the ones on which
2508 // some propagation can be done
2509 void PopulateVectorUsefulTasks(
2510 bool mirror, std::vector<VariableCumulativeTask*>* const useful_tasks) {
2511 DCHECK(useful_tasks->empty());
2512 for (int i = 0; i < tasks_.size(); ++i) {
2513 const VariableCumulativeTask& original_task = tasks_[i];
2514 IntervalVar* const interval = original_task.interval;
2515 // Check if the demand exceeds the capacity.
2516 if (original_task.demand->Min() > capacity_->Max()) {
2517 interval->SetPerformed(false);
2518 }
2519 // Add to the useful_task vector if it may be performed and if it
2520 // may actually consume some of the resource.
2521 if (interval->MayBePerformed() && original_task.demand->Max() > 0) {
2522 Solver* const s = solver();
2523 IntervalVar* const original_interval = original_task.interval;
2524 IntervalVar* const interval =
2525 mirror ? s->MakeMirrorInterval(original_interval)
2526 : original_interval;
2527 IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(interval);
2528 useful_tasks->push_back(
2529 new VariableCumulativeTask(relaxed_max, original_task.demand));
2530 }
2531 }
2532 }
2533
2534 // Makes and returns an edge-finder or a time table, or nullptr if it is not
2535 // necessary.
2536 Constraint* MakeOneSidedConstraint(bool mirror, bool edge_finder,
2537 bool tt_sync) {
2538 std::vector<VariableCumulativeTask*> useful_tasks;
2539 PopulateVectorUsefulTasks(mirror, &useful_tasks);
2540 if (useful_tasks.empty()) {
2541 return nullptr;
2542 } else {
2543 Solver* const s = solver();
2544 if (edge_finder) {
2545 return s->RevAlloc(
2546 new EdgeFinder<VariableCumulativeTask>(s, useful_tasks, capacity_));
2547 }
2548 if (tt_sync) {
2549 return s->RevAlloc(new TimeTableSync<VariableCumulativeTask>(
2550 s, useful_tasks, capacity_));
2551 }
2552 return s->RevAlloc(new CumulativeTimeTable<VariableCumulativeTask>(
2553 s, useful_tasks, capacity_));
2554 }
2555 }
2556
2557 // Post a straight or mirrored edge-finder, if needed
2558 void PostOneSidedConstraint(bool mirror, bool edge_finder, bool tt_sync) {
2559 Constraint* const constraint =
2560 MakeOneSidedConstraint(mirror, edge_finder, tt_sync);
2561 if (constraint != nullptr) {
2562 solver()->AddConstraint(constraint);
2563 }
2564 }
2565
2566 // Capacity of the cumulative resource
2567 IntVar* const capacity_;
2568
2569 // The tasks that share the cumulative resource
2570 std::vector<VariableCumulativeTask> tasks_;
2571
2572 // Array of intervals for the visitor.
2573 const std::vector<IntervalVar*> intervals_;
2574 // Array of demands for the visitor.
2575 const std::vector<IntVar*> demands_;
2576};
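
// Illustrative sketch, not part of this file: the interval wrapping done in
// PopulateVectorUsefulTasks() above. Each useful task wraps the original
// interval so that (1) it is optionally mirrored, which lets the same
// one-sided propagator push end maxes when posted on the mirrored problem,
// and (2) it is relaxed on the max side, so the propagator only ever tightens
// minima. This only shows the call pattern; MakeFixedDurationIntervalVar and
// its arguments are assumptions made for the illustration.
void WrapIntervalForOneSidedPropagation(Solver* const s, bool mirror) {
  IntervalVar* const original =
      s->MakeFixedDurationIntervalVar(0, 10, 4, false, "original");
  IntervalVar* const oriented =
      mirror ? s->MakeMirrorInterval(original) : original;
  IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(oriented);
  // 'relaxed_max' is the interval a one-sided propagator reasons about.
  (void)relaxed_max;
}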
2577} // namespace
2578
2579// Sequence Constraint
2580
2581// ----- Public class -----
2582
2583DisjunctiveConstraint::DisjunctiveConstraint(
2584 Solver* const s, const std::vector<IntervalVar*>& intervals,
2585 const std::string& name)
2586 : Constraint(s), intervals_(intervals) {
2587 if (!name.empty()) {
2588 set_name(name);
2589 }
2590 transition_time_ = [](int64_t x, int64_t y) { return 0; };
2591}
2592
2594
2596 std::function<int64_t(int64_t, int64_t)> transition_time) {
2597 if (transition_time != nullptr) {
2598 transition_time_ = transition_time;
2599 } else {
2600 transition_time_ = [](int64_t x, int64_t y) { return 0; };
2601 }
2602}
2603
2604// ---------- Factory methods ----------
2605
2606DisjunctiveConstraint* Solver::MakeDisjunctiveConstraint(
2607 const std::vector<IntervalVar*>& intervals, const std::string& name) {
2608 return RevAlloc(new FullDisjunctiveConstraint(this, intervals, name, false));
2609}
2610
2611DisjunctiveConstraint* Solver::MakeStrictDisjunctiveConstraint(
2612 const std::vector<IntervalVar*>& intervals, const std::string& name) {
2613 return RevAlloc(new FullDisjunctiveConstraint(this, intervals, name, true));
2614}
2615
2616// Demands are constant
2617
2618Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2619 const std::vector<int64_t>& demands,
2620 int64_t capacity, const std::string& name) {
2621 CHECK_EQ(intervals.size(), demands.size());
2622 for (int i = 0; i < intervals.size(); ++i) {
2623 CHECK_GE(demands[i], 0);
2624 }
2625 if (capacity == 1 && AreAllOnes(demands)) {
2626 return MakeDisjunctiveConstraint(intervals, name);
2627 }
2628 return RevAlloc(new CumulativeConstraint(this, intervals, demands,
2629 MakeIntConst(capacity), name));
2630}
2631
2632Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2633 const std::vector<int>& demands,
2634 int64_t capacity, const std::string& name) {
2635 return MakeCumulative(intervals, ToInt64Vector(demands), capacity, name);
2636}
2637
2638Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2639 const std::vector<int64_t>& demands,
2640 IntVar* const capacity,
2641 absl::string_view name) {
2642 CHECK_EQ(intervals.size(), demands.size());
2643 for (int i = 0; i < intervals.size(); ++i) {
2644 CHECK_GE(demands[i], 0);
2645 }
2646 return RevAlloc(
2647 new CumulativeConstraint(this, intervals, demands, capacity, name));
2648}
2649
2650Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2651 const std::vector<int>& demands,
2652 IntVar* const capacity,
2653 const std::string& name) {
2654 return MakeCumulative(intervals, ToInt64Vector(demands), capacity, name);
2655}
2656
2657// Demands are variable
2658
2659Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2660 const std::vector<IntVar*>& demands,
2661 int64_t capacity, const std::string& name) {
2662 CHECK_EQ(intervals.size(), demands.size());
2663 for (int i = 0; i < intervals.size(); ++i) {
2664 CHECK_GE(demands[i]->Min(), 0);
2665 }
2666 if (AreAllBound(demands)) {
2667 std::vector<int64_t> fixed_demands(demands.size());
2668 for (int i = 0; i < demands.size(); ++i) {
2669 fixed_demands[i] = demands[i]->Value();
2670 }
2671 return MakeCumulative(intervals, fixed_demands, capacity, name);
2672 }
2673 return RevAlloc(new VariableDemandCumulativeConstraint(
2674 this, intervals, demands, MakeIntConst(capacity), name));
2675}
2676
2677Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2678 const std::vector<IntVar*>& demands,
2679 IntVar* const capacity,
2680 const std::string& name) {
2681 CHECK_EQ(intervals.size(), demands.size());
2682 for (int i = 0; i < intervals.size(); ++i) {
2683 CHECK_GE(demands[i]->Min(), 0);
2684 }
2685 if (AreAllBound(demands)) {
2686 std::vector<int64_t> fixed_demands(demands.size());
2687 for (int i = 0; i < demands.size(); ++i) {
2688 fixed_demands[i] = demands[i]->Value();
2689 }
2690 return MakeCumulative(intervals, fixed_demands, capacity, name);
2691 }
2692 return RevAlloc(new VariableDemandCumulativeConstraint(
2693 this, intervals, demands, capacity, name));
2694}
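
// Illustrative sketch, not part of this file: using the MakeCumulative()
// factories above to post a small cumulative model. The horizon, durations
// and demands are arbitrary, and MakeFixedDurationIntervalVar is an assumed
// Solver factory used only for the illustration.
void CumulativeUsageSketch() {
  Solver solver("cumulative_usage_sketch");
  std::vector<IntervalVar*> intervals;
  for (int i = 0; i < 3; ++i) {
    intervals.push_back(solver.MakeFixedDurationIntervalVar(
        /*start_min=*/0, /*start_max=*/10, /*duration=*/4, /*optional=*/false,
        absl::StrCat("task_", i)));
  }
  const std::vector<int64_t> demands = {2, 2, 1};
  // Capacity 3: the two demand-2 tasks can never overlap each other.
  solver.AddConstraint(
      solver.MakeCumulative(intervals, demands, 3, "cumulative_sketch"));
}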
2695} // namespace operations_research