Google OR-Tools v9.15
a fast and portable software suite for combinatorial optimization
disjunctive.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cstdint>
18#include <utility>
19#include <vector>
20
21#include "absl/algorithm/container.h"
22#include "absl/base/log_severity.h"
23#include "absl/cleanup/cleanup.h"
24#include "absl/log/check.h"
25#include "absl/log/log.h"
26#include "absl/types/span.h"
28#include "ortools/sat/integer.h"
32#include "ortools/sat/model.h"
38#include "ortools/sat/util.h"
40#include "ortools/util/sort.h"
42
43namespace operations_research {
44namespace sat {
45
46void AddDisjunctive(const std::vector<Literal>& enforcement_literals,
47 const std::vector<IntervalVariable>& intervals,
48 Model* model) {
49  // Depending on the parameters, create all pairs of conditional precedences.
50 // TODO(user): create them dynamically instead?
51 const auto& params = *model->GetOrCreate<SatParameters>();
52 if (intervals.size() <=
53 params.max_size_to_create_precedence_literals_in_disjunctive() &&
54 params.use_strong_propagation_in_disjunctive() &&
55 enforcement_literals.empty()) {
57 }
58
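  // Special case: if every interval is mandatory and has a fixed size of 1,
  // the disjunctive reduces to an all_different constraint on the start
  // variables, which is delegated to AllDifferentOnBounds() below.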
59  bool is_all_different = true;
60  auto* repository = model->GetOrCreate<IntervalsRepository>();
61  for (const IntervalVariable var : intervals) {
62 if (repository->IsOptional(var) || repository->MinSize(var) != 1 ||
63 repository->MaxSize(var) != 1) {
64 is_all_different = false;
65 break;
66 }
67 }
68 if (is_all_different) {
69 std::vector<AffineExpression> starts;
70 starts.reserve(intervals.size());
71 for (const IntervalVariable interval : intervals) {
72 starts.push_back(repository->Start(interval));
73 }
74 model->Add(AllDifferentOnBounds(enforcement_literals, starts));
75 return;
76 }
77
78 auto* watcher = model->GetOrCreate<GenericLiteralWatcher>();
79 if (intervals.size() > 2 && params.use_combined_no_overlap() &&
80 enforcement_literals.empty()) {
81 model->GetOrCreate<CombinedDisjunctive<true>>()->AddNoOverlap(intervals);
82 model->GetOrCreate<CombinedDisjunctive<false>>()->AddNoOverlap(intervals);
83 return;
84 }
85
86  auto* helper = repository->GetOrCreateHelper(
87      enforcement_literals, intervals, /*register_as_disjunctive_helper=*/true);
88
89 // Experiments to use the timetable only to propagate the disjunctive.
90 if (/*DISABLES_CODE*/ (false && enforcement_literals.empty())) {
91 const AffineExpression one(IntegerValue(1));
92 std::vector<AffineExpression> demands(intervals.size(), one);
93 SchedulingDemandHelper* demands_helper =
94 repository->GetOrCreateDemandHelper(helper, demands);
95
96 TimeTablingPerTask* timetable =
97 new TimeTablingPerTask(one, helper, demands_helper, model);
98 timetable->RegisterWith(watcher);
99 model->TakeOwnership(timetable);
100 return;
101 }
102
103 if (intervals.size() == 2) {
104 DisjunctiveWithTwoItems* propagator = new DisjunctiveWithTwoItems(helper);
105 propagator->RegisterWith(watcher);
106 model->TakeOwnership(propagator);
107 } else {
108 // We decided to create the propagators in this particular order, but it
109 // shouldn't matter much because of the different priorities used.
110 if (!params.use_strong_propagation_in_disjunctive()) {
111 // This one will not propagate anything if we added all precedence
112 // literals since the linear propagator will already do that in that case.
113 DisjunctiveSimplePrecedences* simple_precedences =
114 new DisjunctiveSimplePrecedences(helper, model);
115 const int id = simple_precedences->RegisterWith(watcher);
116 watcher->SetPropagatorPriority(id, 1);
117 model->TakeOwnership(simple_precedences);
118 }
119 {
120 // Only one direction is needed by this one.
121 DisjunctiveOverloadChecker* overload_checker =
122 new DisjunctiveOverloadChecker(helper, model);
123 const int id = overload_checker->RegisterWith(watcher);
124 watcher->SetPropagatorPriority(id, 1);
125 model->TakeOwnership(overload_checker);
126 }
127 for (const bool time_direction : {true, false}) {
128 DisjunctiveDetectablePrecedences* detectable_precedences =
129 new DisjunctiveDetectablePrecedences(time_direction, helper, model);
130 const int id = detectable_precedences->RegisterWith(watcher);
131 watcher->SetPropagatorPriority(id, 2);
132 model->TakeOwnership(detectable_precedences);
133 }
134 for (const bool time_direction : {true, false}) {
135 DisjunctiveNotLast* not_last =
136 new DisjunctiveNotLast(time_direction, helper, model);
137 const int id = not_last->RegisterWith(watcher);
138 watcher->SetPropagatorPriority(id, 3);
139 model->TakeOwnership(not_last);
140 }
141 for (const bool time_direction : {true, false}) {
142 DisjunctiveEdgeFinding* edge_finding =
143 new DisjunctiveEdgeFinding(time_direction, helper, model);
144 const int id = edge_finding->RegisterWith(watcher);
145 watcher->SetPropagatorPriority(id, 4);
146 model->TakeOwnership(edge_finding);
147 }
148 }
149
150  // Note that we keep this one even when there are just two intervals. This is
151 // because it might push a variable that is after both of the intervals
152 // using the fact that they are in disjunction.
153 if (params.use_precedences_in_disjunctive_constraint() &&
154 !params.use_combined_no_overlap()) {
155    // Let's try to exploit linear3 too.
156 model->GetOrCreate<LinearPropagator>()->SetPushAffineUbForBinaryRelation();
157
158 for (const bool time_direction : {true, false}) {
159 DisjunctivePrecedences* precedences =
160 new DisjunctivePrecedences(time_direction, helper, model);
161 const int id = precedences->RegisterWith(watcher);
162 watcher->SetPropagatorPriority(id, 5);
163 model->TakeOwnership(precedences);
164 }
165 }
166}
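
// Illustrative usage sketch (not part of this file; `model` and `intervals`
// are assumed to have been created elsewhere):
//
//   Model model;
//   std::vector<IntervalVariable> intervals = ...;  // one interval per task.
//   AddDisjunctive(/*enforcement_literals=*/{}, intervals, &model);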
167
169 absl::Span<const IntervalVariable> intervals, Model* model) {
170 auto* repo = model->GetOrCreate<IntervalsRepository>();
171 for (int i = 0; i < intervals.size(); ++i) {
172 for (int j = i + 1; j < intervals.size(); ++j) {
173 repo->CreateDisjunctivePrecedenceLiteral(intervals[i], intervals[j]);
174 }
175 }
176}
177
178void TaskSet::AddEntry(const Entry& e) {
179 int j = sorted_tasks_.size();
180 sorted_tasks_.push_back(e);
181 while (j > 0 && sorted_tasks_[j - 1].start_min > e.start_min) {
182 sorted_tasks_[j] = sorted_tasks_[j - 1];
183 --j;
184 }
185 sorted_tasks_[j] = e;
186 DCHECK(std::is_sorted(sorted_tasks_.begin(), sorted_tasks_.end()));
187
188 // If the task is added after optimized_restart_, we know that we don't need
189  // to scan the tasks before optimized_restart_ in the next ComputeEndMin().
190 if (j <= optimized_restart_) optimized_restart_ = 0;
191}
192
194 int t) {
195 const IntegerValue dmin = helper.SizeMin(t);
196 AddEntry({t, std::max(helper.StartMin(t), helper.EndMin(t) - dmin), dmin});
197}
198
200 const int size = sorted_tasks_.size();
201 for (int i = 0;; ++i) {
202 if (i == size) return;
203 if (sorted_tasks_[i].task == e.task) {
204 for (int j = i; j + 1 < size; ++j) {
205 sorted_tasks_[j] = sorted_tasks_[j + 1];
206 }
207 sorted_tasks_.pop_back();
208 break;
209 }
210 }
211
212 optimized_restart_ = sorted_tasks_.size();
213 sorted_tasks_.push_back(e);
214 DCHECK(std::is_sorted(sorted_tasks_.begin(), sorted_tasks_.end()));
215}
216
217IntegerValue TaskSet::ComputeEndMin() const {
218 DCHECK(std::is_sorted(sorted_tasks_.begin(), sorted_tasks_.end()));
219 const int size = sorted_tasks_.size();
220 IntegerValue end_min = kMinIntegerValue;
221 for (int i = optimized_restart_; i < size; ++i) {
222 const Entry& e = sorted_tasks_[i];
223 if (e.start_min >= end_min) {
224 optimized_restart_ = i;
225 end_min = e.start_min + e.size_min;
226 } else {
227 end_min += e.size_min;
228 }
229 }
230 return end_min;
231}
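
// Example: for sorted_tasks_ = {(start_min=2, size_min=3), (start_min=4,
// size_min=2), (start_min=10, size_min=1)}, the first two entries chain
// (2 + 3 = 5, then 5 + 2 = 7) while the third starts a new critical block,
// so ComputeEndMin() returns 10 + 1 = 11 and optimized_restart_ is left on
// that last entry.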
232
233IntegerValue TaskSet::ComputeEndMin(int task_to_ignore,
234 int* critical_index) const {
235 // The order in which we process tasks with the same start-min doesn't matter.
236 DCHECK(std::is_sorted(sorted_tasks_.begin(), sorted_tasks_.end()));
237 bool ignored = false;
238 const int size = sorted_tasks_.size();
239 IntegerValue end_min = kMinIntegerValue;
240
241 // If the ignored task is last and was the start of the critical block, then
242 // we need to reset optimized_restart_.
243 if (optimized_restart_ + 1 == size &&
244 sorted_tasks_[optimized_restart_].task == task_to_ignore) {
245 optimized_restart_ = 0;
246 }
247
248 for (int i = optimized_restart_; i < size; ++i) {
249 const Entry& e = sorted_tasks_[i];
250 if (e.task == task_to_ignore) {
251 ignored = true;
252 continue;
253 }
254 if (e.start_min >= end_min) {
255 *critical_index = i;
256 if (!ignored) optimized_restart_ = i;
257 end_min = e.start_min + e.size_min;
258 } else {
259 end_min += e.size_min;
260 }
261 }
262 return end_min;
263}
264
266 DCHECK_EQ(helper_->NumTasks(), 2);
267 if (!helper_->IsEnforced()) return true;
268 if (!helper_->SynchronizeAndSetTimeDirection(true)) return false;
269
270  // We can't propagate anything if one of the intervals is absent for sure.
271 if (helper_->IsAbsent(0) || helper_->IsAbsent(1)) return true;
272
273  // We can't propagate anything for two intervals if neither is present.
274  if (!helper_->IsPresent(0) && !helper_->IsPresent(1)) return true;
275
276  // Note that this propagation also takes care of the "overload checker" part.
277  // It also propagates as much as possible, even in the presence of tasks with
278  // variable sizes.
279 //
280  // TODO(user): For an optional interval whose presence is unknown and without
281  // optional variable, the end-min may not be propagated to at least (start_min
282  // + size_min). Take that into account in the computation so we may decide the
283  // interval's forced absence? Same for the start-max.
284 int task_before = 0;
285 int task_after = 1;
286
287 const bool task_0_before_task_1 = helper_->TaskIsBeforeOrIsOverlapping(0, 1);
288 const bool task_1_before_task_0 = helper_->TaskIsBeforeOrIsOverlapping(1, 0);
289
290 if (task_0_before_task_1 && task_1_before_task_0 &&
291 helper_->IsPresent(task_before) && helper_->IsPresent(task_after)) {
292 helper_->ResetReason();
293 helper_->AddPresenceReason(task_before);
294 helper_->AddPresenceReason(task_after);
295 helper_->AddReasonForBeingBeforeAssumingNoOverlap(task_before, task_after);
296 helper_->AddReasonForBeingBeforeAssumingNoOverlap(task_after, task_before);
297 return helper_->ReportConflict();
298 }
299
300 if (task_0_before_task_1) {
301 // Task 0 must be before task 1.
302 } else if (task_1_before_task_0) {
303 // Task 1 must be before task 0.
304 std::swap(task_before, task_after);
305 } else {
306 return true;
307 }
308
309 helper_->ResetReason();
310 helper_->AddReasonForBeingBeforeAssumingNoOverlap(task_before, task_after);
311 if (!helper_->PushTaskOrderWhenPresent(task_before, task_after)) {
312 return false;
313 }
314 return true;
315}
316
318 const int id = watcher->Register(this);
319 helper_->WatchAllTasks(id);
321 return id;
322}
323
324template <bool time_direction>
326 : helper_(model->GetOrCreate<IntervalsRepository>()->GetOrCreateHelper(
327 /*enforcement_literals=*/{},
328 model->GetOrCreate<IntervalsRepository>()->AllIntervals())) {
329 task_to_disjunctives_.resize(helper_->NumTasks());
330
331 auto* watcher = model->GetOrCreate<GenericLiteralWatcher>();
332 const int id = watcher->Register(this);
333 helper_->WatchAllTasks(id);
334 watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id);
335}
336
337template <bool time_direction>
339 absl::Span<const IntervalVariable> vars) {
340 const int index = task_sets_.size();
341 task_sets_.emplace_back(task_set_storage_.emplace_back());
342 end_mins_.push_back(kMinIntegerValue);
343 for (const IntervalVariable var : vars) {
344 task_to_disjunctives_[var.value()].push_back(index);
345 }
346}
347
348template <bool time_direction>
350 if (!helper_->SynchronizeAndSetTimeDirection(time_direction)) return false;
351 const auto& task_by_increasing_end_min = helper_->TaskByIncreasingEndMin();
352 const auto& task_by_negated_start_max =
353 helper_->TaskByIncreasingNegatedStartMax();
354
355 for (auto& task_set : task_sets_) task_set.Clear();
356 end_mins_.assign(end_mins_.size(), kMinIntegerValue);
357 IntegerValue max_of_end_min = kMinIntegerValue;
358
359 const int num_tasks = helper_->NumTasks();
360 task_is_added_.assign(num_tasks, false);
361 int queue_index = num_tasks - 1;
362 for (const auto task_time : task_by_increasing_end_min) {
363 const int t = task_time.task_index;
364 const IntegerValue end_min = task_time.time;
365 if (helper_->IsAbsent(t)) continue;
366
367 // Update all task sets.
368 while (queue_index >= 0) {
369 const auto to_insert = task_by_negated_start_max[queue_index];
370 const int task_index = to_insert.task_index;
371 const IntegerValue start_max = -to_insert.time;
372 if (end_min <= start_max) break;
373 if (helper_->IsPresent(task_index)) {
374 task_is_added_[task_index] = true;
375 const IntegerValue shifted_smin = helper_->ShiftedStartMin(task_index);
376 const IntegerValue size_min = helper_->SizeMin(task_index);
377 for (const int d_index : task_to_disjunctives_[task_index]) {
378 // TODO(user): AddEntry() and ComputeEndMin() could be combined.
379 task_sets_[d_index].AddEntry({task_index, shifted_smin, size_min});
380 end_mins_[d_index] = task_sets_[d_index].ComputeEndMin();
381 max_of_end_min = std::max(max_of_end_min, end_mins_[d_index]);
382 }
383 }
384 --queue_index;
385 }
386
387    // Find, amongst the disjunctives in which t appears, the one with the
388    // largest end_min, ignoring t itself. This will be the new start min for t.
389 IntegerValue new_start_min = helper_->StartMin(t);
390 if (new_start_min >= max_of_end_min) continue;
391 int best_critical_index = 0;
392 int best_d_index = -1;
393 if (task_is_added_[t]) {
394 for (const int d_index : task_to_disjunctives_[t]) {
395 if (new_start_min >= end_mins_[d_index]) continue;
396 int critical_index = 0;
397 const IntegerValue end_min_of_critical_tasks =
398 task_sets_[d_index].ComputeEndMin(/*task_to_ignore=*/t,
399 &critical_index);
400 DCHECK_LE(end_min_of_critical_tasks, max_of_end_min);
401 if (end_min_of_critical_tasks > new_start_min) {
402 new_start_min = end_min_of_critical_tasks;
403 best_d_index = d_index;
404 best_critical_index = critical_index;
405 }
406 }
407 } else {
408 // If the task t was not added, then there is no task to ignore and
409 // end_mins_[d_index] is up to date.
410 for (const int d_index : task_to_disjunctives_[t]) {
411 if (end_mins_[d_index] > new_start_min) {
412 new_start_min = end_mins_[d_index];
413 best_d_index = d_index;
414 }
415 }
416 if (best_d_index != -1) {
417 const IntegerValue end_min_of_critical_tasks =
418 task_sets_[best_d_index].ComputeEndMin(/*task_to_ignore=*/t,
419 &best_critical_index);
420 CHECK_EQ(end_min_of_critical_tasks, new_start_min);
421 }
422 }
423
424 // Do we push something?
425 if (best_d_index == -1) continue;
426
427 // Same reason as DisjunctiveDetectablePrecedences.
428 // TODO(user): Maybe factor out the code? It does require a function with a
429 // lot of arguments though.
430 helper_->ResetReason();
431 const absl::Span<const TaskSet::Entry> sorted_tasks =
432 task_sets_[best_d_index].SortedTasks();
433 const IntegerValue window_start =
434 sorted_tasks[best_critical_index].start_min;
435 for (int i = best_critical_index; i < sorted_tasks.size(); ++i) {
436 const int ct = sorted_tasks[i].task;
437 if (ct == t) continue;
438 helper_->AddPresenceReason(ct);
439 helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min, window_start);
440 helper_->AddStartMaxReason(ct, end_min - 1);
441 }
442 helper_->AddEndMinReason(t, end_min);
443 if (!helper_->IncreaseStartMin(t, new_start_min)) {
444 return false;
445 }
446
447    // We need to reorder t inside task_set_. Note that if t is in the set,
448    // it means that the task is present and that IncreaseStartMin() did push
449    // its start (as opposed to an optional interval, where the push might
450    // not happen if its start is not optional).
451 if (task_is_added_[t]) {
452 const IntegerValue shifted_smin = helper_->ShiftedStartMin(t);
453 const IntegerValue size_min = helper_->SizeMin(t);
454 for (const int d_index : task_to_disjunctives_[t]) {
455        // TODO(user): Refactor the code to use the same algo as in
456        // DisjunctiveDetectablePrecedences, it is superior and does not need
457        // this function.
458 task_sets_[d_index].NotifyEntryIsNowLastIfPresent(
459 {t, shifted_smin, size_min});
460 end_mins_[d_index] = task_sets_[d_index].ComputeEndMin();
461 max_of_end_min = std::max(max_of_end_min, end_mins_[d_index]);
462 }
463 }
464 }
465 return true;
466}
467
469 if (!helper_->IsEnforced()) return true;
470 stats_.OnPropagate();
471 if (!helper_->SynchronizeAndSetTimeDirection(/*is_forward=*/true)) {
472 ++stats_.num_conflicts;
473 return false;
474 }
475
476 absl::Span<TaskTime> window_span =
477 absl::MakeSpan(helper_->GetScratchTaskTimeVector1());
478
479  // We use this to detect precedences between tasks that must cause a push.
480 TaskTime task_with_max_end_min = {0, kMinIntegerValue};
481
482  // We will process the tasks by increasing shifted_end_max, thus consuming
483  // tasks from the end of that vector.
484 absl::Span<const CachedTaskBounds>
485 task_by_increasing_negated_shifted_end_max =
486 helper_->TaskByIncreasingNegatedShiftedEndMax();
487
488  // Split the problem into independent parts.
489  //
490  // Many propagators in this file use the same approach: we start by processing
491  // the tasks by increasing shifted start-min, packing everything to the left.
492  // We then process each "independent" set of tasks separately. A task is
493  // independent from the one before it if its start-min wasn't pushed.
494  //
495  // This way, we get one or more windows [window_start, window_end] so that for
496  // all tasks in the window, [start_min, end_min] is inside the window, the
497  // end-min of any set of tasks to the left is <= window_start, and the
498  // start-min of any task to the right is >= window_end.
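  //
  // Example: three tasks with (start_min, size_min) = (0, 2), (1, 3), (7, 1)
  // yield two windows: the first two tasks pack to the left up to 2 + 3 = 5,
  // and the third starts an independent window since 7 >= 5.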
499
500  // We keep the best end_min for present tasks only, and for present tasks plus
501  // at most a single optional task.
502 IntegerValue end_min_of_present = kMinIntegerValue;
503 IntegerValue end_min_with_one_optional = kMinIntegerValue;
504
505 IntegerValue relevant_end;
506 int window_size = 0;
507 int relevant_size = 0;
508 TaskTime* const window = window_span.data();
509 for (const auto [task, presence_lit, start_min] :
510 helper_->TaskByIncreasingShiftedStartMin()) {
511 if (helper_->IsAbsent(presence_lit)) continue;
512
513 // Nothing to do with overload checking, but almost free to do that here.
514 const IntegerValue size_min = helper_->SizeMin(task);
515 const IntegerValue end_min = start_min + size_min;
516 const IntegerValue start_max = helper_->StartMax(task);
517 if (start_max < task_with_max_end_min.time &&
518 helper_->IsPresent(presence_lit) && size_min > 0) {
519 // We have a detectable precedence that must cause a push.
520 //
521 // Remarks: If we added all precedence literals + linear relation, this
522 // propagation would have been done by the linear propagator, but if we
523 // didn't add such relations yet, it is beneficial to detect that here!
524 //
525 // TODO(user): Actually, we just inferred a "not last" so we could check
526 // for relevant_size > 2 potential propagation?
527 //
528      // Note that the DisjunctiveSimplePrecedences propagator will also
529      // detect such precedences between two tasks.
530 const int to_push = task_with_max_end_min.task_index;
531 helper_->ResetReason();
532 helper_->AddReasonForBeingBeforeAssumingNoOverlap(task, to_push);
533 if (!helper_->PushTaskOrderWhenPresent(task, to_push)) {
534 ++stats_.num_conflicts;
535 return false;
536 }
537
538      // TODO(user): Shall we keep propagating? We know the prefix didn't
539      // change, so we could be faster here. On the other hand, it might be
540      // better to propagate all the linear constraints before returning
541      // here.
542 ++stats_.num_propagations;
543
544 stats_.EndWithoutConflicts();
545 return true;
546 }
547
548 // Note that we need to do that AFTER the block above.
549 if (end_min > task_with_max_end_min.time) {
550 task_with_max_end_min = {task, end_min};
551 }
552
553 // Extend window.
554 const IntegerValue old_value = end_min_with_one_optional;
555 if (start_min < end_min_with_one_optional) {
556 window[window_size++] = {task, start_min};
557
558 if (helper_->IsPresent(presence_lit)) {
559 end_min_of_present =
560 std::max(start_min + size_min, end_min_of_present + size_min);
561 end_min_with_one_optional += size_min;
562 } else {
563 if (end_min_of_present == kMinIntegerValue) {
564 // only optional:
565 end_min_with_one_optional =
566 std::max(end_min_with_one_optional, start_min + size_min);
567 } else {
568 end_min_with_one_optional =
569 std::max(end_min_with_one_optional,
570 std::max(end_min_of_present, start_min) + size_min);
571 }
572 }
573
574 if (old_value > start_max) {
575 relevant_size = window_size;
576 relevant_end = end_min_with_one_optional;
577 }
578 continue;
579 }
580
581    // Process the current window.
582    // We don't need to process the end of the window (after relevant_size)
583    // because these intervals can be greedily assembled into a feasible solution.
584 if (relevant_size > 0 &&
585 !PropagateSubwindow(window_span.subspan(0, relevant_size), relevant_end,
586 &task_by_increasing_negated_shifted_end_max)) {
587 ++stats_.num_conflicts;
588 return false;
589 }
590
591 // Subwindow propagation might have propagated that the
592 // task_with_max_end_min must be absent.
593 if (helper_->IsAbsent(task_with_max_end_min.task_index)) {
594 task_with_max_end_min = {0, kMinIntegerValue};
595 }
596
597 // Start of the next window.
598 window_size = 0;
599 window[window_size++] = {task, start_min};
600
601 if (helper_->IsPresent(presence_lit)) {
602 end_min_of_present = start_min + size_min;
603 end_min_with_one_optional = end_min_of_present;
604 } else {
605 end_min_of_present = kMinIntegerValue;
606 end_min_with_one_optional = start_min + size_min;
607 }
608
609 relevant_size = 0;
610 }
611
612 // Process last window.
613 if (relevant_size > 0 &&
614 !PropagateSubwindow(window_span.subspan(0, relevant_size), relevant_end,
615 &task_by_increasing_negated_shifted_end_max)) {
616 ++stats_.num_conflicts;
617 return false;
618 }
619
620 if (DEBUG_MODE) {
621    // This tests that our "subwindow" logic is sound and that we don't miss any
622    // conflict or propagation by splitting the intervals like this.
623    //
624    // TODO(user): I think we can come up with corner cases where pushing a task
625    // absence forces another one to be present, which might create more
626    // propagation. This is the same reason why we might not reach a fixed point
627    // in one go. However, this should be rare, and that piece of code allowed me
628    // to fix many bugs. So I left it as is; we can revisit if we start to see
629    // crashes in our tests.
630 window_size = 0;
631 task_by_increasing_negated_shifted_end_max =
632 helper_->TaskByIncreasingNegatedShiftedEndMax();
633 for (const auto [task, presence_lit, start_min] :
634 helper_->TaskByIncreasingShiftedStartMin()) {
635 if (helper_->IsAbsent(presence_lit)) continue;
636 window[window_size++] = {task, start_min};
637 }
638 const int64_t saved = helper_->PropagationTimestamp();
639 CHECK(PropagateSubwindow(window_span.subspan(0, window_size),
641 &task_by_increasing_negated_shifted_end_max));
642 CHECK_EQ(saved, helper_->PropagationTimestamp());
643 }
644
645 stats_.EndWithoutConflicts();
646 return true;
647}
648
649// TODO(user): Improve the Overload Checker using delayed insertion.
650// We insert events at the cost of O(log n) per insertion, and this is where
651// the algorithm spends most of its time, thus it is worth improving.
652// We can insert an arbitrary set of tasks at the cost of O(n) for the whole
653// set. This is useless for the overload checker as is since we need to check
654// overload after every insertion, but we could use an upper bound of the
655// theta envelope to save us from checking the actual value.
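//
// Overload rule on a small example: two present tasks, both with start_min 0,
// size_min 3 and shifted end-max 5, give a theta-tree envelope of
// 0 + 3 + 3 = 6 > 5, so they cannot both complete by time 5 and a conflict
// is reported.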
656bool DisjunctiveOverloadChecker::PropagateSubwindow(
657 absl::Span<TaskTime> sub_window, IntegerValue global_window_end,
658 absl::Span<const CachedTaskBounds>*
659 task_by_increasing_negated_shifted_end_max) {
660 // Set up theta tree.
661 TaskTime* const window = sub_window.data();
662 int num_events = 0;
663 for (int i = 0; i < sub_window.size(); ++i) {
664 // No point adding a task if its end_max is too large.
665 const TaskTime& task_time = window[i];
666 const int task = task_time.task_index;
667
668 // We use the shifted end-max which might be < EndMax().
669 const IntegerValue end_max = helper_->ShiftedEndMax(task);
670 if (end_max < global_window_end) {
671 window[num_events] = task_time;
672 task_to_event_[task] = num_events;
673 ++num_events;
674 }
675 }
676 if (num_events <= 1) return true;
677 theta_tree_.Reset(num_events);
678
679 // Introduce events by increasing end_max, check for overloads.
680 while (!task_by_increasing_negated_shifted_end_max->empty()) {
681 const auto [current_task, presence_lit, minus_shifted_end_max] =
682 task_by_increasing_negated_shifted_end_max->back();
683 const IntegerValue current_end = -minus_shifted_end_max;
684 DCHECK_EQ(current_end, helper_->ShiftedEndMax(current_task));
685 if (current_end >= global_window_end) return true; // go to next window.
686 task_by_increasing_negated_shifted_end_max->remove_suffix(1); // consume.
687
688    // Ignore tasks that are not part of that window.
689    // Note that we never clear task_to_event_, so it might contain garbage
690    // for the values that we didn't set above. However, we can easily check
691    // whether task_to_event_[current_task] points to an element that we set
692    // above. This is a bit faster than clearing task_to_event_ on exit.
693 const int current_event = task_to_event_[current_task];
694 if (current_event < 0 || current_event >= num_events) continue;
695 if (window[current_event].task_index != current_task) continue;
696
697    // We filtered absent tasks while constructing the subwindow, but it is
698    // possible that as we propagate task absence below, other tasks also become
699    // absent (if they share the same presence Boolean).
700 if (helper_->IsAbsent(presence_lit)) continue;
701
702 DCHECK_NE(task_to_event_[current_task], -1);
703 {
704 const IntegerValue energy_min = helper_->SizeMin(current_task);
705 if (helper_->IsPresent(presence_lit)) {
706 // TODO(user): Add max energy deduction for variable
707 // sizes by putting the energy_max here and modifying the code
708 // dealing with the optional envelope greater than current_end below.
709 theta_tree_.AddOrUpdateEvent(current_event, window[current_event].time,
710 energy_min, energy_min);
711 } else {
712 theta_tree_.AddOrUpdateOptionalEvent(
713 current_event, window[current_event].time, energy_min);
714 }
715 }
716
717 if (theta_tree_.GetEnvelope() > current_end) {
718 // Explain failure with tasks in critical interval.
719 helper_->ResetReason();
720 const int critical_event =
721 theta_tree_.GetMaxEventWithEnvelopeGreaterThan(current_end);
722 const IntegerValue window_start = window[critical_event].time;
723 const IntegerValue window_end =
724 theta_tree_.GetEnvelopeOf(critical_event) - 1;
725 for (int event = critical_event; event < num_events; event++) {
726 const IntegerValue energy_min = theta_tree_.EnergyMin(event);
727 if (energy_min > 0) {
728 const int task = window[event].task_index;
729 helper_->AddPresenceReason(task);
730 helper_->AddEnergyAfterReason(task, energy_min, window_start);
731 helper_->AddShiftedEndMaxReason(task, window_end);
732 }
733 }
734 return helper_->ReportConflict();
735 }
736
737 // Exclude all optional tasks that would overload an interval ending here.
738 while (theta_tree_.GetOptionalEnvelope() > current_end) {
739 // Explain exclusion with tasks present in the critical interval.
740      // TODO(user): This could be done lazily, like most of the loops used to
741      // compute the reasons in this file.
742 helper_->ResetReason();
743 int critical_event;
744 int optional_event;
745 IntegerValue available_energy;
746 theta_tree_.GetEventsWithOptionalEnvelopeGreaterThan(
747 current_end, &critical_event, &optional_event, &available_energy);
748
749 const int optional_task = window[optional_event].task_index;
750
751      // If tasks share the same presence literal, it is possible that we
752      // already pushed this task's absence.
753 if (!helper_->IsAbsent(optional_task)) {
754 const IntegerValue optional_size_min = helper_->SizeMin(optional_task);
755 const IntegerValue window_start = window[critical_event].time;
756 const IntegerValue window_end =
757 current_end + optional_size_min - available_energy - 1;
758 for (int event = critical_event; event < num_events; event++) {
759 const IntegerValue energy_min = theta_tree_.EnergyMin(event);
760 if (energy_min > 0) {
761 const int task = window[event].task_index;
762 helper_->AddPresenceReason(task);
763 helper_->AddEnergyAfterReason(task, energy_min, window_start);
764 helper_->AddShiftedEndMaxReason(task, window_end);
765 }
766 }
767
768 helper_->AddEnergyAfterReason(optional_task, optional_size_min,
769 window_start);
770 helper_->AddShiftedEndMaxReason(optional_task, window_end);
771
772 ++stats_.num_propagations;
773 if (!helper_->PushTaskAbsence(optional_task)) return false;
774 }
775
776 theta_tree_.RemoveEvent(optional_event);
777 }
778 }
779
780 return true;
781}
782
784  // This propagator should mostly reach the fixed point in one pass, except if
785  // pushing a task absence makes another task present... So we still prefer to
786  // re-run it as it is relatively fast.
787 const int id = watcher->Register(this);
788 helper_->SetTimeDirection(/*is_forward=*/true);
790 helper_->WatchAllTasks(id);
791 return id;
792}
793
795 const int id = watcher->Register(this);
796 helper_->WatchAllTasks(id);
798 return id;
799}
800
802 if (!helper_->IsEnforced()) return true;
803 stats_.OnPropagate();
804
805 const bool current_direction = helper_->CurrentTimeIsForward();
806 for (const bool direction : {current_direction, !current_direction}) {
807 if (!helper_->SynchronizeAndSetTimeDirection(direction)) {
808 ++stats_.num_conflicts;
809 return false;
810 }
811 if (!PropagateOneDirection()) {
812 ++stats_.num_conflicts;
813 return false;
814 }
815 }
816
817 stats_.EndWithoutConflicts();
818 return true;
819}
820
821bool DisjunctiveSimplePrecedences::Push(TaskTime before, int t) {
822 const int t_before = before.task_index;
823
824 DCHECK_NE(t_before, t);
825 helper_->ResetReason();
826 helper_->AddReasonForBeingBeforeAssumingNoOverlap(t_before, t);
827 if (!helper_->PushTaskOrderWhenPresent(t_before, t)) return false;
828 ++stats_.num_propagations;
829 return true;
830}
831
832bool DisjunctiveSimplePrecedences::PropagateOneDirection() {
833 // We will loop in a decreasing way here.
834 // And add tasks that are present to the task_set_.
835 absl::Span<const TaskTime> task_by_negated_start_max =
836      helper_->TaskByIncreasingNegatedStartMax();
837
838  // Amongst all the tasks before current_end_min, we just keep the one with
839  // the highest end-min.
840 TaskTime best_task_before = {-1, kMinIntegerValue};
841
842 // We will loop in an increasing way here and consume task from beginning.
843 absl::Span<const TaskTime> task_by_increasing_end_min =
844 helper_->TaskByIncreasingEndMin();
845
846 for (; !task_by_increasing_end_min.empty();) {
847    // Skip absent tasks.
848 if (helper_->IsAbsent(task_by_increasing_end_min.front().task_index)) {
849 task_by_increasing_end_min.remove_prefix(1);
850 continue;
851 }
852
853    // Consider all tasks with a start_max < current_end_min.
854 int blocking_task = -1;
855 IntegerValue blocking_start_max;
856 IntegerValue current_end_min = task_by_increasing_end_min.front().time;
857 for (; true; task_by_negated_start_max.remove_suffix(1)) {
858 if (task_by_negated_start_max.empty()) {
859        // Small optimization: this allows us to process all remaining tasks
860        // rather than looping around and retesting this for all remaining tasks.
861 current_end_min = kMaxIntegerValue;
862 break;
863 }
864
865 const auto [t, negated_start_max] = task_by_negated_start_max.back();
866 const IntegerValue start_max = -negated_start_max;
867 if (current_end_min <= start_max) break;
868 if (!helper_->IsPresent(t)) continue;
869
870      // If t has a mandatory part that extends past current_end_min, then we
871      // can push it first. All tasks whose push is delayed are necessarily
872      // after this "blocking task".
873 //
874 // This idea is introduced in "Linear-Time Filtering Algorithms for the
875 // Disjunctive Constraints" Hamed Fahimi, Claude-Guy Quimper.
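      // Example: if current_end_min is 10 and a present task t has
      // start_max 6 and end_min 12, then t has a mandatory part over [6, 12)
      // and becomes the blocking task; current_end_min is raised to 12 before
      // the delayed pushes below are performed.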
876 const IntegerValue end_min = helper_->EndMin(t);
877 if (blocking_task == -1 && end_min >= current_end_min) {
878 DCHECK_LT(start_max, end_min) << " task should have mandatory part: "
879 << helper_->TaskDebugString(t);
880 blocking_task = t;
881 blocking_start_max = start_max;
882 current_end_min = end_min;
883 } else if (blocking_task != -1 && blocking_start_max < end_min) {
884        // Conflict! The task is after the blocking_task but also before it.
885 helper_->ResetReason();
886 helper_->AddPresenceReason(blocking_task);
887 helper_->AddPresenceReason(t);
888 helper_->AddReasonForBeingBeforeAssumingNoOverlap(blocking_task, t);
889 helper_->AddReasonForBeingBeforeAssumingNoOverlap(t, blocking_task);
890 return helper_->ReportConflict();
891 } else if (end_min > best_task_before.time) {
892 best_task_before = {t, end_min};
893 }
894 }
895
896    // If we have a blocking task, we need to propagate it first.
897 if (blocking_task != -1) {
898 DCHECK(!helper_->IsAbsent(blocking_task));
899 if (best_task_before.time > helper_->StartMin(blocking_task)) {
900 if (!Push(best_task_before, blocking_task)) return false;
901 }
902
903 // Update best_task_before.
904 //
905 // Note that we want the blocking task here as it is the only one
906      // guaranteed to be before all the tasks we push below. If we are not in a
907 // propagation loop, it should also be the best.
908 const IntegerValue end_min = helper_->EndMin(blocking_task);
909 best_task_before = {blocking_task, end_min};
910 }
911
912    // Let's propagate all tasks after best_task_before.
913 for (; !task_by_increasing_end_min.empty();
914 task_by_increasing_end_min.remove_prefix(1)) {
915 const auto [t, end_min] = task_by_increasing_end_min.front();
916 if (end_min > current_end_min) break;
917 if (t == blocking_task) continue; // Already done.
918
919      // Let's propagate current_task.
920 if (best_task_before.time > helper_->StartMin(t)) {
921 // Corner case if a previous push caused a subsequent task to be absent.
922 if (helper_->IsAbsent(t)) continue;
923 if (!Push(best_task_before, t)) return false;
924 }
925 }
926 }
927 return true;
928}
929
931 if (!helper_->IsEnforced()) return true;
932 stats_.OnPropagate();
933 if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) {
934 ++stats_.num_conflicts;
935 return false;
936 }
937
938 // Compute the "rank" of each task.
939 const auto by_shifted_smin = helper_->TaskByIncreasingShiftedStartMin();
940 int rank = -1;
941 IntegerValue window_end = kMinIntegerValue;
942 int* const ranks = ranks_.data();
943 for (const auto [task, presence_lit, start_min] : by_shifted_smin) {
944 if (!helper_->IsPresent(presence_lit)) {
945 ranks[task] = -1;
946 continue;
947 }
948
949 const IntegerValue size_min = helper_->SizeMin(task);
950 if (start_min < window_end) {
951 ranks[task] = rank;
952 window_end += size_min;
953 } else {
954 ranks[task] = ++rank;
955 window_end = start_min + size_min;
956 }
957 }
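
  // Example: tasks with (shifted start-min, size-min) = (0, 2), (1, 2), (5, 1),
  // processed in that order, get ranks 0, 0, 1: the first two overlap once
  // packed to the left (window_end goes to 2 and then 4), and the third starts
  // a new rank because 5 >= 4.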
958
959 if (!PropagateWithRanks()) {
960 ++stats_.num_conflicts;
961 return false;
962 }
963 stats_.EndWithoutConflicts();
964 return true;
965}
966
967// task_set_ contains all the tasks that must be executed before t. They
968// are in "detectable precedence" because their start_max is smaller than
969// the end-min of t like so:
970// [(the task t)
971// (a task in task_set_)]
972// From there, we deduce that the start-min of t is greater or equal to
973// the end-min of the critical tasks.
974//
975// Note that this works as well when IsPresent(t) is false.
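//
// Numeric example: with task_set_ = {a, b}, start_min(a) = 0, size_min(a) = 4,
// start_min(b) = 2, size_min(b) = 3, and both start-maxes smaller than the
// end-min of t, the critical block ends at 0 + 4 + 3 = 7, so the start-min of
// t can be raised to 7.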
976bool DisjunctiveDetectablePrecedences::Push(IntegerValue task_set_end_min,
977 int t, TaskSet& task_set) {
978 const int critical_index = task_set.GetCriticalIndex();
979 const absl::Span<const TaskSet::Entry> sorted_tasks = task_set.SortedTasks();
980 helper_->ResetReason();
981
982 // We need:
983 // - StartMax(ct) < EndMin(t) for the detectable precedence.
984 // - StartMin(ct) >= window_start for the value of task_set_end_min.
985 const IntegerValue end_min_if_present =
986 helper_->ShiftedStartMin(t) + helper_->SizeMin(t);
987 const IntegerValue window_start = sorted_tasks[critical_index].start_min;
988 IntegerValue min_slack = kMaxIntegerValue;
989 bool all_already_before = true;
990 IntegerValue energy_of_task_before = 0;
991 for (int i = critical_index; i < sorted_tasks.size(); ++i) {
992 const int ct = sorted_tasks[i].task;
993 DCHECK_NE(ct, t);
994 helper_->AddPresenceReason(ct);
995
996 // Heuristic, if some tasks are known to be after the first one,
997 // we just add the min-size as a reason.
998 //
999 // TODO(user): ideally we don't want to do that if we don't have a level
1000 // zero precedence...
1001 if (i > critical_index && helper_->GetCurrentMinDistanceBetweenTasks(
1002 sorted_tasks[critical_index].task, ct) >= 0) {
1004 sorted_tasks[critical_index].task, ct);
1005 helper_->AddSizeMinReason(ct);
1006 } else {
1007 helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min, window_start);
1008 }
1009
1010 // We only need the reason for being before if we don't already have
1011 // a static precedence between the tasks.
1012 const IntegerValue dist = helper_->GetCurrentMinDistanceBetweenTasks(ct, t);
1013 if (dist >= 0) {
1015 energy_of_task_before += sorted_tasks[i].size_min;
1016 min_slack = std::min(min_slack, dist);
1017
1018      // TODO(user): The reason in AddReasonForBeingBeforeAssumingNoOverlap()
1019      // only accounts for dist == 0! If we want to use a stronger strictly
1020      // positive distance, we actually need to explain it. Fix this.
1021 min_slack = 0;
1022 } else {
1023 all_already_before = false;
1024 helper_->AddStartMaxReason(ct, end_min_if_present - 1);
1025 }
1026 }
1027
1028  // We only need the end-min of t if not all the tasks are already known
1029  // to be before.
1030 IntegerValue new_start_min = task_set_end_min;
1031 if (all_already_before) {
1032 // We can actually push further!
1033 // And we don't need other reason except the precedences.
1034 new_start_min += min_slack;
1035 } else {
1036 helper_->AddEndMinReason(t, end_min_if_present);
1037 }
1038
1039  // In some situations, we can push the task further.
1040 // TODO(user): We can also reduce the reason in this case.
1041 if (min_slack != kMaxIntegerValue &&
1042 window_start + energy_of_task_before + min_slack > new_start_min) {
1043 new_start_min = window_start + energy_of_task_before + min_slack;
1044 }
1045
1046 // Process detected precedence.
1047 if (helper_->CurrentDecisionLevel() == 0 && helper_->IsPresent(t)) {
1048 for (int i = critical_index; i < sorted_tasks.size(); ++i) {
1049 if (!helper_->NotifyLevelZeroPrecedence(sorted_tasks[i].task, t)) {
1050 return false;
1051 }
1052 }
1053 }
1054
1055  // This augments the start-min of t. Note that t is not in the task set
1056  // yet, so we will use this updated start if we ever add it there.
1057 ++stats_.num_propagations;
1058 if (!helper_->IncreaseStartMin(t, new_start_min)) {
1059 return false;
1060 }
1061
1062 return true;
1063}
1064
1065bool DisjunctiveDetectablePrecedences::PropagateWithRanks() {
1066 // We will "consume" tasks from here.
1067 absl::Span<const TaskTime> task_by_increasing_end_min =
1068 helper_->TaskByIncreasingEndMin();
1069 absl::Span<const TaskTime> task_by_negated_start_max =
1070 helper_->TaskByIncreasingNegatedStartMax();
1071
1072  // We will stop using ranks as soon as we propagated something. This allows
1073  // us to be sure this propagates as much as possible in a single pass and
1074  // seems to help slightly.
1075 int highest_rank = 0;
1076 bool some_propagation = false;
1077
1078  // Invariant: whenever to_add_ is empty, task_set_end_min is equal to
1079  // task_set.ComputeEndMin().
1080 //
1081 // TODO(user): Maybe it is just faster to merge ComputeEndMin() with
1082 // AddEntry().
1083 TaskSet task_set(helper_->GetScratchTaskSet());
1084 to_add_.clear();
1085 IntegerValue task_set_end_min = kMinIntegerValue;
1086 for (; !task_by_increasing_end_min.empty();) {
1087    // Consider all tasks with a start_max < current_end_min.
1088 int blocking_task = -1;
1089 IntegerValue blocking_start_max;
1090 IntegerValue current_end_min = task_by_increasing_end_min.front().time;
1091
1092 for (; true; task_by_negated_start_max.remove_suffix(1)) {
1093 if (task_by_negated_start_max.empty()) {
1094        // Small optimization: this allows us to process all remaining tasks
1095        // rather than looping around and retesting this for all remaining tasks.
1096 current_end_min = kMaxIntegerValue;
1097 break;
1098 }
1099
1100 const auto [t, negated_start_max] = task_by_negated_start_max.back();
1101 const IntegerValue start_max = -negated_start_max;
1102 if (current_end_min <= start_max) break;
1103
1104 // If the task is not present, its rank will be -1.
1105 const int rank = ranks_[t];
1106 if (rank < highest_rank) continue;
1107 DCHECK(helper_->IsPresent(t));
1108
1109      // If t has a mandatory part that extends past current_end_min, then we
1110      // can push it first. All tasks whose push is delayed are necessarily
1111      // after this "blocking task".
1112 //
1113 // This idea is introduced in "Linear-Time Filtering Algorithms for the
1114 // Disjunctive Constraints" Hamed Fahimi, Claude-Guy Quimper.
1115 const IntegerValue end_min = helper_->EndMin(t);
1116 if (blocking_task == -1 && end_min >= current_end_min) {
1117 DCHECK_LT(start_max, end_min) << " task should have mandatory part: "
1118 << helper_->TaskDebugString(t);
1119 blocking_task = t;
1120 blocking_start_max = start_max;
1121 current_end_min = end_min;
1122 } else if (blocking_task != -1 && blocking_start_max < end_min) {
1123        // Conflict! The task is after the blocking_task but also before it.
1124 helper_->ResetReason();
1125 helper_->AddPresenceReason(blocking_task);
1126 helper_->AddPresenceReason(t);
1127 helper_->AddReasonForBeingBeforeAssumingNoOverlap(blocking_task, t);
1128 helper_->AddReasonForBeingBeforeAssumingNoOverlap(t, blocking_task);
1129 return helper_->ReportConflict();
1130 } else {
1131 if (!some_propagation && rank > highest_rank) {
1132 to_add_.clear();
1133 task_set.Clear();
1134 highest_rank = rank;
1135 }
1136 to_add_.push_back(t);
1137 }
1138 }
1139
1140    // If we have a blocking task, we need to propagate it first.
1141 if (blocking_task != -1) {
1142 DCHECK(!helper_->IsAbsent(blocking_task));
1143
1144 if (!to_add_.empty()) {
1145 for (const int t : to_add_) {
1146 task_set.AddShiftedStartMinEntry(*helper_, t);
1147 }
1148 to_add_.clear();
1149 task_set_end_min = task_set.ComputeEndMin();
1150 }
1151
1152 if (task_set_end_min > helper_->StartMin(blocking_task)) {
1153 some_propagation = true;
1154 if (!Push(task_set_end_min, blocking_task, task_set)) return false;
1155 }
1156
1157      // This propagator assumes that every push is reflected for its
1158      // correctness.
1159 if (helper_->InPropagationLoop()) return true;
1160
1161 // Insert the blocking_task. Note that because we just pushed it,
1162 // it will be last in task_set_ and also the only reason used to push
1163 // any of the subsequent tasks below. In particular, the reason will be
1164      // valid even though task_set might contain tasks with a start_max
1165 // greater or equal to the end_min of the task we push.
1166 if (!some_propagation && ranks_[blocking_task] > highest_rank) {
1167 to_add_.clear();
1168 task_set.Clear();
1169 highest_rank = ranks_[blocking_task];
1170 }
1171 to_add_.push_back(blocking_task);
1172 }
1173
1174    // Let's propagate all tasks with end_min <= current_end_min that are also
1175    // after all the tasks in task_set_.
1176 for (; !task_by_increasing_end_min.empty();
1177 task_by_increasing_end_min.remove_prefix(1)) {
1178 const auto [t, end_min] = task_by_increasing_end_min.front();
1179 if (end_min > current_end_min) break;
1180 if (t == blocking_task) continue; // Already done.
1181
1182 if (!to_add_.empty()) {
1183 for (const int t : to_add_) {
1184 task_set.AddShiftedStartMinEntry(*helper_, t);
1185 }
1186 to_add_.clear();
1187 task_set_end_min = task_set.ComputeEndMin();
1188 }
1189
1190      // Let's propagate current_task.
1191 if (task_set_end_min > helper_->StartMin(t)) {
1192 // Corner case if a previous push caused a subsequent task to be absent.
1193 if (helper_->IsAbsent(t)) continue;
1194
1195 some_propagation = true;
1196 if (!Push(task_set_end_min, t, task_set)) return false;
1197 }
1198 }
1199 }
1200
1201 return true;
1202}
1203
1205 GenericLiteralWatcher* watcher) {
1206 const int id = watcher->Register(this);
1207 helper_->SetTimeDirection(time_direction_);
1208 helper_->WatchAllTasks(id);
1210 return id;
1211}
1212
1214 if (!helper_->IsEnforced()) return true;
1215 stats_.OnPropagate();
1216 if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) {
1217 ++stats_.num_conflicts;
1218 return false;
1219 }
1220 FixedCapacityVector<TaskTime>& window = helper_->GetScratchTaskTimeVector1();
1221
1222  // We only need to consider "critical" sets of tasks given how we compute the
1223  // min-offset in PropagateSubwindow().
1224 IntegerValue window_end = kMinIntegerValue;
1225 for (const auto [task, presence_lit, start_min] :
1226 helper_->TaskByIncreasingShiftedStartMin()) {
1227 if (!helper_->IsPresent(presence_lit)) continue;
1228
1229 if (start_min < window_end) {
1230 window.push_back({task, start_min});
1231 window_end += helper_->SizeMin(task);
1232 continue;
1233 }
1234
1235 if (window.size() > 1 && !PropagateSubwindow(absl::MakeSpan(window))) {
1236 ++stats_.num_conflicts;
1237 return false;
1238 }
1239
1240 // Start of the next window.
1241 window.clear();
1242 window.push_back({task, start_min});
1243 window_end = start_min + helper_->SizeMin(task);
1244 }
1245 if (window.size() > 1 && !PropagateSubwindow(absl::MakeSpan(window))) {
1246 ++stats_.num_conflicts;
1247 return false;
1248 }
1249
1250 stats_.EndWithoutConflicts();
1251 return true;
1252}
1253
1254bool DisjunctivePrecedences::PropagateSubwindow(absl::Span<TaskTime> window) {
1255 // This function can be slow, so count it in the dtime.
1256 int64_t num_hash_lookup = 0;
1257 auto cleanup = ::absl::MakeCleanup([&num_hash_lookup, this]() {
1258 time_limit_->AdvanceDeterministicTime(static_cast<double>(num_hash_lookup) *
1259 5e-8);
1260 });
1261
1262 // TODO(user): We shouldn't consider ends for fixed intervals here. But
1263 // then we should do a better job of computing the min-end of a subset of
1264 // intervals from this disjunctive (like using fixed intervals even if there
1265 // is no "before that variable" relationship). Ex: If a variable is after two
1266 // intervals that cannot be both before a fixed one, we could propagate more.
1267 index_to_end_vars_.clear();
1268 int new_size = 0;
1269 for (const auto task_time : window) {
1270 const int task = task_time.task_index;
1271 const AffineExpression& end_exp = helper_->Ends()[task];
1272
1273 // TODO(user): Handle generic affine relation?
1274 if (end_exp.var == kNoIntegerVariable || end_exp.coeff != 1) continue;
1275
1276 window[new_size++] = task_time;
1277 index_to_end_vars_.push_back(end_exp.var);
1278 }
1279 window = window.subspan(0, new_size);
1280
1281  // Because we use the cached values in the window, we don't really care
1282  // in which order we process them.
1283 precedence_relations_->CollectPrecedences(index_to_end_vars_, &before_);
1284
1285 const int size = before_.size();
1286 for (int global_i = 0; global_i < size;) {
1287 const int global_start_i = global_i;
1288 const IntegerVariable var = before_[global_i].var;
1289 DCHECK_NE(var, kNoIntegerVariable);
1290
1291    // Decode the set of tasks before var.
1292    // Note that like in Propagate() we split this set of tasks into critical
1293    // subparts as there is no point considering them together.
1294 //
1295    // TODO(user): If more than one set of tasks pushes the same variable, we
1296    // probably only want to keep the best push? Maybe we want to process them
1297    // in reverse order of what we do here?
1298 indices_before_.clear();
1299 IntegerValue local_start;
1300 IntegerValue local_end;
1301 for (; global_i < size; ++global_i) {
1302 const EnforcedLinear2Bounds::PrecedenceData& data = before_[global_i];
1303 if (data.var != var) break;
1304 const int index = data.var_index;
1305 const auto [t, start_of_t] = window[index];
1306 if (global_i == global_start_i) { // First loop.
1307 local_start = start_of_t;
1308 local_end = local_start + helper_->SizeMin(t);
1309 } else {
1310 if (start_of_t >= local_end) break;
1311 local_end += helper_->SizeMin(t);
1312 }
1313 indices_before_.push_back({index, data.lin2_index});
1314 }
1315
1316 // No need to consider if we don't have at least two tasks before var.
1317 const int num_before = indices_before_.size();
1318 if (num_before < 2) continue;
1319 skip_.assign(num_before, false);
1320
1321    // Heuristic.
1322    // We will use the current end-min of all the tasks in indices_before_
1323    // to skip tasks whose offset is not large enough.
1324 IntegerValue end_min_when_all_present = local_end;
1325
1326 // We will consider the end-min of all the subsets [i, num_items) to try to
1327 // push var using the min-offset between var and items of such subset. This
1328 // can be done in linear time by scanning from i = num_items - 1 to 0.
1329 //
1330 // Note that this needs the items in indices_before_ to be sorted by
1331 // their shifted start min (it should be the case).
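    //
    // Example: two tasks before var with (start, size_min, offset) =
    // (0, 3, +2) and (3, 2, +5), scanned from the last one: the subset
    // {second} yields 3 + 2 + 5 = 10 while the subset {both} yields
    // 0 + (3 + 2) + min(2, 5) = 7, so the best push is var >= 10.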
1332 int best_index = -1;
1333 const IntegerValue current_var_lb = integer_trail_.LowerBound(var);
1334 IntegerValue best_new_lb = current_var_lb;
1335 IntegerValue min_offset_at_best = kMinIntegerValue;
1336 IntegerValue min_offset = kMaxIntegerValue;
1337 IntegerValue sum_of_duration = 0;
1338 for (int i = num_before; --i >= 0;) {
1339 const auto [task_index, lin2_index] = indices_before_[i];
1340 const TaskTime task_time = window[task_index];
1341 const AffineExpression& end_exp = helper_->Ends()[task_time.task_index];
1342
1343 // TODO(user): The lookup here is a bit slow, so we avoid fetching
1344 // the offset as much as possible.
1345 ++num_hash_lookup;
1346 const IntegerValue inner_offset =
1347 -linear2_bounds_->NonTrivialUpperBound(lin2_index);
1348 DCHECK_NE(inner_offset, kMinIntegerValue);
1349
1350 // We have var >= end_exp.var + inner_offset, so
1351 // var >= (end_exp.var + end_exp.constant)
1352 // + (inner_offset - end_exp.constant)
1353 // var >= task end + offset.
1354 const IntegerValue offset = inner_offset - end_exp.constant;
1355
1356      // Heuristic: do not consider this relation if its offset is clearly bad.
1357 const IntegerValue task_size = helper_->SizeMin(task_time.task_index);
1358 if (end_min_when_all_present + offset <= best_new_lb) {
1359        // This is true if we skipped all tasks so far in this block.
1360        if (min_offset == kMaxIntegerValue) {
1361          // If only one task is left, we can abort.
1362          // This avoids a linear2_bounds_ lookup.
1363 if (i == 1) break;
1364
1365 // Lower the end_min_when_all_present for better filtering later.
1366 end_min_when_all_present -= task_size;
1367 }
1368
1369 skip_[i] = true;
1370 continue;
1371 }
1372
1373 // Add this task to the current subset and compute the new bound.
1374 min_offset = std::min(min_offset, offset);
1375 sum_of_duration += task_size;
1376 const IntegerValue start = task_time.time;
1377 const IntegerValue new_lb = start + sum_of_duration + min_offset;
1378 if (new_lb > best_new_lb) {
1379 min_offset_at_best = min_offset;
1380 best_new_lb = new_lb;
1381 best_index = i;
1382 }
1383 }
1384
1385 // Push?
1386 if (best_new_lb > current_var_lb) {
1387 DCHECK_NE(best_index, -1);
1388 helper_->ResetReason();
1389 const IntegerValue window_start =
1390 window[indices_before_[best_index].first].time;
1391 for (int i = best_index; i < num_before; ++i) {
1392 if (skip_[i]) continue;
1393 const int ct = window[indices_before_[i].first].task_index;
1394 helper_->AddPresenceReason(ct);
1395 helper_->AddEnergyAfterReason(ct, helper_->SizeMin(ct), window_start);
1396
1397 // Fetch the explanation of (var - end) >= min_offset
1398 // This is okay if a bit slow since we only do that when we push.
1399 const auto [expr, ub] = EncodeDifferenceLowerThan(
1400 helper_->Ends()[ct], var, -min_offset_at_best);
1401 helper_->AddReasonForUpperBoundLowerThan(expr, ub);
1402 }
1403 ++stats_.num_propagations;
1404 if (!helper_->PushIntegerLiteral(
1405 IntegerLiteral::GreaterOrEqual(var, best_new_lb))) {
1406 return false;
1407 }
1408 }
1409 }
1410 return true;
1411}
1412
1414  // This propagator reaches the fixed point in one go.
1415  // Maybe not in corner cases, but since it is expensive, it is okay not to
1416  // run it again right away.
1417 //
1418  // Note also that technically, we don't need to be woken up if only the upper
1419  // bound of the task changes, but this requires more memory and the gain
1420  // is unclear as this runs with the highest priority.
1421 const int id = watcher->Register(this);
1422 helper_->SetTimeDirection(time_direction_);
1423 helper_->WatchAllTasks(id);
1424 return id;
1425}
1426
1428 if (!helper_->IsEnforced()) return true;
1429 stats_.OnPropagate();
1430 if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) {
1431 ++stats_.num_conflicts;
1432 return false;
1433 }
1434
1435 const auto task_by_negated_start_max =
1436 helper_->TaskByIncreasingNegatedStartMax();
1437 const auto task_by_increasing_shifted_start_min =
1438 helper_->TaskByIncreasingShiftedStartMin();
1439
1440 TaskSet task_set(helper_->GetScratchTaskSet());
1441 FixedCapacityVector<TaskTime>& start_min_window =
1442 helper_->GetScratchTaskTimeVector1();
1443 FixedCapacityVector<TaskTime>& start_max_window =
1444 helper_->GetScratchTaskTimeVector2();
1445
1446  // Split the problem into independent parts.
1447 //
1448 // The situation is trickier here, and we use two windows:
1449 // - The classical "start_min_window_" as in the other propagator.
1450 // - A second window, that includes all the task with a start_max inside
1451 // [window_start, window_end].
1452 //
1453  // Now, a task from the second window can be detected to be "not last" by only
1454  // looking at the tasks in the first window. Tasks to the left do not cause
1455  // issues for the task to be last, and tasks to the right will not lower the
1456  // end-min of the task under consideration.
1457 int queue_index = task_by_negated_start_max.size() - 1;
1458 const int num_tasks = task_by_increasing_shifted_start_min.size();
1459 for (int i = 0; i < num_tasks;) {
1460 start_min_window.clear();
1461 IntegerValue window_end = kMinIntegerValue;
1462 for (; i < num_tasks; ++i) {
1463 const auto [task, presence_lit, start_min] =
1464 task_by_increasing_shifted_start_min[i];
1465 if (!helper_->IsPresent(presence_lit)) continue;
1466
1467 if (start_min_window.empty()) {
1468 start_min_window.push_back({task, start_min});
1469 window_end = start_min + helper_->SizeMin(task);
1470 } else if (start_min < window_end) {
1471 start_min_window.push_back({task, start_min});
1472 window_end += helper_->SizeMin(task);
1473 } else {
1474 break;
1475 }
1476 }
1477
1478    // Add to start_max_window_ all the tasks whose start_max
1479    // falls into [window_start, window_end).
1480 start_max_window.clear();
1481 for (; queue_index >= 0; queue_index--) {
1482 const auto [t, negated_start_max] =
1483 task_by_negated_start_max[queue_index];
1484 const IntegerValue start_max = -negated_start_max;
1485
1486      // Note that we add tasks whose presence is still unknown here.
1487 if (start_max >= window_end) break;
1488 if (helper_->IsAbsent(t)) continue;
1489 start_max_window.push_back({t, start_max});
1490 }
1491
1492 // If this is the case, we cannot propagate more than the detectable
1493 // precedence propagator. Note that this continue must happen after we
1494 // computed start_max_window_ though.
1495 if (start_min_window.size() <= 1) continue;
1496
1497 // Process current window.
1498 if (!start_max_window.empty() &&
1499 !PropagateSubwindow(task_set, absl::MakeSpan(start_min_window),
1500 absl::MakeSpan(start_max_window))) {
1501 ++stats_.num_conflicts;
1502 return false;
1503 }
1504 }
1505
1506 stats_.EndWithoutConflicts();
1507 return true;
1508}
1509
1510bool DisjunctiveNotLast::PropagateSubwindow(
1511 TaskSet& task_set, absl::Span<TaskTime> task_by_increasing_start_max,
1512 absl::Span<TaskTime> task_by_increasing_end_max) {
1513 for (TaskTime& entry : task_by_increasing_end_max) {
1514 entry.time = helper_->EndMax(entry.task_index);
1515 }
1516 IncrementalSort(task_by_increasing_end_max.begin(),
1517 task_by_increasing_end_max.end());
1518
1519 const IntegerValue threshold = task_by_increasing_end_max.back().time;
1520 int queue_size = 0;
1521 for (const TaskTime entry : task_by_increasing_start_max) {
1522 const int task = entry.task_index;
1523 const IntegerValue start_max = helper_->StartMax(task);
1524 DCHECK(helper_->IsPresent(task));
1525 if (start_max < threshold) {
1526 task_by_increasing_start_max[queue_size++] = {task, start_max};
1527 }
1528 }
1529
1530 // If the size is one, we cannot propagate more than the detectable precedence
1531 // propagator.
1532 if (queue_size <= 1) return true;
1533
1534 task_by_increasing_start_max =
1535 task_by_increasing_start_max.subspan(0, queue_size);
1536 std::sort(task_by_increasing_start_max.begin(),
1537 task_by_increasing_start_max.end());
1538
1539 task_set.Clear();
1540 int queue_index = 0;
1541 for (const auto task_time : task_by_increasing_end_max) {
1542 const int t = task_time.task_index;
1543 const IntegerValue end_max = task_time.time;
1544
1545 // We filtered absent tasks before, but it is possible that as we push
1546 // bounds of optional tasks, more tasks become absent.
1547 if (helper_->IsAbsent(t)) continue;
1548
1549 // task_set contains all the tasks that must start before the end-max of t.
1550 // These are the only candidates that have a chance to decrease the end-max
1551 // of t.
1552 while (queue_index < queue_size) {
1553 const auto to_insert = task_by_increasing_start_max[queue_index];
1554 const IntegerValue start_max = to_insert.time;
1555 if (end_max <= start_max) break;
1556
1557 const int task_index = to_insert.task_index;
1558 DCHECK(helper_->IsPresent(task_index));
1559 task_set.AddEntry({task_index, helper_->ShiftedStartMin(task_index),
1560 helper_->SizeMin(task_index)});
1561 ++queue_index;
1562 }
1563
1564 // In the following case, task t cannot be after all the critical tasks
1565 // (i.e. it cannot be last):
1566 //
1567 // [(critical tasks)
1568 // | <- t start-max
1569 //
1570 // So we can deduce that the end-max of t is smaller than or equal to the
1571 // largest start-max of the critical tasks.
1572 //
1573 // Note that this works as well when the presence of t is still unknown.
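//
// Illustrative example (made-up numbers): with critical tasks
// a(start_min=0, size_min=3, start_max=5) and b(start_min=1, size_min=3,
// start_max=4), running them consecutively gives an end-min of 6. If
// start_max(t) = 4 < 6, t cannot come after both of them, so end_max(t) can
// be lowered to max(start_max(a), start_max(b)) = 5.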
1574 int critical_index = 0;
1575 const IntegerValue end_min_of_critical_tasks =
1576 task_set.ComputeEndMin(/*task_to_ignore=*/t, &critical_index);
1577 if (end_min_of_critical_tasks <= helper_->StartMax(t)) continue;
1578
1579 // Find the largest start-max of the critical tasks (excluding t). The
1580 // end-max for t needs to be smaller than or equal to this.
1581 IntegerValue largest_ct_start_max = kMinIntegerValue;
1582 const absl::Span<const TaskSet::Entry> sorted_tasks =
1583 task_set.SortedTasks();
1584 const int sorted_tasks_size = sorted_tasks.size();
1585 for (int i = critical_index; i < sorted_tasks_size; ++i) {
1586 const int ct = sorted_tasks[i].task;
1587 if (t == ct) continue;
1588 const IntegerValue start_max = helper_->StartMax(ct);
1589
1590 // If we already know that t is after ct, we can have a tighter start-max.
1591 if (start_max > largest_ct_start_max &&
1592 helper_->GetCurrentMinDistanceBetweenTasks(ct, t) < 0) {
1593 largest_ct_start_max = start_max;
1594 }
1595 }
1596
1597 // If we have any critical task, the test will always be true because
1598 // of the tasks we put in task_set.
1599 DCHECK(largest_ct_start_max == kMinIntegerValue ||
1600 end_max > largest_ct_start_max);
1601 if (end_max > largest_ct_start_max) {
1602 helper_->ResetReason();
1603
1604 const IntegerValue window_start = sorted_tasks[critical_index].start_min;
1605 for (int i = critical_index; i < sorted_tasks_size; ++i) {
1606 const int ct = sorted_tasks[i].task;
1607 if (ct == t) continue;
1608 helper_->AddPresenceReason(ct);
1609 helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min,
1610 window_start);
1611 if (helper_->GetCurrentMinDistanceBetweenTasks(ct, t) >= 0) {
1612 helper_->AddReasonForBeingBeforeAssumingNoOverlap(ct, t);
1613 } else {
1614 helper_->AddStartMaxReason(ct, largest_ct_start_max);
1615 }
1616 }
1617
1618 // Add the reason for t: we only need its start-max.
1619 helper_->AddStartMaxReason(t, end_min_of_critical_tasks - 1);
1620
1621 // If largest_ct_start_max == kMinIntegerValue, we have a conflict. To
1622 // avoid integer overflow, we report it directly. This might happen
1623 // because the task is known to be after all the others, and thus it cannot
1624 // be "not last".
1625 if (largest_ct_start_max == kMinIntegerValue) {
1626 return helper_->ReportConflict();
1627 }
1628
1629 // Enqueue the new end-max for t.
1630 // Note that changing it will not influence the rest of the loop.
1631 ++stats_.num_propagations;
1632 if (!helper_->DecreaseEndMax(t, largest_ct_start_max)) return false;
1633 }
1634 }
1635 return true;
1636}
1637
1638int DisjunctiveNotLast::RegisterWith(GenericLiteralWatcher* watcher) {
1639 const int id = watcher->Register(this);
1640 helper_->WatchAllTasks(id);
1642 return id;
1643}
1644
1645bool DisjunctiveEdgeFinding::Propagate() {
1646 if (!helper_->IsEnforced()) return true;
1647 stats_.OnPropagate();
1648 const int num_tasks = helper_->NumTasks();
1649 if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) {
1650 ++stats_.num_conflicts;
1651 return false;
1652 }
1653 is_gray_.resize(num_tasks, false);
1654 non_gray_task_to_event_.resize(num_tasks);
1655
1656 FixedCapacityVector<TaskTime>& window = helper_->GetScratchTaskTimeVector1();
1657
1658 // This only contains non-gray tasks.
1659 FixedCapacityVector<TaskTime>& task_by_increasing_end_max =
1660 helper_->GetScratchTaskTimeVector2();
1661
1662 IntegerValue window_end = kMinIntegerValue;
1663 for (const auto [task, presence_lit, shifted_smin] :
1664 helper_->TaskByIncreasingShiftedStartMin()) {
1665 if (helper_->IsAbsent(presence_lit)) continue;
1666
1667 // Note that we use the real start-min here, not the shifted one. This is
1668 // because we might be able to push it if it is smaller than the window end.
1669 if (helper_->StartMin(task) < window_end) {
1670 window.push_back({task, shifted_smin});
1671 window_end += helper_->SizeMin(task);
1672 continue;
1673 }
1674
1675 // We need at least 3 tasks for the edge-finding to be different from
1676 // detectable precedences.
1677 if (window.size() > 2 &&
1678 !PropagateSubwindow(window_end, absl::MakeSpan(window),
1679 task_by_increasing_end_max)) {
1680 ++stats_.num_conflicts;
1681 return false;
1682 }
1683
1684 // Corner case: the propagation of the previous window might have made the
1685 // current task absent, even if it wasn't at the beginning of the loop.
1686 if (helper_->IsAbsent(presence_lit)) {
1687 window.clear();
1688 window_end = kMinIntegerValue;
1689 continue;
1690 }
1691
1692 // Start of the next window.
1693 window.clear();
1694 window.push_back({task, shifted_smin});
1695 window_end = shifted_smin + helper_->SizeMin(task);
1696 }
1697 if (window.size() > 2 &&
1698 !PropagateSubwindow(window_end, absl::MakeSpan(window),
1699 task_by_increasing_end_max)) {
1700 ++stats_.num_conflicts;
1701 return false;
1702 }
1703
1704 stats_.EndWithoutConflicts();
1705 return true;
1706}
1707
1708bool DisjunctiveEdgeFinding::PropagateSubwindow(
1709 IntegerValue window_end_min, absl::Span<const TaskTime> window,
1710 FixedCapacityVector<TaskTime>& task_by_increasing_end_max) {
1711 // Cache the task end-max and abort early if possible.
1712 task_by_increasing_end_max.clear();
1713 for (const auto task_time : window) {
1714 const int task = task_time.task_index;
1715 DCHECK(!helper_->IsAbsent(task));
1716
1717 // We mark all the non-present tasks as gray: tasks that are not present
1718 // can never trigger propagation or an overload checking failure.
1719 //
1720 // The same holds for tasks with an end-max that is too large:
1721 // theta_tree_.GetOptionalEnvelope() is always <= window_end_min, so tasks
1722 // whose end_max is >= window_end_min can never trigger propagation or
1723 // failure either. Thus, those tasks can be marked as gray, which removes
1724 // their contribution to theta right away.
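//
// For example (illustrative numbers): with window_end_min = 10, a present
// task with end_max = 8 stays non-gray, while a task with end_max = 15 is
// marked gray: since the envelope never exceeds 10, keeping it non-gray could
// never lead to an overload or to a push against its end-max.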
1725 const IntegerValue end_max = helper_->EndMax(task);
1726 if (helper_->IsPresent(task) && end_max < window_end_min) {
1727 is_gray_[task] = false;
1728 task_by_increasing_end_max.push_back({task, end_max});
1729 } else {
1730 is_gray_[task] = true;
1731 }
1732 }
1733
1734 // If we have just 1 non-gray task, then this propagator does not propagate
1735 // more than the detectable precedences, so we abort early.
1736 if (task_by_increasing_end_max.size() < 2) return true;
1737 std::sort(task_by_increasing_end_max.begin(),
1738 task_by_increasing_end_max.end());
1739
1740 // Set up theta tree.
1741 //
1742 // Some tasks in the theta tree will be considered "gray".
1743 // When computing the end-min of the sorted tasks, we will compute it for:
1744 // - All the non-gray tasks.
1745 // - All the non-gray tasks + at most one gray task.
1746 //
1747 // TODO(user): it should be faster to initialize it all at once rather
1748 // than calling AddOrUpdate() n times.
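//
// Roughly, the envelope maintained by the tree is, over the events e kept in
// it, max(start_min(e) + sum of the energy_min of the events at or after e),
// i.e. the best end-min obtained by running those tasks back to back. For
// example (made-up numbers), two events with (start_min, energy_min) equal to
// (0, 3) and (5, 2) give an envelope of max(0 + 3 + 2, 5 + 2) = 7.
// GetEnvelope() uses only the non-gray events, while GetOptionalEnvelope()
// additionally allows at most one gray event to contribute.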
1749 const int window_size = window.size();
1750 event_size_.clear();
1751 theta_tree_.Reset(window_size);
1752 for (int event = 0; event < window_size; ++event) {
1753 const TaskTime task_time = window[event];
1754 const int task = task_time.task_index;
1755 const IntegerValue energy_min = helper_->SizeMin(task);
1756 event_size_.push_back(energy_min);
1757 if (is_gray_[task]) {
1758 theta_tree_.AddOrUpdateOptionalEvent(event, task_time.time, energy_min);
1759 } else {
1760 non_gray_task_to_event_[task] = event;
1761 theta_tree_.AddOrUpdateEvent(event, task_time.time, energy_min,
1762 energy_min);
1763 }
1764 }
1765
1766 // At each iteration we either transform a non-gray task into a gray one or
1767 // remove a gray task, so this loop is linear in complexity.
1768 while (true) {
1769 DCHECK(!is_gray_[task_by_increasing_end_max.back().task_index]);
1770 const IntegerValue non_gray_end_max =
1771 task_by_increasing_end_max.back().time;
1772
1773 // Overload checking.
1774 const IntegerValue non_gray_end_min = theta_tree_.GetEnvelope();
1775 if (non_gray_end_min > non_gray_end_max) {
1776 helper_->ResetReason();
1777
1778 // We need the reasons for the critical tasks to fall in [window_start, window_end]:
1779 const int critical_event =
1780 theta_tree_.GetMaxEventWithEnvelopeGreaterThan(non_gray_end_max);
1781 const IntegerValue window_start = window[critical_event].time;
1782 const IntegerValue window_end =
1783 theta_tree_.GetEnvelopeOf(critical_event) - 1;
1784 for (int event = critical_event; event < window_size; event++) {
1785 const int task = window[event].task_index;
1786 if (is_gray_[task]) continue;
1787 helper_->AddPresenceReason(task);
1788 helper_->AddEnergyAfterReason(task, event_size_[event], window_start);
1789 helper_->AddEndMaxReason(task, window_end);
1790 }
1791 return helper_->ReportConflict();
1792 }
1793
1794 // Edge-finding.
1795 // If we have a situation like:
1796 // [(critical_task_with_gray_task)
1797 // ]
1798 // ^ end-max without the gray task.
1799 //
1800 // Then the gray task must be after all the critical tasks (all the non-gray
1801 // tasks in the tree actually), otherwise there will be no way to schedule
1802 // the critical_tasks inside their time window.
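//
// Worked example (made-up numbers): non-gray tasks a(start_min=0,
// size_min=4) and b(start_min=2, size_min=3) give non_gray_end_min = 7 and,
// say, non_gray_end_max = 8. A gray task g(start_min=1, size_min=2) raises
// the optional envelope to 0 + 4 + 2 + 3 = 9 > 8, so a, b and g cannot all
// finish by 8; g must therefore be after both a and b, and its start-min can
// be raised to non_gray_end_min = 7.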
1803 while (theta_tree_.GetOptionalEnvelope() > non_gray_end_max) {
1804 const IntegerValue end_min_with_gray = theta_tree_.GetOptionalEnvelope();
1805 int critical_event_with_gray;
1806 int gray_event;
1807 IntegerValue available_energy;
1808 theta_tree_.GetEventsWithOptionalEnvelopeGreaterThan(
1809 non_gray_end_max, &critical_event_with_gray, &gray_event,
1810 &available_energy);
1811 const int gray_task = window[gray_event].task_index;
1812 DCHECK(is_gray_[gray_task]);
1813
1814 // This might happen in the corner case where more than one interval is
1815 // controlled by the same Boolean.
1816 if (helper_->IsAbsent(gray_task)) {
1817 theta_tree_.RemoveEvent(gray_event);
1818 continue;
1819 }
1820
1821 // Since the gray task is after all the others, we have a new lower bound.
1822 if (helper_->StartMin(gray_task) < non_gray_end_min) {
1823 // The API is not ideal here. We just want the start of the critical
1824 // tasks that explain the non_gray_end_min computed above.
1825 const int critical_event =
1826 theta_tree_.GetMaxEventWithEnvelopeGreaterThan(non_gray_end_min -
1827 1);
1828
1829 // Even if we need fewer tasks to explain the overload, because we are
1830 // going to explain the full non_gray_end_min, we can relax the
1831 // explanation.
1832 if (critical_event_with_gray > critical_event) {
1833 critical_event_with_gray = critical_event;
1834 }
1835
1836 const int first_event = critical_event_with_gray;
1837 const int second_event = critical_event;
1838 const IntegerValue first_start = window[first_event].time;
1839 const IntegerValue second_start = window[second_event].time;
1840
1841 // window_end is chosen to be as big as possible while still having an
1842 // overload if the gray task is not last.
1843 //
1844 // If gray_task has a variable duration, it is possible that it must
1845 // be last just because its end-min is large.
1846 bool use_energy_reason = true;
1847 IntegerValue window_end;
1848 if (helper_->EndMin(gray_task) > non_gray_end_max) {
1849 // This is actually a form of detectable precedence.
1850 //
1851 // TODO(user): We could relax the reason more, but this seems to happen
1852 // really rarely.
1853 use_energy_reason = false;
1854 window_end = helper_->EndMin(gray_task) - 1;
1855 } else {
1856 window_end = end_min_with_gray - 1;
1857 }
1858 CHECK_GE(window_end, non_gray_end_max);
1859
1860 // The non-gray part of the explanation as detailed above.
1861 helper_->ResetReason();
1862 bool all_before = true;
1863 for (int event = first_event; event < window_size; event++) {
1864 const int task = window[event].task_index;
1865 if (is_gray_[task]) continue;
1866 helper_->AddPresenceReason(task);
1867 helper_->AddEnergyAfterReason(
1868 task, event_size_[event],
1869 event >= second_event ? second_start : first_start);
1870
1871 const IntegerValue dist =
1872 helper_->GetCurrentMinDistanceBetweenTasks(task, gray_task);
1873 if (dist >= 0) {
1874 helper_->AddReasonForBeingBeforeAssumingNoOverlap(task, gray_task);
1875 } else {
1876 all_before = false;
1877 helper_->AddEndMaxReason(task, window_end);
1878 }
1879 }
1880
1881 // Add the reason for the gray_task (we don't need the end-max or
1882 // presence reason needed for the precedences).
1883 if (!all_before) {
1884 if (use_energy_reason) {
1885 helper_->AddEnergyAfterReason(gray_task, event_size_[gray_event],
1886 first_start);
1887 } else {
1888 helper_->AddEndMinReason(gray_task, helper_->EndMin(gray_task));
1889 }
1890 }
1891
1892 // Process detected precedence.
1893 if (helper_->CurrentDecisionLevel() == 0 &&
1894 helper_->IsPresent(gray_task)) {
1895 for (int i = first_event; i < window_size; ++i) {
1896 const int task = window[i].task_index;
1897 if (!is_gray_[task]) {
1898 if (!helper_->NotifyLevelZeroPrecedence(task, gray_task)) {
1899 return false;
1900 }
1901 }
1902 }
1903 }
1904
1905 // Enqueue the new start-min for gray_task.
1906 //
1907 // TODO(user): propagate the precedence Boolean here too? I think it
1908 // will be more powerful, even if eventually all these precedences will
1909 // become detectable (see Petr Vilím's PhD thesis).
1910 ++stats_.num_propagations;
1911 if (!helper_->IncreaseStartMin(gray_task, non_gray_end_min)) {
1912 return false;
1913 }
1914 }
1915
1916 // Remove the gray_task.
1917 theta_tree_.RemoveEvent(gray_event);
1918 }
1919
1920 // Stop before we get just one non-gray task.
1921 if (task_by_increasing_end_max.size() <= 2) break;
1922
1923 // Stop if the min of end_max is too big.
1924 if (task_by_increasing_end_max[0].time >=
1925 theta_tree_.GetOptionalEnvelope()) {
1926 break;
1927 }
1928
1929 // Make the non-gray task with the largest end-max gray.
1930 const int new_gray_task = task_by_increasing_end_max.back().task_index;
1931 task_by_increasing_end_max.pop_back();
1932 const int new_gray_event = non_gray_task_to_event_[new_gray_task];
1933 DCHECK(!is_gray_[new_gray_task]);
1934 is_gray_[new_gray_task] = true;
1935 theta_tree_.AddOrUpdateOptionalEvent(new_gray_event,
1936 window[new_gray_event].time,
1937 event_size_[new_gray_event]);
1938 }
1939
1940 return true;
1941}
1942
1943int DisjunctiveEdgeFinding::RegisterWith(GenericLiteralWatcher* watcher) {
1944 const int id = watcher->Register(this);
1945 helper_->SetTimeDirection(time_direction_);
1946 helper_->WatchAllTasks(id);
1948 return id;
1949}
1950
1951} // namespace sat
1952} // namespace operations_research