Google OR-Tools v9.14
a fast and portable software suite for combinatorial optimization
scheduling_cuts.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/scheduling_cuts.h"
15
16#include <stdint.h>
17
18#include <algorithm>
19#include <cmath>
20#include <limits>
21#include <optional>
22#include <string>
23#include <tuple>
24#include <utility>
25#include <vector>
26
27#include "absl/base/attributes.h"
28#include "absl/container/btree_set.h"
29#include "absl/container/flat_hash_map.h"
30#include "absl/log/check.h"
31#include "absl/strings/str_cat.h"
32#include "absl/strings/str_join.h"
33#include "absl/strings/string_view.h"
34#include "absl/types/span.h"
38#include "ortools/sat/cuts.h"
39#include "ortools/sat/integer.h"
44#include "ortools/sat/model.h"
49#include "ortools/sat/util.h"
53
54namespace operations_research {
55namespace sat {
56
57namespace {
58// Minimum amount of violation of the cut constraint by the solution. This
59// is needed to avoid numerical issues and adding cuts with minor effect.
60const double kMinCutViolation = 1e-4;
61
62// Checks that the literals of the decomposed energy (if present) and the size
63// and demand of a cumulative task are in sync.
64// In some rare instances, this is not the case. In that case, it is better not
65// to try to generate cuts.
66bool DecomposedEnergyIsPropagated(const VariablesAssignment& assignment, int t,
67 SchedulingConstraintHelper* helper,
68 SchedulingDemandHelper* demands_helper) {
69 const std::vector<LiteralValueValue>& decomposed_energy =
70 demands_helper->DecomposedEnergies()[t];
71 if (decomposed_energy.empty()) return true;
72
73 // Checks the propagation of the exactly_one constraint on literals.
74 int num_false_literals = 0;
75 int num_true_literals = 0;
76 for (const auto [lit, fixed_size, fixed_demand] : decomposed_energy) {
77 if (assignment.LiteralIsFalse(lit)) ++num_false_literals;
78 if (assignment.LiteralIsTrue(lit)) ++num_true_literals;
79 }
80 if (num_true_literals > 1) return false;
81 if (num_true_literals == 1 &&
82 num_false_literals < decomposed_energy.size() - 1) {
83 return false;
84 }
85 if (num_false_literals == decomposed_energy.size()) return false;
86 if (decomposed_energy.size() == 1 && num_true_literals != 1) return false;
87
88 // Checks the propagations of the bounds of the size and the demand.
89 IntegerValue propagated_size_min = kMaxIntegerValue;
90 IntegerValue propagated_size_max = kMinIntegerValue;
91 IntegerValue propagated_demand_min = kMaxIntegerValue;
92 IntegerValue propagated_demand_max = kMinIntegerValue;
93 for (const auto [lit, fixed_size, fixed_demand] : decomposed_energy) {
94 if (assignment.LiteralIsFalse(lit)) continue;
95
96 if (assignment.LiteralIsTrue(lit)) {
97 if (fixed_size != helper->SizeMin(t) ||
98 fixed_size != helper->SizeMax(t) ||
99 fixed_demand != demands_helper->DemandMin(t) ||
100 fixed_demand != demands_helper->DemandMax(t)) {
101 return false;
102 }
103 return true;
104 }
105
106 if (fixed_size < helper->SizeMin(t) || fixed_size > helper->SizeMax(t) ||
107 fixed_demand < demands_helper->DemandMin(t) ||
108 fixed_demand > demands_helper->DemandMax(t)) {
109 return false;
110 }
111 propagated_size_min = std::min(propagated_size_min, fixed_size);
112 propagated_size_max = std::max(propagated_size_max, fixed_size);
113 propagated_demand_min = std::min(propagated_demand_min, fixed_demand);
114 propagated_demand_max = std::max(propagated_demand_max, fixed_demand);
115 }
116
117 if (propagated_size_min != helper->SizeMin(t) ||
118 propagated_size_max != helper->SizeMax(t) ||
119 propagated_demand_min != demands_helper->DemandMin(t) ||
120 propagated_demand_max != demands_helper->DemandMax(t)) {
121 return false;
122 }
123
124 return true;
125}
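The propagation test above can be exercised in isolation. The sketch below is a hypothetical, standalone rewrite of the exactly_one part of the check (plain enum values instead of the SAT assignment); it only illustrates the fixed-point conditions and is not the OR-Tools API.

#include <vector>

enum class LitState { kTrue, kFalse, kUnassigned };

// Mirrors the literal-counting block of DecomposedEnergyIsPropagated():
// the exactly_one over the alternatives must be fully propagated.
bool ExactlyOneIsPropagated(const std::vector<LitState>& alternatives) {
  int num_true = 0;
  int num_false = 0;
  for (const LitState s : alternatives) {
    if (s == LitState::kTrue) ++num_true;
    if (s == LitState::kFalse) ++num_false;
  }
  const int n = static_cast<int>(alternatives.size());
  if (num_true > 1) return false;                        // Unreported conflict.
  if (num_true == 1 && num_false < n - 1) return false;  // Others not falsified.
  if (num_false == n) return false;                      // Everything falsified.
  if (n == 1 && num_true != 1) return false;             // Single choice not fixed.
  return true;
}

int main() {
  // One chosen alternative, the other falsified: this is a fixed point.
  return ExactlyOneIsPropagated({LitState::kTrue, LitState::kFalse}) ? 0 : 1;
}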
126
127} // namespace
128
129struct EnergyEvent {
130 EnergyEvent(int t, SchedulingConstraintHelper* x_helper)
131 : start_min(x_helper->StartMin(t)),
132 start_max(x_helper->StartMax(t)),
133 end_min(x_helper->EndMin(t)),
134 end_max(x_helper->EndMax(t)),
135 size_min(x_helper->SizeMin(t)) {}
136
137 // Cache of the bounds of the interval
138 IntegerValue start_min;
139 IntegerValue start_max;
140 IntegerValue end_min;
141 IntegerValue end_max;
142 IntegerValue size_min;
143
144 // Cache of the bounds of the demand.
145 IntegerValue demand_min;
146
147 // The energy min of this event.
148 IntegerValue energy_min;
149
150 // If non empty, a decomposed view of the energy of this event.
151 // First value in each pair is size, second is demand.
152 std::vector<LiteralValueValue> decomposed_energy;
154
155 // We need this for linearizing the energy in some cases.
156 AffineExpression demand;
157
158 // If set, this event is optional and its presence is controlled by this.
159 LiteralIndex presence_literal_index = kNoLiteralIndex;
160
161 // A linear expression which is a valid lower bound on the total energy of
162 // this event. We also cache the activity of the expression to not recompute
163 // it all the time.
164 LinearExpression linearized_energy;
165 double linearized_energy_lp_value = 0.0;
166
167 // True if linearized_energy is not exact and a McCormick relaxation.
168 bool energy_is_quadratic = false;
169
170 // The actual value of the presence literal of the interval(s) is checked
171 // when the event is created. A value of kNoLiteralIndex indicates that either
172 // the interval was not optional, or that its presence literal is true at
173 // level zero.
174 bool IsPresent() const { return presence_literal_index == kNoLiteralIndex; }
175
176 // Computes the mandatory minimal overlap of the interval with the time window
177 // [start, end].
178 IntegerValue GetMinOverlap(IntegerValue start, IntegerValue end) const {
179 return std::max(
180 std::min({end_min - start, end - start_max, size_min, end - start}),
181 IntegerValue(0));
182 }
183
184 // This method expects all the other fields to have been filled before.
185 // It must be called before the EnergyEvent is used.
186 ABSL_MUST_USE_RESULT bool FillEnergyLp(
187 AffineExpression size,
189 Model* model) {
190 LinearConstraintBuilder tmp_energy(model);
191 if (IsPresent()) {
192 if (!decomposed_energy.empty()) {
193 if (!tmp_energy.AddDecomposedProduct(decomposed_energy)) return false;
194 } else {
195 tmp_energy.AddQuadraticLowerBound(size, demand,
196 model->GetOrCreate<IntegerTrail>(),
198 }
199 } else {
201 energy_min)) {
202 return false;
203 }
204 }
205 linearized_energy = tmp_energy.BuildExpression();
207 return true;
208 }
209
210 std::string DebugString() const {
211 return absl::StrCat(
212 "EnergyEvent(start_min = ", start_min, ", start_max = ", start_max,
213 ", end_min = ", end_min, ", end_max = ", end_max,
214 ", demand = ", demand.DebugString(), ", energy = ",
215 decomposed_energy.empty()
216 ? "{}"
217 : absl::StrCat(decomposed_energy.size(), " terms"),
218 ", presence_literal_index = ", presence_literal_index, ")");
219 }
220};
221
222namespace {
223
224// Compute the energetic contribution of a task in a given time window, and
225// add it to the cut. It returns false if it tried to generate the cut and
226// failed.
227ABSL_MUST_USE_RESULT bool AddOneEvent(
228 const EnergyEvent& event, IntegerValue window_start,
229 IntegerValue window_end, LinearConstraintBuilder& cut,
230 bool* add_energy_to_name = nullptr, bool* add_quadratic_to_name = nullptr,
231 bool* add_opt_to_name = nullptr, bool* add_lifted_to_name = nullptr) {
232 if (event.end_min <= window_start || event.start_max >= window_end) {
233 return true; // Event can move outside the time window.
234 }
235
236 if (event.start_min >= window_start && event.end_max <= window_end) {
237 // Event is always contained by the time window.
238 cut.AddLinearExpression(event.linearized_energy);
239
240 if (event.energy_is_quadratic && add_quadratic_to_name != nullptr) {
241 *add_quadratic_to_name = true;
242 }
243 if (add_energy_to_name != nullptr &&
244 event.energy_min > event.size_min * event.demand_min) {
245 *add_energy_to_name = true;
246 }
247 if (!event.IsPresent() && add_opt_to_name != nullptr) {
248 *add_opt_to_name = true;
249 }
250 } else { // The event has a mandatory overlap with the time window.
251 const IntegerValue min_overlap =
252 event.GetMinOverlap(window_start, window_end);
253 if (min_overlap <= 0) return true;
254 if (add_lifted_to_name != nullptr) *add_lifted_to_name = true;
255
256 if (event.IsPresent()) {
257 const std::vector<LiteralValueValue>& energy = event.decomposed_energy;
258 if (energy.empty()) {
259 cut.AddTerm(event.demand, min_overlap);
260 } else {
261 const IntegerValue window_size = window_end - window_start;
262 for (const auto [lit, fixed_size, fixed_demand] : energy) {
263 const IntegerValue alt_end_min =
264 std::max(event.end_min, event.start_min + fixed_size);
265 const IntegerValue alt_start_max =
266 std::min(event.start_max, event.end_max - fixed_size);
267 const IntegerValue energy_min =
268 fixed_demand *
269 std::min({alt_end_min - window_start, window_end - alt_start_max,
270 fixed_size, window_size});
271 if (energy_min == 0) continue;
272 if (!cut.AddLiteralTerm(lit, energy_min)) return false;
273 }
274 if (add_energy_to_name != nullptr) *add_energy_to_name = true;
275 }
276 } else {
277 if (add_opt_to_name != nullptr) *add_opt_to_name = true;
278 const IntegerValue min_energy = ComputeEnergyMinInWindow(
279 event.start_min, event.start_max, event.end_min, event.end_max,
280 event.size_min, event.demand_min, event.decomposed_energy,
281 window_start, window_end);
282 if (min_energy > event.size_min * event.demand_min &&
283 add_energy_to_name != nullptr) {
284 *add_energy_to_name = true;
285 }
286 if (!cut.AddLiteralTerm(Literal(event.presence_literal_index),
287 min_energy)) {
288 return false;
289 }
290 }
291 }
292 return true;
293}
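In the lifted branch above, a present task with a fixed demand contributes demand_min times its mandatory overlap with the window. Below is a minimal standalone sketch of that overlap computation, using plain int64_t instead of IntegerValue and hypothetical names.

#include <algorithm>
#include <cstdint>
#include <iostream>

// Smallest part of the task that must intersect [window_start, window_end),
// whatever feasible start is chosen (same min/max structure as GetMinOverlap).
int64_t MinOverlap(int64_t start_max, int64_t end_min, int64_t size_min,
                   int64_t window_start, int64_t window_end) {
  const int64_t overlap =
      std::min({end_min - window_start,       // Part inside when started early.
                window_end - start_max,       // Part inside when started late.
                size_min,                     // Never more than the task size.
                window_end - window_start});  // Never more than the window.
  return std::max<int64_t>(overlap, 0);
}

int main() {
  // Task with start in [2, 4] and size 5 (so end_min = 7), window [3, 8):
  // it must overlap the window by at least min(7-3, 8-4, 5, 5) = 4 units.
  // Its energetic contribution to the cut would be demand_min * 4.
  std::cout << MinOverlap(/*start_max=*/4, /*end_min=*/7, /*size_min=*/5,
                          /*window_start=*/3, /*window_end=*/8)
            << "\n";  // Prints 4.
}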
294
295// Returns the list of all possible demand values for the given event.
296// It returns an empty vector if the number of values is too large.
297std::vector<int64_t> FindPossibleDemands(const EnergyEvent& event,
298 const VariablesAssignment& assignment,
299 IntegerTrail* integer_trail) {
300 std::vector<int64_t> possible_demands;
301 if (event.decomposed_energy.empty()) {
302 if (integer_trail->IsFixed(event.demand)) {
303 possible_demands.push_back(
304 integer_trail->FixedValue(event.demand).value());
305 } else {
306 if (integer_trail->InitialVariableDomain(event.demand.var).Size() >
307 1000000) {
308 return {};
309 }
310 for (const int64_t var_value :
311 integer_trail->InitialVariableDomain(event.demand.var).Values()) {
312 possible_demands.push_back(event.demand.ValueAt(var_value).value());
313 }
314 }
315 } else {
316 for (const auto [lit, fixed_size, fixed_demand] : event.decomposed_energy) {
317 if (assignment.LiteralIsFalse(lit)) continue;
318 possible_demands.push_back(fixed_demand.value());
319 }
320 }
321 return possible_demands;
322}
323
324} // namespace
325
326// This cumulative energetic cut generator will split the cumulative span in 2
327// regions.
328//
329// In the region before the min of the makespan, we will compute a more
330// precise reachable profile and have a better estimation of the energy
331// available between two time points. The improvement can come from two sources:
332// - subset sum indicates that the max capacity cannot be reached.
333// - sum of demands < max capacity.
334//
335// In the region after the min of the makespan, we will use
336// fixed_capacity * (makespan - makespan_min)
337// as the available energy.
339 absl::string_view cut_name,
341 std::vector<EnergyEvent> events, IntegerValue capacity,
342 AffineExpression makespan, TimeLimit* time_limit, Model* model,
343 LinearConstraintManager* manager) {
344 // Checks the precondition of the code.
345 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
346 DCHECK(integer_trail->IsFixed(capacity));
347
348 const VariablesAssignment& assignment =
349 model->GetOrCreate<Trail>()->Assignment();
350
351 // Compute relevant time points.
352 // TODO(user): We could reduce this set.
353 // TODO(user): we can compute the max usage between makespan_min and
354 // makespan_max.
355 std::vector<IntegerValue> time_points;
356 std::vector<std::vector<int64_t>> possible_demands(events.size());
357 const IntegerValue makespan_min = integer_trail->LowerBound(makespan);
358 IntegerValue max_end_min = kMinIntegerValue; // Used to abort early.
359 IntegerValue max_end_max = kMinIntegerValue; // Used as a sentinel.
360 for (int i = 0; i < events.size(); ++i) {
361 const EnergyEvent& event = events[i];
362 if (event.start_min < makespan_min) {
363 time_points.push_back(event.start_min);
364 }
365 if (event.start_max < makespan_min) {
366 time_points.push_back(event.start_max);
367 }
368 if (event.end_min < makespan_min) {
369 time_points.push_back(event.end_min);
370 }
371 if (event.end_max < makespan_min) {
372 time_points.push_back(event.end_max);
373 }
374 max_end_min = std::max(max_end_min, event.end_min);
375 max_end_max = std::max(max_end_max, event.end_max);
376 possible_demands[i] = FindPossibleDemands(event, assignment, integer_trail);
377 }
378 time_points.push_back(makespan_min);
379 time_points.push_back(max_end_max);
381
382 const int num_time_points = time_points.size();
383 absl::flat_hash_map<IntegerValue, IntegerValue> reachable_capacity_ending_at;
384
385 MaxBoundedSubsetSum reachable_capacity_subset_sum(capacity.value());
386 for (int i = 1; i < num_time_points; ++i) {
387 const IntegerValue window_start = time_points[i - 1];
388 const IntegerValue window_end = time_points[i];
389 reachable_capacity_subset_sum.Reset(capacity.value());
390 for (int i = 0; i < events.size(); ++i) {
391 const EnergyEvent& event = events[i];
392 if (event.start_min >= window_end || event.end_max <= window_start) {
393 continue;
394 }
395 if (possible_demands[i].empty()) { // Number of values was too large.
396 // In practice, it stops the DP as the upper bound is reached.
397 reachable_capacity_subset_sum.Add(capacity.value());
398 } else {
399 reachable_capacity_subset_sum.AddChoices(possible_demands[i]);
400 }
401 if (reachable_capacity_subset_sum.CurrentMax() == capacity.value()) break;
402 }
403 reachable_capacity_ending_at[window_end] =
404 reachable_capacity_subset_sum.CurrentMax();
405 }
406
407 const double capacity_lp = ToDouble(capacity);
408 const double makespan_lp = makespan.LpValue(lp_values);
409 const double makespan_min_lp = ToDouble(makespan_min);
410 LinearConstraintBuilder temp_builder(model);
411 TopNCuts top_n_cuts(5);
412 for (int i = 0; i + 1 < num_time_points; ++i) {
413 // Checks the time limit if the problem is too big.
414 if (events.size() > 50 && time_limit->LimitReached()) return;
415
416 const IntegerValue window_start = time_points[i];
417 // After max_end_min, all tasks can fit before window_start.
418 if (window_start >= max_end_min) break;
419
420 IntegerValue cumulated_max_energy = 0;
421 IntegerValue cumulated_max_energy_before_makespan_min = 0;
422 bool use_subset_sum = false;
423 bool use_subset_sum_before_makespan_min = false;
424
425 for (int j = i + 1; j < num_time_points; ++j) {
426 const IntegerValue strip_start = time_points[j - 1];
427 const IntegerValue window_end = time_points[j];
428 const IntegerValue max_reachable_capacity_in_current_strip =
429 reachable_capacity_ending_at[window_end];
430 DCHECK_LE(max_reachable_capacity_in_current_strip, capacity);
431
432 // Update states for the name of the generated cut.
433 if (max_reachable_capacity_in_current_strip < capacity) {
434 use_subset_sum = true;
435 if (window_end <= makespan_min) {
436 use_subset_sum_before_makespan_min = true;
437 }
438 }
439
440 const IntegerValue energy_in_strip =
441 (window_end - strip_start) * max_reachable_capacity_in_current_strip;
442 cumulated_max_energy += energy_in_strip;
443 if (window_end <= makespan_min) {
444 cumulated_max_energy_before_makespan_min += energy_in_strip;
445 }
446
447 if (window_start >= makespan_min) {
448 DCHECK_EQ(cumulated_max_energy_before_makespan_min, 0);
449 }
450 DCHECK_LE(cumulated_max_energy, capacity * (window_end - window_start));
451 const double max_energy_up_to_makespan_lp =
452 strip_start >= makespan_min
453 ? ToDouble(cumulated_max_energy_before_makespan_min) +
454 (makespan_lp - makespan_min_lp) * capacity_lp
455 : std::numeric_limits<double>::infinity();
456
457 // We prefer using the makespan as the cut will tighten itself when the
458 // objective value is improved.
459 //
460 // We reuse the min cut violation to allow some slack in the comparison
461 // between the two computed energy values.
462 bool cut_generated = true;
463 bool add_opt_to_name = false;
464 bool add_lifted_to_name = false;
465 bool add_quadratic_to_name = false;
466 bool add_energy_to_name = false;
467
468 temp_builder.Clear();
469 const bool use_makespan =
470 max_energy_up_to_makespan_lp <=
471 ToDouble(cumulated_max_energy) + kMinCutViolation;
472 const bool use_subset_sum_in_cut =
473 use_makespan ? use_subset_sum_before_makespan_min : use_subset_sum;
474
475 if (use_makespan) { // Add the energy from makespan_min to makespan.
476 temp_builder.AddConstant(makespan_min * capacity);
477 temp_builder.AddTerm(makespan, -capacity);
478 }
479
480 // Add contributions from all events.
481 for (const EnergyEvent& event : events) {
482 if (!AddOneEvent(event, window_start, window_end, temp_builder,
483 &add_energy_to_name, &add_quadratic_to_name,
484 &add_opt_to_name, &add_lifted_to_name)) {
485 cut_generated = false;
486 break; // Exit the event loop.
487 }
488 }
489
490 // We can break here as any further iteration on j will hit the same
491 // issue.
492 if (!cut_generated) break;
493
494 // Build the cut to evaluate its efficacy.
495 const IntegerValue energy_rhs =
496 use_makespan ? cumulated_max_energy_before_makespan_min
497 : cumulated_max_energy;
498 LinearConstraint potential_cut =
499 temp_builder.BuildConstraint(kMinIntegerValue, energy_rhs);
500
501 if (potential_cut.NormalizedViolation(lp_values) >= kMinCutViolation) {
502 std::string full_name(cut_name);
503 if (add_energy_to_name) full_name.append("_energy");
504 if (add_lifted_to_name) full_name.append("_lifted");
505 if (use_makespan) full_name.append("_makespan");
506 if (add_opt_to_name) full_name.append("_optional");
507 if (add_quadratic_to_name) full_name.append("_quadratic");
508 if (use_subset_sum_in_cut) full_name.append("_subsetsum");
509 top_n_cuts.AddCut(std::move(potential_cut), full_name, lp_values);
510 }
511 }
512 }
513
514 top_n_cuts.TransferToManager(manager);
515}
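The reachable-capacity reasoning above relies on a bounded subset sum over the possible demands. The helper below is a hypothetical stand-in for MaxBoundedSubsetSum, written as a plain dynamic program to show why a strip can be credited with less than capacity * length.

#include <cstdint>
#include <iostream>
#include <vector>

// Largest total demand <= bound that a subset of the given demands can reach.
int64_t MaxReachableLoad(const std::vector<int64_t>& demands, int64_t bound) {
  std::vector<bool> reachable(bound + 1, false);
  reachable[0] = true;
  for (const int64_t d : demands) {
    if (d <= 0 || d > bound) continue;
    for (int64_t v = bound; v >= d; --v) {
      if (reachable[v - d]) reachable[v] = true;
    }
  }
  for (int64_t v = bound; v >= 0; --v) {
    if (reachable[v]) return v;
  }
  return 0;
}

int main() {
  // With capacity 5 and overlapping demands {3, 3}, at most 3 units can run
  // at once, so a strip of length L contributes 3 * L instead of 5 * L to the
  // right-hand side of the energetic cut.
  std::cout << MaxReachableLoad({3, 3}, 5) << "\n";  // Prints 3.
}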
516
518 absl::string_view cut_name,
520 std::vector<EnergyEvent> events, const AffineExpression& capacity,
522 double max_possible_energy_lp = 0.0;
523 for (const EnergyEvent& event : events) {
524 max_possible_energy_lp += event.linearized_energy_lp_value;
525 }
526
527 // Currently, we look at all the possible time windows, and will push all cuts
528 // in the TopNCuts object. From our observations, this generator creates only
529 // a few cuts for a given run.
530 //
531 // The complexity of this loop is n^3. If we follow the latest research, we
532 // could implement this in n log^2(n). Still, this is not visible in the
533 // profile as we only run this method at the root node.
534 const double capacity_lp = capacity.LpValue(lp_values);
535
536 // Compute relevant time points.
537 // TODO(user): We could reduce this set.
538 absl::btree_set<IntegerValue> time_points_set;
539 IntegerValue max_end_min = kMinIntegerValue;
540 for (const EnergyEvent& event : events) {
541 time_points_set.insert(event.start_min);
542 time_points_set.insert(event.start_max);
543 time_points_set.insert(event.end_min);
544 time_points_set.insert(event.end_max);
545 max_end_min = std::max(max_end_min, event.end_min);
546 }
547 const std::vector<IntegerValue> time_points(time_points_set.begin(),
548 time_points_set.end());
549 const int num_time_points = time_points.size();
550
551 LinearConstraintBuilder temp_builder(model);
552 TopNCuts top_n_cuts(5);
553 for (int i = 0; i + 1 < num_time_points; ++i) {
554 // Checks the time limit if the problem is too big.
555 if (events.size() > 50 && time_limit->LimitReached()) return;
556
557 const IntegerValue window_start = time_points[i];
558 // After max_end_min, all tasks can fit before window_start.
559 if (window_start >= max_end_min) break;
560
561 for (int j = i + 1; j < num_time_points; ++j) {
562 const IntegerValue window_end = time_points[j];
563 const double available_energy_lp =
564 ToDouble(window_end - window_start) * capacity_lp;
565 if (available_energy_lp >= max_possible_energy_lp) break;
566
567 bool cut_generated = true;
568 bool add_opt_to_name = false;
569 bool add_lifted_to_name = false;
570 bool add_quadratic_to_name = false;
571 bool add_energy_to_name = false;
572 temp_builder.Clear();
573
574 // Compute the max energy available for the tasks.
575 temp_builder.AddTerm(capacity, window_start - window_end);
576
577 // Add all contributions.
578 for (const EnergyEvent& event : events) {
579 if (!AddOneEvent(event, window_start, window_end, temp_builder,
580 &add_energy_to_name, &add_quadratic_to_name,
581 &add_opt_to_name, &add_lifted_to_name)) {
582 cut_generated = false;
583 break; // Exit the event loop.
584 }
585 }
586
587 // We can break here as any further iteration on j will hit the same
588 // issue.
589 if (!cut_generated) break;
590
591 // Build the cut to evaluate its efficacy.
592 LinearConstraint potential_cut =
593 temp_builder.BuildConstraint(kMinIntegerValue, 0);
594
595 if (potential_cut.NormalizedViolation(lp_values) >= kMinCutViolation) {
596 std::string full_name(cut_name);
597 if (add_energy_to_name) full_name.append("_energy");
598 if (add_lifted_to_name) full_name.append("_lifted");
599 if (add_opt_to_name) full_name.append("_optional");
600 if (add_quadratic_to_name) full_name.append("_quadratic");
601 top_n_cuts.AddCut(std::move(potential_cut), full_name, lp_values);
602 }
603 }
604 }
605
606 top_n_cuts.TransferToManager(manager);
607}
608
610 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
611 const AffineExpression& capacity,
612 const std::optional<AffineExpression>& makespan, Model* model) {
613 CutGenerator result;
614 result.only_run_at_level_zero = true;
615 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
616 &result.vars);
618 helper, model, &result.vars,
620 if (makespan.has_value() && !makespan.value().IsConstant()) {
621 result.vars.push_back(makespan.value().var);
622 }
624 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
626 SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
627
628 result.generate_cuts = [makespan, capacity, demands_helper, helper,
629 integer_trail, time_limit, sat_solver,
630 model](LinearConstraintManager* manager) {
631 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
632 if (!demands_helper->CacheAllEnergyValues()) return true;
633
634 const auto& lp_values = manager->LpValues();
635 const VariablesAssignment& assignment = sat_solver->Assignment();
636
637 std::vector<EnergyEvent> events;
638 for (int i = 0; i < helper->NumTasks(); ++i) {
639 if (helper->IsAbsent(i)) continue;
640 // TODO(user): use level 0 bounds ?
641 if (demands_helper->DemandMax(i) == 0 || helper->SizeMin(i) == 0) {
642 continue;
643 }
644 if (!DecomposedEnergyIsPropagated(assignment, i, helper,
645 demands_helper)) {
646 return true; // Propagation did not reach a fixed point. Abort.
647 }
648
649 EnergyEvent e(i, helper);
650 e.demand = demands_helper->Demands()[i];
651 e.demand_min = demands_helper->DemandMin(i);
652 e.decomposed_energy = demands_helper->DecomposedEnergies()[i];
653 if (e.decomposed_energy.size() == 1) {
654 // We know it was propagated correctly. We can remove this field.
655 e.decomposed_energy.clear();
656 }
657 e.energy_min = demands_helper->EnergyMin(i);
658 e.energy_is_quadratic = demands_helper->EnergyIsQuadratic(i);
659 if (!helper->IsPresent(i)) {
661 }
662 // We can always skip events.
663 if (!e.FillEnergyLp(helper->Sizes()[i], lp_values, model)) continue;
664 events.push_back(e);
665 }
666
667 if (makespan.has_value() && integer_trail->IsFixed(capacity)) {
669 "CumulativeEnergyM", lp_values, events,
670 integer_trail->FixedValue(capacity), makespan.value(), time_limit,
671 model, manager);
672
673 } else {
674 GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, events,
675 capacity, time_limit, model, manager);
676 }
677 return true;
678 };
679
680 return result;
681}
682
685 const std::optional<AffineExpression>& makespan, Model* model) {
686 CutGenerator result;
687 result.only_run_at_level_zero = true;
689 helper, model, &result.vars,
691 if (makespan.has_value() && !makespan.value().IsConstant()) {
692 result.vars.push_back(makespan.value().var);
693 }
696
697 result.generate_cuts = [makespan, helper, time_limit,
698 model](LinearConstraintManager* manager) {
699 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
700
701 const auto& lp_values = manager->LpValues();
702 std::vector<EnergyEvent> events;
703 for (int i = 0; i < helper->NumTasks(); ++i) {
704 if (helper->IsAbsent(i)) continue;
705 if (helper->SizeMin(i) == 0) continue;
706
707 EnergyEvent e(i, helper);
708 e.demand = IntegerValue(1);
709 e.demand_min = IntegerValue(1);
710 e.energy_min = e.size_min;
711 if (!helper->IsPresent(i)) {
713 }
714 // We can always skip events.
715 if (!e.FillEnergyLp(helper->Sizes()[i], lp_values, model)) continue;
716 events.push_back(e);
717 }
718
719 if (makespan.has_value()) {
721 "NoOverlapEnergyM", lp_values, events,
722 /*capacity=*/IntegerValue(1), makespan.value(), time_limit, model,
723 manager);
724 } else {
725 GenerateCumulativeEnergeticCuts("NoOverlapEnergy", lp_values, events,
726 /*capacity=*/IntegerValue(1), time_limit,
727 model, manager);
728 }
729 return true;
730 };
731 return result;
732}
733
735 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
736 const AffineExpression& capacity, Model* model) {
737 CutGenerator result;
738 result.only_run_at_level_zero = true;
739 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
740 &result.vars);
741 AddIntegerVariableFromIntervals(helper, model, &result.vars,
744
745 struct TimeTableEvent {
746 int interval_index;
747 IntegerValue time;
748 LinearExpression demand;
749 double demand_lp = 0.0;
750 bool is_positive = false;
751 bool use_decomposed_energy_min = false;
752 bool is_optional = false;
753 };
754
755 result.generate_cuts = [helper, capacity, demands_helper,
756 model](LinearConstraintManager* manager) {
757 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
758 if (!demands_helper->CacheAllEnergyValues()) return true;
759
760 TopNCuts top_n_cuts(5);
761 std::vector<TimeTableEvent> events;
762 const auto& lp_values = manager->LpValues();
763 if (lp_values.empty()) return true; // No linear relaxation.
764 const double capacity_lp = capacity.LpValue(lp_values);
765
766 // Iterate through the intervals. If start_max < end_min, the demand
767 // is mandatory.
768 for (int i = 0; i < helper->NumTasks(); ++i) {
769 if (helper->IsAbsent(i)) continue;
770
771 const IntegerValue start_max = helper->StartMax(i);
772 const IntegerValue end_min = helper->EndMin(i);
773
774 if (start_max >= end_min) continue;
775
776 TimeTableEvent e1;
777 e1.interval_index = i;
778 e1.time = start_max;
779 {
780 LinearConstraintBuilder builder(model);
781 // Ignore the interval if the linearized demand fails.
782 if (!demands_helper->AddLinearizedDemand(i, &builder)) continue;
783 e1.demand = builder.BuildExpression();
784 }
785 e1.demand_lp = e1.demand.LpValue(lp_values);
786 e1.is_positive = true;
787 e1.use_decomposed_energy_min =
788 !demands_helper->DecomposedEnergies()[i].empty();
789 e1.is_optional = !helper->IsPresent(i);
790
791 TimeTableEvent e2 = e1;
792 e2.time = end_min;
793 e2.is_positive = false;
794
795 events.push_back(e1);
796 events.push_back(e2);
797 }
798
799 // Sort events by time.
800 // It is also important that all positive events with the same time as
801 // negative events appear after them, for the correctness of the algo below.
802 std::stable_sort(events.begin(), events.end(),
803 [](const TimeTableEvent& i, const TimeTableEvent& j) {
804 return std::tie(i.time, i.is_positive) <
805 std::tie(j.time, j.is_positive);
806 });
807
808 double sum_of_demand_lp = 0.0;
809 bool positive_event_added_since_last_check = false;
810 for (int i = 0; i < events.size(); ++i) {
811 const TimeTableEvent& e = events[i];
812 if (e.is_positive) {
813 positive_event_added_since_last_check = true;
814 sum_of_demand_lp += e.demand_lp;
815 continue;
816 }
817
818 if (positive_event_added_since_last_check) {
819 // Reset positive event added. We do not want to create cuts for
820 // each negative event in sequence.
821 positive_event_added_since_last_check = false;
822
823 if (sum_of_demand_lp >= capacity_lp + kMinCutViolation) {
824 // Create cut.
825 bool use_decomposed_energy_min = false;
826 bool use_optional = false;
827 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
828 cut.AddTerm(capacity, IntegerValue(-1));
829 // The i-th event, which is a negative event, follows a positive
830 // event. We must ignore it in our cut generation.
831 DCHECK(!events[i].is_positive);
832 const IntegerValue time_point = events[i - 1].time;
833
834 for (int j = 0; j < i; ++j) {
835 const TimeTableEvent& cut_event = events[j];
836 const int t = cut_event.interval_index;
837 DCHECK_LE(helper->StartMax(t), time_point);
838 if (!cut_event.is_positive || helper->EndMin(t) <= time_point) {
839 continue;
840 }
841
842 cut.AddLinearExpression(cut_event.demand, IntegerValue(1));
843 use_decomposed_energy_min |= cut_event.use_decomposed_energy_min;
844 use_optional |= cut_event.is_optional;
845 }
846
847 std::string cut_name = "CumulativeTimeTable";
848 if (use_optional) cut_name += "_optional";
849 if (use_decomposed_energy_min) cut_name += "_energy";
850 top_n_cuts.AddCut(cut.Build(), cut_name, lp_values);
851 }
852 }
853
854 // The demand_lp was added in case of a positive event. We need to
855 // remove it for a negative event.
856 sum_of_demand_lp -= e.demand_lp;
857 }
858 top_n_cuts.TransferToManager(manager);
859 return true;
860 };
861 return result;
862}
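The generator above sweeps over mandatory parts, accumulating LP demands at positive events and checking for an overload at negative events. The following standalone sketch (hypothetical MandatoryPart struct, demands kept as doubles like the LP values) condenses that sweep into a single maximum-load computation.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <tuple>
#include <vector>

struct MandatoryPart {
  int64_t start_max;
  int64_t end_min;
  double demand_lp;
};

double MaxMandatoryLoad(const std::vector<MandatoryPart>& parts) {
  // (time, is_positive, demand): negative events sort before positive ones at
  // the same time, mirroring the stable sort used by the generator.
  std::vector<std::tuple<int64_t, bool, double>> events;
  for (const MandatoryPart& p : parts) {
    if (p.start_max >= p.end_min) continue;  // No mandatory part.
    events.emplace_back(p.start_max, true, p.demand_lp);
    events.emplace_back(p.end_min, false, p.demand_lp);
  }
  std::sort(events.begin(), events.end());
  double load = 0.0;
  double max_load = 0.0;
  for (const auto& [time, is_positive, demand] : events) {
    load += is_positive ? demand : -demand;
    max_load = std::max(max_load, load);
  }
  return max_load;
}

int main() {
  // Two mandatory parts overlapping on [3, 5): the accumulated load there is
  // 1.5, which a timetable cut would compare against the capacity LP value.
  std::cout << MaxMandatoryLoad({{2, 5, 1.0}, {3, 6, 0.5}}) << "\n";  // 1.5
}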
863
864// Cached Information about one interval.
865// Note that everything must correspond to level zero bounds, otherwise the
866// generated cuts are not valid.
867
868struct CachedIntervalData {
869 CachedIntervalData(int t, SchedulingConstraintHelper* helper)
870 : start_min(helper->StartMin(t)),
871 start_max(helper->StartMax(t)),
872 start(helper->Starts()[t]),
873 end_min(helper->EndMin(t)),
874 end_max(helper->EndMax(t)),
875 end(helper->Ends()[t]),
876 size_min(helper->SizeMin(t)) {}
877
878 IntegerValue start_min;
879 IntegerValue start_max;
880 AffineExpression start;
881 IntegerValue end_min;
882 IntegerValue end_max;
883 AffineExpression end;
884 IntegerValue size_min;
885
886 IntegerValue demand_min;
887};
888
890 absl::string_view cut_name, bool ignore_zero_size_intervals,
892 std::vector<CachedIntervalData> events, IntegerValue capacity_max,
893 Model* model, LinearConstraintManager* manager) {
894 TopNCuts top_n_cuts(5);
895 const int num_events = events.size();
896 if (num_events <= 1) return;
897
898 std::stable_sort(
899 events.begin(), events.end(),
900 [](const CachedIntervalData& e1, const CachedIntervalData& e2) {
901 return e1.start_min < e2.start_min ||
902 (e1.start_min == e2.start_min && e1.end_max < e2.end_max);
903 });
904
905 // Balas disjunctive cuts on two tasks 1 and 2:
906 // start_1 * (duration_1 + start_min_1 - start_min_2) +
907 // start_2 * (duration_2 + start_min_2 - start_min_1) >=
908 // duration_1 * duration_2 +
909 // start_min_1 * duration_2 +
910 // start_min_2 * duration_1
911 // From: David L. Applegate, William J. Cook:
912 // A Computational Study of the Job-Shop Scheduling Problem. 149-156
913 // INFORMS Journal on Computing, Volume 3, Number 1, Winter 1991
914 const auto add_balas_disjunctive_cut =
915 [&](absl::string_view local_cut_name, IntegerValue start_min_1,
916 IntegerValue duration_min_1, AffineExpression start_1,
917 IntegerValue start_min_2, IntegerValue duration_min_2,
918 AffineExpression start_2) {
919 // Checks hypothesis from the cut.
920 if (start_min_2 >= start_min_1 + duration_min_1 ||
921 start_min_1 >= start_min_2 + duration_min_2) {
922 return;
923 }
924 const IntegerValue coeff_1 = duration_min_1 + start_min_1 - start_min_2;
925 const IntegerValue coeff_2 = duration_min_2 + start_min_2 - start_min_1;
926 const IntegerValue rhs = duration_min_1 * duration_min_2 +
927 duration_min_1 * start_min_2 +
928 duration_min_2 * start_min_1;
929
930 if (ToDouble(coeff_1) * start_1.LpValue(lp_values) +
931 ToDouble(coeff_2) * start_2.LpValue(lp_values) <=
932 ToDouble(rhs) - kMinCutViolation) {
933 LinearConstraintBuilder cut(model, rhs, kMaxIntegerValue);
934 cut.AddTerm(start_1, coeff_1);
935 cut.AddTerm(start_2, coeff_2);
936 top_n_cuts.AddCut(cut.Build(), local_cut_name, lp_values);
937 }
938 };
939
940 for (int i = 0; i + 1 < num_events; ++i) {
941 const CachedIntervalData& e1 = events[i];
942 for (int j = i + 1; j < num_events; ++j) {
943 const CachedIntervalData& e2 = events[j];
944 if (e2.start_min >= e1.end_max) break; // Break out of the index2 loop.
945
946 // Encode only the interesting pairs.
947 if (e1.demand_min + e2.demand_min <= capacity_max) continue;
948
949 if (ignore_zero_size_intervals &&
950 (e1.size_min <= 0 || e2.size_min <= 0)) {
951 continue;
952 }
953
954 const bool interval_1_can_precede_2 = e1.end_min <= e2.start_max;
955 const bool interval_2_can_precede_1 = e2.end_min <= e1.start_max;
956
957 if (interval_1_can_precede_2 && !interval_2_can_precede_1 &&
958 e1.end.LpValue(lp_values) >=
959 e2.start.LpValue(lp_values) + kMinCutViolation) {
960 // interval_1.end <= interval_2.start
961 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
962 cut.AddTerm(e1.end, IntegerValue(1));
963 cut.AddTerm(e2.start, IntegerValue(-1));
964 top_n_cuts.AddCut(cut.Build(),
965 absl::StrCat(cut_name, "DetectedPrecedence"),
966 lp_values);
967 } else if (interval_2_can_precede_1 && !interval_1_can_precede_2 &&
968 e2.end.LpValue(lp_values) >=
969 e1.start.LpValue(lp_values) + kMinCutViolation) {
970 // interval_2.end <= interval_1.start
971 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
972 cut.AddTerm(e2.end, IntegerValue(1));
973 cut.AddTerm(e1.start, IntegerValue(-1));
974 top_n_cuts.AddCut(cut.Build(),
975 absl::StrCat(cut_name, "DetectedPrecedence"),
976 lp_values);
977 } else {
978 add_balas_disjunctive_cut(absl::StrCat(cut_name, "DisjunctionOnStart"),
979 e1.start_min, e1.size_min, e1.start,
980 e2.start_min, e2.size_min, e2.start);
981 add_balas_disjunctive_cut(absl::StrCat(cut_name, "DisjunctionOnEnd"),
982 -e1.end_max, e1.size_min, e1.end.Negated(),
983 -e2.end_max, e2.size_min, e2.end.Negated());
984 }
985 }
986 }
987
988 top_n_cuts.TransferToManager(manager);
989}
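The Balas inequality used above can be sanity-checked numerically. The brute force below (hypothetical values, plain int64_t) verifies that every pair of start times in which the two tasks do not overlap satisfies coeff_1 * start_1 + coeff_2 * start_2 >= rhs.

#include <cstdint>
#include <iostream>

int main() {
  const int64_t s1 = 0, d1 = 3;  // start_min and min duration of task 1.
  const int64_t s2 = 2, d2 = 4;  // start_min and min duration of task 2.
  const int64_t coeff_1 = d1 + s1 - s2;             // 1
  const int64_t coeff_2 = d2 + s2 - s1;             // 6
  const int64_t rhs = d1 * d2 + d1 * s2 + d2 * s1;  // 18
  bool valid = true;
  for (int64_t t1 = s1; t1 <= 20; ++t1) {
    for (int64_t t2 = s2; t2 <= 20; ++t2) {
      // Disjunctive schedules: one task completely precedes the other.
      const bool disjoint = (t1 + d1 <= t2) || (t2 + d2 <= t1);
      if (disjoint && coeff_1 * t1 + coeff_2 * t2 < rhs) valid = false;
    }
  }
  std::cout << (valid ? "cut is valid on this range" : "violated") << "\n";
}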
990
992 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
993 const AffineExpression& capacity, Model* model) {
994 CutGenerator result;
995 result.only_run_at_level_zero = true;
996 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
997 &result.vars);
999 helper, model, &result.vars,
1002
1003 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
1004 result.generate_cuts = [integer_trail, helper, demands_helper, capacity,
1005 model](LinearConstraintManager* manager) {
1006 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1007
1008 std::vector<CachedIntervalData> events;
1009 for (int t = 0; t < helper->NumTasks(); ++t) {
1010 if (!helper->IsPresent(t)) continue;
1011 CachedIntervalData event(t, helper);
1012 event.demand_min = demands_helper->DemandMin(t);
1013 events.push_back(event);
1014 }
1015
1016 const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
1018 "Cumulative", /* ignore_zero_size_intervals= */ true,
1019 manager->LpValues(), std::move(events), capacity_max, model, manager);
1020 return true;
1021 };
1022 return result;
1023}
1024
1026 SchedulingConstraintHelper* helper, Model* model) {
1027 CutGenerator result;
1028 result.only_run_at_level_zero = true;
1030 helper, model, &result.vars,
1033
1034 result.generate_cuts = [helper, model](LinearConstraintManager* manager) {
1035 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1036
1037 std::vector<CachedIntervalData> events;
1038 for (int t = 0; t < helper->NumTasks(); ++t) {
1039 if (!helper->IsPresent(t)) continue;
1040 CachedIntervalData event(t, helper);
1041 event.demand_min = IntegerValue(1);
1042 events.push_back(event);
1043 }
1044
1046 "NoOverlap", /* ignore_zero_size_intervals= */ false,
1047 manager->LpValues(), std::move(events), IntegerValue(1), model,
1048 manager);
1049 return true;
1050 };
1051
1052 return result;
1053}
1054
1057 SchedulingDemandHelper* demands_helper)
1058 : task_index(t),
1059 start_min(helper->StartMin(t)),
1060 start_max(helper->StartMax(t)),
1061 end_min(helper->EndMin(t)),
1062 end_max(helper->EndMax(t)),
1063 size_min(helper->SizeMin(t)),
1064 start(helper->Starts()[t]),
1065 end(helper->Ends()[t]) {
1066 if (demands_helper == nullptr) {
1067 demand_min = 1;
1068 demand_is_fixed = true;
1071 } else {
1072 demand_min = demands_helper->DemandMin(t);
1073 demand_is_fixed = demands_helper->DemandIsFixed(t);
1074 // Default values for energy. Will be updated if decomposed energy is
1075 // not empty.
1078 decomposed_energy = demands_helper->DecomposedEnergies()[t];
1079 if (decomposed_energy.size() == 1) {
1080 // We know everything is propagated, we can remove this field.
1081 decomposed_energy.clear();
1082 }
1083 }
1084}
1085
1087 return absl::StrCat(
1088 "CompletionTimeEvent(task_index = ", task_index,
1089 ", start_min = ", start_min, ", start_max = ", start_max,
1090 ", size_min = ", size_min, ", end = ", end.DebugString(),
1091 ", lp_end = ", lp_end, ", size_min = ", size_min,
1092 " demand_min = ", demand_min, ", demand_is_fixed = ", demand_is_fixed,
1093 ", energy_min = ", energy_min,
1094 ", use_decomposed_energy_min = ", use_decomposed_energy_min,
1095 ", lifted = ", lifted, ", decomposed_energy = [",
1096 absl::StrJoin(decomposed_energy, ", ",
1097 [](std::string* out, const LiteralValueValue& e) {
1098 absl::StrAppend(out, e.left_value, " * ", e.right_value);
1099 }),
1100 "]");
1101}
1102
1104 const absl::Span<const CompletionTimeEvent> events, Model* model) {
1105 max_task_index_ = 0;
1106 if (events.empty()) return;
1107 // We compute the max_task_index_ from the events early to avoid sorting
1108 // the events if there are too many of them.
1109 for (const auto& event : events) {
1110 max_task_index_ = std::max(max_task_index_, event.task_index);
1111 }
1112 if (events.size() > 100) return;
1113
1114 BinaryRelationsMaps* binary_relations =
1115 model->GetOrCreate<BinaryRelationsMaps>();
1116
1117 std::vector<CompletionTimeEvent> sorted_events(events.begin(), events.end());
1118 std::sort(sorted_events.begin(), sorted_events.end(),
1119 [](const CompletionTimeEvent& a, const CompletionTimeEvent& b) {
1120 return a.task_index < b.task_index;
1121 });
1122 predecessors_.reserve(max_task_index_ + 1);
1123 for (const auto& e1 : sorted_events) {
1124 for (const auto& e2 : sorted_events) {
1125 if (e2.task_index == e1.task_index) continue;
1126 if (binary_relations->GetLevelZeroPrecedenceStatus(e2.end, e1.start) ==
1128 while (predecessors_.size() <= e1.task_index) predecessors_.Add({});
1129 predecessors_.AppendToLastVector(e2.task_index);
1130 }
1131 }
1132 }
1133 VLOG(2) << "num_tasks:" << max_task_index_ + 1
1134 << " num_precedences:" << predecessors_.num_entries()
1135 << " predecessors size:" << predecessors_.size();
1136}
1137
1139 absl::Span<const CompletionTimeEvent> events,
1140 absl::Span<const int> permutation) {
1141 if (predecessors_.num_entries() == 0) return true;
1142 visited_.assign(max_task_index_ + 1, false);
1143 for (int i = permutation.size() - 1; i >= 0; --i) {
1144 const CompletionTimeEvent& event = events[permutation[i]];
1145 if (event.task_index >= predecessors_.size()) continue;
1146 for (const int predecessor : predecessors_[event.task_index]) {
1147 if (visited_[predecessor]) return false;
1148 }
1149 visited_[event.task_index] = true;
1150 }
1151 return true;
1152}
1153
1154namespace {
1155
1156bool ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap(
1157 absl::Span<const CompletionTimeEvent> events,
1158 absl::Span<const int> permutation, IntegerValue& sum_of_ends,
1159 IntegerValue& sum_of_weighted_ends) {
1160 // Reset the two sums.
1161 sum_of_ends = 0;
1162 sum_of_weighted_ends = 0;
1163
1164 // Loop over the permutation.
1165 IntegerValue end_min_of_previous_task = kMinIntegerValue;
1166 for (const int index : permutation) {
1167 const CompletionTimeEvent& event = events[index];
1168 const IntegerValue task_start_min =
1169 std::max(event.start_min, end_min_of_previous_task);
1170 if (event.start_max < task_start_min) return false; // Infeasible.
1171
1172 end_min_of_previous_task = task_start_min + event.size_min;
1173 sum_of_ends += end_min_of_previous_task;
1174 sum_of_weighted_ends += event.energy_min * end_min_of_previous_task;
1175 }
1176 return true;
1177}
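For the capacity-one case handled by the helper above, the exhaustive search boils down to trying every order and scheduling each task greedily right after the previous one. Below is a standalone sketch, assuming no precedences and using std::next_permutation instead of the permutation iterator.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <numeric>
#include <vector>

struct Task {
  int64_t start_min;
  int64_t start_max;
  int64_t size_min;
};

// Smallest feasible sum of end times over all orders on a single machine.
int64_t MinSumOfEnds(std::vector<Task> tasks) {
  std::vector<int> order(tasks.size());
  std::iota(order.begin(), order.end(), 0);
  int64_t best = std::numeric_limits<int64_t>::max();
  do {
    int64_t previous_end = std::numeric_limits<int64_t>::min();
    int64_t sum_of_ends = 0;
    bool feasible = true;
    for (const int t : order) {
      const int64_t start = std::max(tasks[t].start_min, previous_end);
      if (start > tasks[t].start_max) {
        feasible = false;
        break;
      }
      previous_end = start + tasks[t].size_min;
      sum_of_ends += previous_end;
    }
    if (feasible) best = std::min(best, sum_of_ends);
  } while (std::next_permutation(order.begin(), order.end()));
  return best;
}

int main() {
  // Three tasks on one machine; the best order processes the shortest task
  // first, giving end times 1, 3 and 6, hence a sum of 10.
  std::cout << MinSumOfEnds({{0, 10, 2}, {0, 10, 1}, {0, 10, 3}}) << "\n";
}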
1178
1179// This function packs all events in a cumulative of capacity 'capacity_max'
1180// following the given permutation. It returns the sum of end mins and the sum
1181// of end mins weighted by event.energy_min.
1182//
1183// It ensures that if event_j is after event_i in the permutation, then
1184// event_j starts exactly at the same time or after event_i.
1185//
1186// It returns false if one event cannot start before event.start_max.
1187bool ComputeWeightedSumOfEndMinsOfOnePermutation(
1188 absl::Span<const CompletionTimeEvent> events,
1189 absl::Span<const int> permutation, IntegerValue capacity_max,
1190 CtExhaustiveHelper& helper, IntegerValue& sum_of_ends,
1191 IntegerValue& sum_of_weighted_ends, bool& cut_use_precedences) {
1192 DCHECK_EQ(permutation.size(), events.size());
1193
1194 if (capacity_max == 1) {
1195 return ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap(
1196 events, permutation, sum_of_ends, sum_of_weighted_ends);
1197 }
1198
1199 // Reset the two sums.
1200 sum_of_ends = 0;
1201 sum_of_weighted_ends = 0;
1202
1203 // Quick check to see if the permutation is feasible:
1204 // ei = events[permutation[i]], ej = events[permutation[j]], i < j
1205 // - start_max(ej) >= start_min(ei)
1206 IntegerValue demand_min_of_previous_task = 0;
1207 IntegerValue start_min_of_previous_task = kMinIntegerValue;
1208 IntegerValue end_min_of_previous_task = kMinIntegerValue;
1209 for (const int index : permutation) {
1210 const CompletionTimeEvent& event = events[index];
1211 const IntegerValue threshold =
1212 std::max(event.start_min,
1213 (event.demand_min + demand_min_of_previous_task > capacity_max)
1214 ? end_min_of_previous_task
1215 : start_min_of_previous_task);
1216 if (event.start_max < threshold) {
1217 return false;
1218 }
1219
1220 start_min_of_previous_task = threshold;
1221 end_min_of_previous_task = threshold + event.size_min;
1222 demand_min_of_previous_task = event.demand_min;
1223 }
1224
1225 // The profile (and new profile) is a set of (time, capa_left) pairs,
1226 // ordered by increasing time and capa_left.
1227 helper.profile_.clear();
1228 helper.profile_.emplace_back(kMinIntegerValue, capacity_max);
1229 helper.profile_.emplace_back(kMaxIntegerValue, capacity_max);
1230
1231 // Loop over the permutation.
1232 helper.assigned_ends_.assign(helper.max_task_index() + 1, kMinIntegerValue);
1233 IntegerValue start_of_previous_task = kMinIntegerValue;
1234 for (const int index : permutation) {
1235 const CompletionTimeEvent& event = events[index];
1236 const IntegerValue start_min =
1237 std::max(event.start_min, start_of_previous_task);
1238
1239 // Iterate on the profile to find the step that contains start_min.
1240 // Then push until we find a step with enough capacity.
1241 auto profile_it = helper.profile_.begin();
1242 while ((profile_it + 1)->first <= start_min ||
1243 profile_it->second < event.demand_min) {
1244 ++profile_it;
1245 }
1246
1247 IntegerValue actual_start = std::max(start_min, profile_it->first);
1248 const IntegerValue initial_start_min = actual_start;
1249
1250 // Propagate precedences.
1251 //
1252 // helper.predecessors() can be truncated. We need to be careful here.
1253 if (event.task_index < helper.predecessors().size()) {
1254 for (const int predecessor : helper.predecessors()[event.task_index]) {
1255 if (helper.assigned_ends_[predecessor] == kMinIntegerValue) continue;
1256 actual_start =
1257 std::max(actual_start, helper.assigned_ends_[predecessor]);
1258 }
1259 }
1260
1261 if (actual_start > initial_start_min) {
1262 cut_use_precedences = true;
1263 // Catch up the position on the profile w.r.t. the actual start.
1264 while ((profile_it + 1)->first <= actual_start) ++profile_it;
1265 VLOG(3) << "push from " << initial_start_min << " to " << actual_start;
1266 }
1267
1268 // Compatible with the event.start_max ?
1269 if (actual_start > event.start_max) {
1270 return false;
1271 }
1272
1273 const IntegerValue actual_end = actual_start + event.size_min;
1274
1275 // Bookkeeping.
1276 helper.assigned_ends_[event.task_index] = actual_end;
1277 sum_of_ends += actual_end;
1278 sum_of_weighted_ends += event.energy_min * actual_end;
1279 start_of_previous_task = actual_start;
1280
1281 // No need to update the profile on the last loop.
1282 if (event.task_index == events[permutation.back()].task_index) break;
1283
1284 // Update the profile.
1285 helper.new_profile_.clear();
1286 const IntegerValue demand_min = event.demand_min;
1287
1288 // Insert the start of the shifted profile.
1289 helper.new_profile_.push_back(
1290 {actual_start, profile_it->second - demand_min});
1291 ++profile_it;
1292
1293 // Copy and modify the part of the profile impacted by the current event.
1294 while (profile_it->first < actual_end) {
1295 helper.new_profile_.push_back(
1296 {profile_it->first, profile_it->second - demand_min});
1297 ++profile_it;
1298 }
1299
1300 // Insert a new event in the profile at the end of the task if needed.
1301 if (profile_it->first > actual_end) {
1302 helper.new_profile_.push_back({actual_end, (profile_it - 1)->second});
1303 }
1304
1305 // Insert the tail of the current profile.
1306 helper.new_profile_.insert(helper.new_profile_.end(), profile_it,
1307 helper.profile_.end());
1308
1309 helper.profile_.swap(helper.new_profile_);
1310 }
1311 return true;
1312}
1313
1314const int kCtExhaustiveTargetSize = 6;
1315// This corresponds to the number of permutations the system will explore when
1316// fully exploring all possible sizes and all possible permutations for up to 6
1317// tasks, without any precedence.
1318const int kExplorationLimit = 873; // 1! + 2! + 3! + 4! + 5! + 6!
1319
1320} // namespace
1321
1323 absl::Span<const CompletionTimeEvent> events, IntegerValue capacity_max,
1324 double unweighted_threshold, double weighted_threshold,
1325 CtExhaustiveHelper& helper, double& min_sum_of_ends,
1326 double& min_sum_of_weighted_ends, bool& cut_use_precedences,
1327 int& exploration_credit) {
1328 // Reset the events based sums.
1329 min_sum_of_ends = std::numeric_limits<double>::max();
1330 min_sum_of_weighted_ends = std::numeric_limits<double>::max();
1331 helper.task_to_index_.assign(helper.max_task_index() + 1, -1);
1332 for (int i = 0; i < events.size(); ++i) {
1333 helper.task_to_index_[events[i].task_index] = i;
1334 }
1335 helper.valid_permutation_iterator_.Reset(events.size());
1336 const auto& predecessors = helper.predecessors();
1337 for (int i = 0; i < events.size(); ++i) {
1338 const int task_i = events[i].task_index;
1339 if (task_i >= predecessors.size()) continue;
1340 for (const int task_j : predecessors[task_i]) {
1341 const int j = helper.task_to_index_[task_j];
1342 if (j != -1) {
1344 }
1345 }
1346 }
1347
1348 bool is_dag = false;
1349 int num_valid_permutations = 0;
1350 for (const auto& permutation : helper.valid_permutation_iterator_) {
1351 is_dag = true;
1352 if (--exploration_credit < 0) break;
1353
1354 IntegerValue sum_of_ends = 0;
1355 IntegerValue sum_of_weighted_ends = 0;
1356
1357 if (ComputeWeightedSumOfEndMinsOfOnePermutation(
1358 events, permutation, capacity_max, helper, sum_of_ends,
1359 sum_of_weighted_ends, cut_use_precedences)) {
1360 min_sum_of_ends = std::min(ToDouble(sum_of_ends), min_sum_of_ends);
1361 min_sum_of_weighted_ends =
1362 std::min(ToDouble(sum_of_weighted_ends), min_sum_of_weighted_ends);
1363 ++num_valid_permutations;
1364
1365 if (min_sum_of_ends <= unweighted_threshold &&
1366 min_sum_of_weighted_ends <= weighted_threshold) {
1367 break;
1368 }
1369 }
1370 }
1371 if (!is_dag) {
1373 }
1374 const CompletionTimeExplorationStatus status =
1375 exploration_credit < 0 ? CompletionTimeExplorationStatus::ABORTED
1376 : num_valid_permutations > 0
1379 VLOG(2) << "DP: size:" << events.size()
1380 << ", num_valid_permutations:" << num_valid_permutations
1381 << ", min_sum_of_end_mins:" << min_sum_of_ends
1382 << ", min_sum_of_weighted_end_mins:" << min_sum_of_weighted_ends
1383 << ", unweighted_threshold:" << unweighted_threshold
1384 << ", weighted_threshold:" << weighted_threshold
1385 << ", exploration_credit:" << exploration_credit
1386 << ", status:" << status;
1387 return status;
1388}
1389
1390// TODO(user): Improve performance
1391// - detect disjoint tasks (no need to crossover to the second part)
1392// - better caching of explored states
1394 absl::string_view cut_name, std::vector<CompletionTimeEvent> events,
1395 IntegerValue capacity_max, CtExhaustiveHelper& helper, Model* model,
1396 LinearConstraintManager* manager) {
1397 TopNCuts top_n_cuts(5);
1398
1399 // Sort by start min to bucketize by start_min.
1400 std::sort(
1401 events.begin(), events.end(),
1402 [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) {
1403 return std::tie(e1.start_min, e1.energy_min, e1.lp_end, e1.task_index) <
1404 std::tie(e2.start_min, e2.energy_min, e2.lp_end, e2.task_index);
1405 });
1406 for (int start = 0; start + 1 < events.size(); ++start) {
1407 // Skip to the next start_min value.
1408 if (start > 0 && events[start].start_min == events[start - 1].start_min) {
1409 continue;
1410 }
1411
1412 bool cut_use_precedences = false; // Used for naming the cut.
1413 const IntegerValue sequence_start_min = events[start].start_min;
1414 helper.residual_events_.assign(events.begin() + start, events.end());
1415
1416 // We look at events that start before sequence_start_min, but are forced
1417 // to cross this time point.
1418 for (int before = 0; before < start; ++before) {
1419 if (events[before].start_min + events[before].size_min >
1420 sequence_start_min) {
1421 helper.residual_events_.push_back(events[before]); // Copy.
1422 helper.residual_events_.back().lifted = true;
1423 }
1424 }
1425
1426 std::sort(helper.residual_events_.begin(), helper.residual_events_.end(),
1427 [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) {
1428 return std::tie(e1.lp_end, e1.task_index) <
1429 std::tie(e2.lp_end, e2.task_index);
1430 });
1431
1432 double sum_of_ends_lp = 0.0;
1433 double sum_of_weighted_ends_lp = 0.0;
1434 IntegerValue sum_of_demands = 0;
1435 double sum_of_square_energies = 0;
1436 double min_sum_of_ends = std::numeric_limits<double>::max();
1437 double min_sum_of_weighted_ends = std::numeric_limits<double>::max();
1438 int exploration_limit = kExplorationLimit;
1439 const int kMaxSize = std::min<int>(helper.residual_events_.size(), 12);
1440
1441 for (int i = 0; i < kMaxSize; ++i) {
1442 const CompletionTimeEvent& event = helper.residual_events_[i];
1443 const double energy = ToDouble(event.energy_min);
1444 sum_of_ends_lp += event.lp_end;
1445 sum_of_weighted_ends_lp += event.lp_end * energy;
1446 sum_of_demands += event.demand_min;
1447 sum_of_square_energies += energy * energy;
1448
1449 // Both cases with 1 or 2 tasks are trivial and independent of the order.
1450 // Also, if the sum of demands is less than or equal to the capacity,
1451 // pushing all ends left is a valid LP assignment. And this assignment
1452 // should be propagated by the lp model.
1453 if (i <= 1 || sum_of_demands <= capacity_max) continue;
1454
1455 absl::Span<const CompletionTimeEvent> tasks_to_explore =
1456 absl::MakeSpan(helper.residual_events_).first(i + 1);
1457 const CompletionTimeExplorationStatus status =
1459 tasks_to_explore, capacity_max,
1460 /* unweighted_threshold= */ sum_of_ends_lp + kMinCutViolation,
1461 /* weighted_threshold= */ sum_of_weighted_ends_lp +
1462 kMinCutViolation,
1463 helper, min_sum_of_ends, min_sum_of_weighted_ends,
1464 cut_use_precedences, exploration_limit);
1466 // TODO(user): We should return false here but there is a bug.
1467 break;
1468 } else if (status == CompletionTimeExplorationStatus::ABORTED) {
1469 break;
1470 }
1471
1472 const double unweigthed_violation =
1473 (min_sum_of_ends - sum_of_ends_lp) / std::sqrt(ToDouble(i + 1));
1474 const double weighted_violation =
1475 (min_sum_of_weighted_ends - sum_of_weighted_ends_lp) /
1476 std::sqrt(sum_of_square_energies);
1477
1478 // Unweighted cuts.
1479 if (unweigthed_violation > weighted_violation &&
1480 unweigthed_violation > kMinCutViolation) {
1481 LinearConstraintBuilder cut(model, min_sum_of_ends, kMaxIntegerValue);
1482 bool is_lifted = false;
1483 for (int j = 0; j <= i; ++j) {
1484 const CompletionTimeEvent& event = helper.residual_events_[j];
1485 is_lifted |= event.lifted;
1486 cut.AddTerm(event.end, IntegerValue(1));
1487 }
1488 std::string full_name(cut_name);
1489 if (cut_use_precedences) full_name.append("_prec");
1490 if (is_lifted) full_name.append("_lifted");
1491 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1492 }
1493
1494 // Weighted cuts.
1495 if (weighted_violation >= unweigthed_violation &&
1496 weighted_violation > kMinCutViolation) {
1497 LinearConstraintBuilder cut(model, min_sum_of_weighted_ends,
1499 bool is_lifted = false;
1500 for (int j = 0; j <= i; ++j) {
1501 const CompletionTimeEvent& event = helper.residual_events_[j];
1502 is_lifted |= event.lifted;
1503 cut.AddTerm(event.end, event.energy_min);
1504 }
1505 std::string full_name(cut_name);
1506 if (is_lifted) full_name.append("_lifted");
1507 if (cut_use_precedences) full_name.append("_prec");
1508 full_name.append("_weighted");
1509 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1510 }
1511 }
1512 }
1513 top_n_cuts.TransferToManager(manager);
1514 return true;
1515}
1516
1517namespace {
1518
1519// Returns a copy of the event with the start time increased to time.
1520// The energy (min and decomposed) is updated accordingly.
1521CompletionTimeEvent CopyAndTrimEventAfter(const CompletionTimeEvent& old_event,
1522 IntegerValue time) {
1523 CHECK_GT(time, old_event.start_min);
1524 CHECK_GT(old_event.start_min + old_event.size_min, time);
1525 CompletionTimeEvent event = old_event; // Copy.
1526 event.lifted = true;
1527
1528 // Trim the decomposed energy and compute the energy min in the window.
1529 const IntegerValue shift = time - event.start_min;
1530 CHECK_GT(shift, IntegerValue(0));
1531 event.size_min -= shift;
1532 event.energy_min = event.size_min * event.demand_min;
1533 if (!event.decomposed_energy.empty()) {
1534 // Trim fixed sizes and recompute the decomposed energy min.
1535 IntegerValue propagated_energy_min = kMaxIntegerValue;
1536 for (auto& [literal, fixed_size, fixed_demand] : event.decomposed_energy) {
1537 CHECK_GT(fixed_size, shift);
1538 fixed_size -= shift;
1539 propagated_energy_min =
1540 std::min(propagated_energy_min, fixed_demand * fixed_size);
1541 }
1542
1543 DCHECK_GT(propagated_energy_min, 0);
1544 if (propagated_energy_min > event.energy_min) {
1545 event.use_decomposed_energy_min = true;
1546 event.energy_min = propagated_energy_min;
1547 } else {
1548 event.use_decomposed_energy_min = false;
1549 }
1550 }
1551 event.start_min = time;
1552 return event;
1553}
1554
1555// Collects all possible demand values for the event, and adds them to the
1556// subset sum of the reachable capacity of the cumulative constraint.
1557void AddEventDemandsToCapacitySubsetSum(
1558 const CompletionTimeEvent& event, const VariablesAssignment& assignment,
1559 IntegerValue capacity_max, std::vector<int64_t>& tmp_possible_demands,
1560 MaxBoundedSubsetSum& dp) {
1561 if (dp.CurrentMax() != capacity_max) {
1562 if (event.demand_is_fixed) {
1563 dp.Add(event.demand_min.value());
1564 } else if (!event.decomposed_energy.empty()) {
1565 tmp_possible_demands.clear();
1566 for (const auto& [literal, size, demand] : event.decomposed_energy) {
1567 if (assignment.LiteralIsFalse(literal)) continue;
1568 if (assignment.LiteralIsTrue(literal)) {
1569 tmp_possible_demands.assign({demand.value()});
1570 break;
1571 }
1572 tmp_possible_demands.push_back(demand.value());
1573 }
1574 dp.AddChoices(tmp_possible_demands);
1575 } else { // Demand not fixed nor decomposed: stop refining the capacity.
1576 // TODO(user): We could still add all events in the range if the range
1577 // is small.
1578 dp.Add(capacity_max.value());
1579 }
1580 }
1581}
1582
1583} // namespace
1584
1585// We generate the cut based on Smith's rule from:
1586// M. Queyranne, Structure of a simple scheduling polyhedron,
1587// Mathematical Programming 58 (1993), 263–285
1588//
1589// The original cut is:
1590// sum(end_min_i * size_min_i) >=
1591// (sum(size_min_i^2) + sum(size_min_i)^2) / 2
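// Illustrative check on a no_overlap with two tasks of sizes 2 and 3, both
// available at time 0: scheduling the size-2 task first gives completion
// times 2 and 5 (lhs = 2*2 + 5*3 = 19); the other order gives 5 and 3
// (lhs = 5*2 + 3*3 = 19). The rhs is ((4 + 9) + 25) / 2 = 19, so the
// inequality is tight.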
1592// We strengthen this cut by noticing that if all tasks start after S,
1593// then replacing end_min_i by (end_min_i - S) is still valid.
1594//
1595// A second difference is that we lift intervals that start before a given
1596// value, but are forced to cross it. This lifting procedure implies trimming
1597// the interval to its part that lies after the given value.
1598//
1599// In the case of a cumulative constraint with a capacity of C, we derive a
1600// valid inequality by splitting each task (size_min si, demand_min di) into di
1601// tasks of size si and demand 1, which we spread across C no_overlap
1602// constraints. When doing so, the lhs of the inequality is unchanged, and so is
1603// the first term of the rhs. A lower bound of the second term of the rhs is
1604// reached when the split is exact (each no_overlap sees a total load of
1605// sum(si * di) / C). Thus the second term is greater than or equal to
1606// (sum(si * di) ^ 2) / (2 * C)
1607//
1608// Sometimes, the min energy of task i is greater than si * di.
1609// Let's introduce ai, the minimum energy of the task, and rewrite the
1610// previous inequality. In this new setting, the cumulative transformation
1611// splits each task into at least di tasks of size at least si and demand
1612// 1.
1613//
1614// In that setting, the lhs is rewritten as sum(ai * ei) and the second term of
1615// the rhs is rewritten as sum(ai) ^ 2 / (2 * C).
1616//
1617// The question is how to rewrite the term sum(di * si * si). The minimum
1618// contribution is reached when the task has size si and demand ai / si (note
1619// that si is the minimum size of the task, and di its minimum demand). We can
1620// thus replace the small rectangle area term by ai * si.
1621//
1622// sum (ai * ei) - sum (ai) * current_start_min >=
1623// sum(si * ai) / 2 + (sum (ai) ^ 2) / (2 * C)
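// Illustrative check with C = 2 and two tasks of size 3 and demand 1 (so
// ai = si * di = 3), both starting at 0: the tasks can run in parallel, so
// sum(ai * ei) = 3*3 + 3*3 = 18, while the rhs is
// (3*3 + 3*3) / 2 + (3 + 3)^2 / (2 * 2) = 9 + 9 = 18, i.e. the bound is tight.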
1624//
1625// The separation procedure is implemented using two loops:
1626// - first, we loop on potential start times in increasing order.
1627// - second, we add tasks that must contribute after this start time,
1628// ordered by increasing end time in the LP relaxation.
1629void GenerateCompletionTimeCutsWithEnergy(
1630 absl::string_view cut_name, std::vector<CompletionTimeEvent> events,
1631 IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) {
1632 TopNCuts top_n_cuts(5);
1633 const VariablesAssignment& assignment =
1634 model->GetOrCreate<Trail>()->Assignment();
1635 std::vector<int64_t> tmp_possible_demands;
1636
1637 // Sort by start min to bucketize by start_min.
1638 std::stable_sort(
1639 events.begin(), events.end(),
1640 [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) {
1641 return std::tie(e1.start_min, e1.demand_min, e1.lp_end) <
1642 std::tie(e2.start_min, e2.demand_min, e2.lp_end);
1643 });
1644
1645 // First loop: we loop on potential start times.
1646 for (int start = 0; start + 1 < events.size(); ++start) {
1647 // Skip to the next start_min value.
1648 if (start > 0 && events[start].start_min == events[start - 1].start_min) {
1649 continue;
1650 }
1651
1652 const IntegerValue sequence_start_min = events[start].start_min;
1653 std::vector<CompletionTimeEvent> residual_tasks(events.begin() + start,
1654 events.end());
1655
1656 // We look at events that start before sequence_start_min, but are forced
1657 // to cross this time point. In that case, we replace such an event by a
1658 // truncated event starting at sequence_start_min. To do this, we reduce
1659 // the size_min, align the start_min with the sequence_start_min, and
1660 // scale the energy down accordingly.
1661 for (int before = 0; before < start; ++before) {
1662 if (events[before].start_min + events[before].size_min >
1663 sequence_start_min) {
1664 CompletionTimeEvent event =
1665 CopyAndTrimEventAfter(events[before], sequence_start_min);
1666 if (event.energy_min <= 0) continue;
1667 residual_tasks.push_back(std::move(event));
1668 }
1669 }
1670
1671 // If we have at most kCtExhaustiveTargetSize tasks, we are already
1672 // covered by the exhaustive cut generator.
1673 if (residual_tasks.size() <= kCtExhaustiveTargetSize) continue;
1674
1675 // Best cut so far for this loop.
1676 int best_end = -1;
1677 double best_efficacy = 0.01;
1678 IntegerValue best_min_contrib = 0;
1679 bool best_uses_subset_sum = false;
1680
1681 // Used in the first term of the rhs of the equation.
1682 IntegerValue sum_event_contributions = 0;
1683 // Used in the second term of the rhs of the equation.
1684 IntegerValue sum_energy = 0;
1685 // For normalization.
1686 IntegerValue sum_square_energy = 0;
1687
1688 double lp_contrib = 0.0;
1689 IntegerValue current_start_min(kMaxIntegerValue);
1690
1691 MaxBoundedSubsetSum dp(capacity_max.value());
1692
1693 // We will add tasks one by one, sorted by end time, and evaluate the
1694 // potential cut at each step.
1695 std::stable_sort(
1696 residual_tasks.begin(), residual_tasks.end(),
1697 [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) {
1698 return e1.lp_end < e2.lp_end;
1699 });
1700
1701 // Second loop: we add tasks one by one.
1702 for (int i = 0; i < residual_tasks.size(); ++i) {
1703 const CompletionTimeEvent& event = residual_tasks[i];
1704 DCHECK_GE(event.start_min, sequence_start_min);
1705
1706 // Make sure we do not overflow.
1707 if (!AddTo(event.energy_min, &sum_energy)) break;
1708 // In the no_overlap case, we have:
1709 // area = event.size_min ^ 2
1710 // In the simple cumulative case, we split the task
1711 // (demand_min, size_min) into demand_min no_overlap tasks of size size_min.
1712 // area = event.demand_min * event.size_min * event.size_min
1713 // In the general cumulative case, we can have energy_min > size_min * demand_min.
1714 // In that case, we use energy_min * size_min.
1715 if (!AddProductTo(event.energy_min, event.size_min,
1716 &sum_event_contributions)) {
1717 break;
1718 }
1719 if (!AddSquareTo(event.energy_min, &sum_square_energy)) break;
1720
1721 lp_contrib += event.lp_end * ToDouble(event.energy_min);
1722 current_start_min = std::min(current_start_min, event.start_min);
1723
1724 // Maintain the reachable capacity with a bounded complexity subset sum.
1725 AddEventDemandsToCapacitySubsetSum(event, assignment, capacity_max,
1726 tmp_possible_demands, dp);
1727
1728 // Ignore cuts covered by the exhaustive cut generator.
1729 if (i < kCtExhaustiveTargetSize) continue;
1730
1731 const IntegerValue reachable_capacity = dp.CurrentMax();
1732
1733 // Do we have a violated cut?
1734 const IntegerValue large_rectangle_contrib =
1735 CapProdI(sum_energy, sum_energy);
1736 if (AtMinOrMaxInt64I(large_rectangle_contrib)) break;
1737 const IntegerValue mean_large_rectangle_contrib =
1738 CeilRatio(large_rectangle_contrib, reachable_capacity);
1739
1740 IntegerValue min_contrib =
1741 CapAddI(sum_event_contributions, mean_large_rectangle_contrib);
1742 if (AtMinOrMaxInt64I(min_contrib)) break;
1743 min_contrib = CeilRatio(min_contrib, 2);
1744
1745 // shift contribution by current_start_min.
1746 if (!AddProductTo(sum_energy, current_start_min, &min_contrib)) break;
1747
1748 // The efficacy of the cut is the normalized violation of the above
1749 // equation. We will normalize by the sqrt of the sum of squared energies.
1750 const double efficacy = (ToDouble(min_contrib) - lp_contrib) /
1751 std::sqrt(ToDouble(sum_square_energy));
1752
1753 // For a given start time, we only keep the best cut.
1754 // The reason is that if the cut is strongly violated, we can get a
1755 // sequence of violated cuts as we add more tasks. These new cuts will
1756 // be less violated, but will not bring anything useful to the LP
1757 // relaxation. At the same time, this sequence of cuts can push out
1758 // other cuts from a disjoint set of tasks.
1759 if (efficacy > best_efficacy) {
1760 best_efficacy = efficacy;
1761 best_end = i;
1762 best_min_contrib = min_contrib;
1763 best_uses_subset_sum = reachable_capacity < capacity_max;
1764 }
1765 }
1766
1767 // We have inserted all tasks. Have we found a violated cut?
1768 // If so, add the most violated one to the top_n cut container.
1769 if (best_end != -1) {
1770 LinearConstraintBuilder cut(model, best_min_contrib, kMaxIntegerValue);
1771 bool is_lifted = false;
1772 bool add_energy_to_name = false;
1773 for (int i = 0; i <= best_end; ++i) {
1774 const CompletionTimeEvent& event = residual_tasks[i];
1775 is_lifted |= event.lifted;
1776 add_energy_to_name |= event.use_decomposed_energy_min;
1777 cut.AddTerm(event.end, event.energy_min);
1778 }
1779 std::string full_name(cut_name);
1780 if (add_energy_to_name) full_name.append("_energy");
1781 if (is_lifted) full_name.append("_lifted");
1782 if (best_uses_subset_sum) full_name.append("_subsetsum");
1783 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1784 }
1785 }
1786 top_n_cuts.TransferToManager(manager);
1787}
1788
1789CutGenerator CreateNoOverlapCompletionTimeCutGenerator(
1790 SchedulingConstraintHelper* helper, Model* model) {
1791 CutGenerator result;
1792 result.only_run_at_level_zero = true;
1793 AddIntegerVariableFromIntervals(helper, model, &result.vars,
1796
1797 result.generate_cuts = [helper, model](LinearConstraintManager* manager) {
1798 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1799
1800 auto generate_cuts = [model, manager,
1801 helper](bool time_is_forward) -> bool {
1802 if (!helper->SynchronizeAndSetTimeDirection(time_is_forward)) {
1803 return false;
1804 }
1805 std::vector<CompletionTimeEvent> events;
1806 const auto& lp_values = manager->LpValues();
1807 for (int index = 0; index < helper->NumTasks(); ++index) {
1808 if (!helper->IsPresent(index)) continue;
1809 const IntegerValue size_min = helper->SizeMin(index);
1810 if (size_min > 0) {
1811 CompletionTimeEvent event(index, helper, nullptr);
1812 event.lp_end = event.end.LpValue(lp_values);
1813 events.push_back(event);
1814 }
1815 }
1816
1817 CtExhaustiveHelper helper;
1818 helper.Init(events, model);
1819
1821 "NoOverlapCompletionTimeExhaustive", events,
1822 /*capacity_max=*/IntegerValue(1), helper, model, manager)) {
1823 return false;
1824 }
1825
1827 "NoOverlapCompletionTimeQueyrane", std::move(events),
1828 /*capacity_max=*/IntegerValue(1), model, manager);
1829 return true;
1830 };
1831 if (!generate_cuts(/*time_is_forward=*/true)) return false;
1832 if (!generate_cuts(/*time_is_forward=*/false)) return false;
1833 return true;
1834 };
1835 return result;
1836}
1837
1838CutGenerator CreateCumulativeCompletionTimeCutGenerator(
1839 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
1840 const AffineExpression& capacity, Model* model) {
1841 CutGenerator result;
1842 result.only_run_at_level_zero = true;
1843 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
1844 &result.vars);
1845 AddIntegerVariableFromIntervals(helper, model, &result.vars,
1848
1849 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
1850 SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
1851 result.generate_cuts = [integer_trail, helper, demands_helper, capacity,
1852 sat_solver,
1853 model](LinearConstraintManager* manager) -> bool {
1854 auto generate_cuts = [integer_trail, sat_solver, model, manager, helper,
1855 demands_helper,
1856 capacity](bool time_is_forward) -> bool {
1857 DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0);
1858 if (!helper->SynchronizeAndSetTimeDirection(time_is_forward)) {
1859 return false;
1860 }
1861 if (!demands_helper->CacheAllEnergyValues()) return true;
1862
1863 std::vector<CompletionTimeEvent> events;
1864 const auto& lp_values = manager->LpValues();
1865 const VariablesAssignment& assignment = sat_solver->Assignment();
1866 for (int index = 0; index < helper->NumTasks(); ++index) {
1867 if (!helper->IsPresent(index)) continue;
1868 if (!DecomposedEnergyIsPropagated(assignment, index, helper,
1869 demands_helper)) {
1870 return true; // Propagation did not reach a fixed point. Abort.
1871 }
1872 if (helper->SizeMin(index) > 0 &&
1873 demands_helper->DemandMin(index) > 0) {
1874 CompletionTimeEvent event(index, helper, demands_helper);
1875 event.lp_end = event.end.LpValue(lp_values);
1876 events.push_back(event);
1877 }
1878 }
1879
1880 CtExhaustiveHelper helper;
1881 helper.Init(events, model);
1882
1883 const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
1885 "CumulativeCompletionTimeExhaustive", events, capacity_max,
1886 helper, model, manager)) {
1887 return false;
1888 }
1889
1890 GenerateCompletionTimeCutsWithEnergy("CumulativeCompletionTimeQueyrane",
1891 std::move(events), capacity_max,
1892 model, manager);
1893 return true;
1894 };
1895
1896 if (!generate_cuts(/*time_is_forward=*/true)) return false;
1897 if (!generate_cuts(/*time_is_forward=*/false)) return false;
1898 return true;
1899 };
1900 return result;
1901}
1902
1903} // namespace sat
1904} // namespace operations_research