Google OR-Tools v9.15
a fast and portable software suite for combinatorial optimization
scheduling_cuts.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/scheduling_cuts.h"
15
16#include <stdint.h>
17
18#include <algorithm>
19#include <cmath>
20#include <limits>
21#include <memory>
22#include <optional>
23#include <string>
24#include <tuple>
25#include <utility>
26#include <vector>
27
28#include "absl/algorithm/container.h"
29#include "absl/base/attributes.h"
30#include "absl/container/btree_set.h"
31#include "absl/container/flat_hash_map.h"
32#include "absl/log/check.h"
33#include "absl/strings/str_cat.h"
34#include "absl/strings/str_join.h"
35#include "absl/strings/string_view.h"
36#include "absl/types/span.h"
40#include "ortools/sat/cuts.h"
41#include "ortools/sat/integer.h"
46#include "ortools/sat/model.h"
51#include "ortools/sat/util.h"
55
56namespace operations_research {
57namespace sat {
58
59namespace {
60// Minimum amount of violation of the cut constraint by the solution. This
61// is needed to avoid numerical issues and adding cuts with minor effect.
62const double kMinCutViolation = 1e-4;
63
64// Checks that the literals of the decomposed energy (if present) and the size
65// and demand of a cumulative task are in sync.
66// In some rare instances, this is not the case. In that case, it is better not
67// to try to generate cuts.
68bool DecomposedEnergyIsPropagated(const VariablesAssignment& assignment, int t,
69 SchedulingConstraintHelper* helper,
70 SchedulingDemandHelper* demands_helper) {
71 const std::vector<LiteralValueValue>& decomposed_energy =
72 demands_helper->DecomposedEnergies()[t];
73 if (decomposed_energy.empty()) return true;
74
75 // Checks the propagation of the exactly_one constraint on literals.
76 int num_false_literals = 0;
77 int num_true_literals = 0;
78 for (const auto [lit, fixed_size, fixed_demand] : decomposed_energy) {
79 if (assignment.LiteralIsFalse(lit)) ++num_false_literals;
80 if (assignment.LiteralIsTrue(lit)) ++num_true_literals;
81 }
82 if (num_true_literals > 1) return false;
83 if (num_true_literals == 1 &&
84 num_false_literals < decomposed_energy.size() - 1) {
85 return false;
86 }
87 if (num_false_literals == decomposed_energy.size()) return false;
88 if (decomposed_energy.size() == 1 && num_true_literals != 1) return false;
89
90 // Checks the propagation of the bounds of the size and the demand.
91 IntegerValue propagated_size_min = kMaxIntegerValue;
92 IntegerValue propagated_size_max = kMinIntegerValue;
93 IntegerValue propagated_demand_min = kMaxIntegerValue;
94 IntegerValue propagated_demand_max = kMinIntegerValue;
95 for (const auto [lit, fixed_size, fixed_demand] : decomposed_energy) {
96 if (assignment.LiteralIsFalse(lit)) continue;
97
98 if (assignment.LiteralIsTrue(lit)) {
99 if (fixed_size != helper->SizeMin(t) ||
100 fixed_size != helper->SizeMax(t) ||
101 fixed_demand != demands_helper->DemandMin(t) ||
102 fixed_demand != demands_helper->DemandMax(t)) {
103 return false;
104 }
105 return true;
106 }
107
108 if (fixed_size < helper->SizeMin(t) || fixed_size > helper->SizeMax(t) ||
109 fixed_demand < demands_helper->DemandMin(t) ||
110 fixed_demand > demands_helper->DemandMax(t)) {
111 return false;
112 }
113 propagated_size_min = std::min(propagated_size_min, fixed_size);
114 propagated_size_max = std::max(propagated_size_max, fixed_size);
115 propagated_demand_min = std::min(propagated_demand_min, fixed_demand);
116 propagated_demand_max = std::max(propagated_demand_max, fixed_demand);
117 }
118
119 if (propagated_size_min != helper->SizeMin(t) ||
120 propagated_size_max != helper->SizeMax(t) ||
121 propagated_demand_min != demands_helper->DemandMin(t) ||
122 propagated_demand_max != demands_helper->DemandMax(t)) {
123 return false;
124 }
125
126 return true;
127}
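// Illustrative sketch (not part of the original file): the exactly_one
// propagation test above, restated on plain enum values instead of
// sat::Literal / VariablesAssignment. The names LitState and
// ExactlyOneLooksPropagated are hypothetical and only used for this example;
// it relies solely on <vector>, which is already included above.
enum class LitState { kFalse, kUnknown, kTrue };

bool ExactlyOneLooksPropagated(const std::vector<LitState>& lits) {
  int num_true = 0;
  int num_false = 0;
  for (const LitState s : lits) {
    if (s == LitState::kTrue) ++num_true;
    if (s == LitState::kFalse) ++num_false;
  }
  const int n = static_cast<int>(lits.size());
  if (num_true > 1) return false;                        // exactly_one violated.
  if (num_true == 1 && num_false < n - 1) return false;  // others not fixed yet.
  if (num_false == n) return false;                      // no alternative left.
  if (n == 1 && num_true != 1) return false;             // single term not true.
  return true;
}
// For instance {kTrue, kFalse, kFalse} passes while {kTrue, kUnknown, kFalse}
// does not, mirroring the first part of DecomposedEnergyIsPropagated().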
128
129} // namespace
130
131struct EnergyEvent {
132 EnergyEvent(int t, SchedulingConstraintHelper* x_helper)
133 : start_min(x_helper->StartMin(t)),
134 start_max(x_helper->StartMax(t)),
135 end_min(x_helper->EndMin(t)),
136 end_max(x_helper->EndMax(t)),
137 size_min(x_helper->SizeMin(t)) {}
138
139 // Cache of the bounds of the interval
140 IntegerValue start_min;
141 IntegerValue start_max;
142 IntegerValue end_min;
143 IntegerValue end_max;
144 IntegerValue size_min;
145
146 // Cache of the bounds of the demand.
147 IntegerValue demand_min;
148
149 // The energy min of this event.
150 IntegerValue energy_min;
151
152 // If non empty, a decomposed view of the energy of this event.
153 // First value in each pair is size, second is demand.
154 std::vector<LiteralValueValue> decomposed_energy;
156
157 // We need this for linearizing the energy in some cases.
158 AffineExpression demand;
159
160 // If set, this event is optional and its presence is controlled by this.
161 LiteralIndex presence_literal_index = kNoLiteralIndex;
162
163 // A linear expression which is a valid lower bound on the total energy of
164 // this event. We also cache the activity of the expression to not recompute
165 // it all the time.
166 LinearExpression linearized_energy;
167 double linearized_energy_lp_value = 0.0;
168
169 // True if linearized_energy is not exact and a McCormick relaxation.
170 bool energy_is_quadratic = false;
171
172 // The actual value of the presence literal of the interval(s) is checked
173 // when the event is created. A value of kNoLiteralIndex indicates that either
174 // the interval was not optional, or that its presence literal is true at
175 // level zero.
176 bool IsPresent() const { return presence_literal_index == kNoLiteralIndex; }
177
178 // Computes the mandatory minimal overlap of the interval with the time window
179 // [start, end].
180 IntegerValue GetMinOverlap(IntegerValue start, IntegerValue end) const {
181 return std::max(
182 std::min({end_min - start, end - start_max, size_min, end - start}),
183 IntegerValue(0));
184 }
185
186 // This method expects all the other fields to have been filled before.
187 // It must be called before the EnergyEvent is used.
188 ABSL_MUST_USE_RESULT bool FillEnergyLp(
189 AffineExpression size,
191 Model* model) {
192 LinearConstraintBuilder tmp_energy(model);
193 if (IsPresent()) {
194 if (!decomposed_energy.empty()) {
195 if (!tmp_energy.AddDecomposedProduct(decomposed_energy)) return false;
196 } else {
197 tmp_energy.AddQuadraticLowerBound(size, demand,
198 model->GetOrCreate<IntegerTrail>(),
200 }
201 } else {
202 if (!tmp_energy.AddLiteralTerm(Literal(presence_literal_index),
203 energy_min)) {
204 return false;
205 }
206 }
207 linearized_energy = tmp_energy.BuildExpression();
209 return true;
210 }
211
212 std::string DebugString() const {
213 return absl::StrCat(
214 "EnergyEvent(start_min = ", start_min, ", start_max = ", start_max,
215 ", end_min = ", end_min, ", end_max = ", end_max, ", demand = ", demand,
216 ", energy = ",
217 decomposed_energy.empty()
218 ? "{}"
219 : absl::StrCat(decomposed_energy.size(), " terms"),
220 ", presence_literal_index = ", presence_literal_index, ")");
221 }
222};
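// Illustrative sketch (not part of the original file): the mandatory-overlap
// formula of EnergyEvent::GetMinOverlap() above, on plain int64_t values. The
// helper name MandatoryOverlap is hypothetical; only <algorithm> and
// <stdint.h>, already included above, are needed.
int64_t MandatoryOverlap(int64_t start_max, int64_t end_min, int64_t size_min,
                         int64_t window_start, int64_t window_end) {
  return std::max<int64_t>(
      std::min({end_min - window_start,       // Task pushed fully to the left.
                window_end - start_max,       // Task pushed fully to the right.
                size_min,                     // Task fully inside the window.
                window_end - window_start}),  // Window smaller than the task.
      int64_t{0});
}
// Example: a task with start in [2, 6] and size_min = 4 (hence end_min = 6)
// overlaps the window [4, 8] by at least min(6 - 4, 8 - 6, 4, 4) = 2 units,
// whatever its actual start time.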
223
224namespace {
225
226// Computes the energetic contribution of a task in a given time window, and
227// adds it to the cut. Returns false if adding this contribution failed, in
228// which case the cut should be abandoned.
229ABSL_MUST_USE_RESULT bool AddOneEvent(
230 const EnergyEvent& event, IntegerValue window_start,
231 IntegerValue window_end, LinearConstraintBuilder& cut,
232 bool* add_energy_to_name = nullptr, bool* add_quadratic_to_name = nullptr,
233 bool* add_opt_to_name = nullptr, bool* add_lifted_to_name = nullptr) {
234 if (event.end_min <= window_start || event.start_max >= window_end) {
235 return true; // Event can move outside the time window.
236 }
237
238 if (event.start_min >= window_start && event.end_max <= window_end) {
239 // Event is always contained by the time window.
240 cut.AddLinearExpression(event.linearized_energy);
241
242 if (event.energy_is_quadratic && add_quadratic_to_name != nullptr) {
243 *add_quadratic_to_name = true;
244 }
245 if (add_energy_to_name != nullptr &&
246 event.energy_min > event.size_min * event.demand_min) {
247 *add_energy_to_name = true;
248 }
249 if (!event.IsPresent() && add_opt_to_name != nullptr) {
250 *add_opt_to_name = true;
251 }
252 } else { // The event has a mandatory overlap with the time window.
253 const IntegerValue min_overlap =
254 event.GetMinOverlap(window_start, window_end);
255 if (min_overlap <= 0) return true;
256 if (add_lifted_to_name != nullptr) *add_lifted_to_name = true;
257
258 if (event.IsPresent()) {
259 const std::vector<LiteralValueValue>& energy = event.decomposed_energy;
260 if (energy.empty()) {
261 cut.AddTerm(event.demand, min_overlap);
262 } else {
263 const IntegerValue window_size = window_end - window_start;
264 for (const auto [lit, fixed_size, fixed_demand] : energy) {
265 const IntegerValue alt_end_min =
266 std::max(event.end_min, event.start_min + fixed_size);
267 const IntegerValue alt_start_max =
268 std::min(event.start_max, event.end_max - fixed_size);
269 const IntegerValue energy_min =
270 fixed_demand *
271 std::min({alt_end_min - window_start, window_end - alt_start_max,
272 fixed_size, window_size});
273 if (energy_min == 0) continue;
274 if (!cut.AddLiteralTerm(lit, energy_min)) return false;
275 }
276 if (add_energy_to_name != nullptr) *add_energy_to_name = true;
277 }
278 } else {
279 if (add_opt_to_name != nullptr) *add_opt_to_name = true;
280 const IntegerValue min_energy = ComputeEnergyMinInWindow(
281 event.start_min, event.start_max, event.end_min, event.end_max,
282 event.size_min, event.demand_min, event.decomposed_energy,
283 window_start, window_end);
284 if (min_energy > event.size_min * event.demand_min &&
285 add_energy_to_name != nullptr) {
286 *add_energy_to_name = true;
287 }
288 if (!cut.AddLiteralTerm(Literal(event.presence_literal_index),
289 min_energy)) {
290 return false;
291 }
292 }
293 }
294 return true;
295}
296
297// Returns the list of all possible demand values for the given event.
298// It returns an empty vector if the number of values is too large.
299std::vector<int64_t> FindPossibleDemands(const EnergyEvent& event,
300 const VariablesAssignment& assignment,
301 IntegerTrail* integer_trail) {
302 std::vector<int64_t> possible_demands;
303 if (event.decomposed_energy.empty()) {
304 if (integer_trail->IsFixed(event.demand)) {
305 possible_demands.push_back(
306 integer_trail->FixedValue(event.demand).value());
307 } else {
308 if (integer_trail->InitialVariableDomain(event.demand.var).Size() >
309 1000000) {
310 return {};
311 }
312 for (const int64_t var_value :
313 integer_trail->InitialVariableDomain(event.demand.var).Values()) {
314 possible_demands.push_back(event.demand.ValueAt(var_value).value());
315 }
316 }
317 } else {
318 for (const auto [lit, fixed_size, fixed_demand] : event.decomposed_energy) {
319 if (assignment.LiteralIsFalse(lit)) continue;
320 possible_demands.push_back(fixed_demand.value());
321 }
322 }
323 return possible_demands;
324}
325
326} // namespace
327
328// This cumulative energetic cut generator will split the cumulative span into
329// two regions.
330//
331// In the region before the min of the makespan, we will compute a more
332// precise reachable profile and have a better estimation of the energy
333// available between two time points. The improvement can come from two sources:
334// - subset sum indicates that the max capacity cannot be reached.
335// - sum of demands < max capacity.
336//
337// In the region after the min of the makespan, we will use
338// fixed_capacity * (makespan - makespan_min)
339// as the available energy.
340void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
341 absl::string_view cut_name,
343 absl::Span<std::unique_ptr<EnergyEvent>> events, IntegerValue capacity,
344 AffineExpression makespan, TimeLimit* time_limit, Model* model,
345 TopNCuts& top_n_cuts) {
346 // Checks the precondition of the code.
347 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
348 DCHECK(integer_trail->IsFixed(capacity));
349
350 const VariablesAssignment& assignment =
351 model->GetOrCreate<Trail>()->Assignment();
352
353 // Compute relevant time points.
354 // TODO(user): We could reduce this set.
355 // TODO(user): we can compute the max usage between makespan_min and
356 // makespan_max.
357 std::vector<IntegerValue> time_points;
358 std::vector<std::vector<int64_t>> possible_demands(events.size());
359 const IntegerValue makespan_min = integer_trail->LowerBound(makespan);
360 IntegerValue max_end_min = kMinIntegerValue; // Used to abort early.
361 IntegerValue max_end_max = kMinIntegerValue; // Used as a sentinel.
362 for (int i = 0; i < events.size(); ++i) {
363 const EnergyEvent& event = *events[i];
364 if (event.start_min < makespan_min) {
365 time_points.push_back(event.start_min);
366 }
367 if (event.start_max < makespan_min) {
368 time_points.push_back(event.start_max);
369 }
370 if (event.end_min < makespan_min) {
371 time_points.push_back(event.end_min);
372 }
373 if (event.end_max < makespan_min) {
374 time_points.push_back(event.end_max);
375 }
376 max_end_min = std::max(max_end_min, event.end_min);
377 max_end_max = std::max(max_end_max, event.end_max);
378 possible_demands[i] = FindPossibleDemands(event, assignment, integer_trail);
379 }
380 time_points.push_back(makespan_min);
381 time_points.push_back(max_end_max);
383
384 const int num_time_points = time_points.size();
385 absl::flat_hash_map<IntegerValue, IntegerValue> reachable_capacity_ending_at;
386
387 MaxBoundedSubsetSum reachable_capacity_subset_sum(capacity.value());
388 for (int i = 1; i < num_time_points; ++i) {
389 const IntegerValue window_start = time_points[i - 1];
390 const IntegerValue window_end = time_points[i];
391 reachable_capacity_subset_sum.Reset(capacity.value());
392 for (int i = 0; i < events.size(); ++i) {
393 const EnergyEvent& event = *events[i];
394 if (event.start_min >= window_end || event.end_max <= window_start) {
395 continue;
396 }
397 if (possible_demands[i].empty()) { // Number of values was too large.
398 // In practice, it stops the DP as the upper bound is reached.
399 reachable_capacity_subset_sum.Add(capacity.value());
400 } else {
401 reachable_capacity_subset_sum.AddChoices(possible_demands[i]);
402 }
403 if (reachable_capacity_subset_sum.CurrentMax() == capacity.value()) break;
404 }
405 reachable_capacity_ending_at[window_end] =
406 reachable_capacity_subset_sum.CurrentMax();
407 }
408
409 const double capacity_lp = ToDouble(capacity);
410 const double makespan_lp = makespan.LpValue(lp_values);
411 const double makespan_min_lp = ToDouble(makespan_min);
412 LinearConstraintBuilder temp_builder(model);
413 for (int i = 0; i + 1 < num_time_points; ++i) {
414 // Checks the time limit if the problem is too big.
415 if (events.size() > 50 && time_limit->LimitReached()) return;
416
417 const IntegerValue window_start = time_points[i];
418 // After max_end_min, all tasks can fit before window_start.
419 if (window_start >= max_end_min) break;
420
421 IntegerValue cumulated_max_energy = 0;
422 IntegerValue cumulated_max_energy_before_makespan_min = 0;
423 bool use_subset_sum = false;
424 bool use_subset_sum_before_makespan_min = false;
425
426 for (int j = i + 1; j < num_time_points; ++j) {
427 const IntegerValue strip_start = time_points[j - 1];
428 const IntegerValue window_end = time_points[j];
429 const IntegerValue max_reachable_capacity_in_current_strip =
430 reachable_capacity_ending_at[window_end];
431 DCHECK_LE(max_reachable_capacity_in_current_strip, capacity);
432
433 // Update states for the name of the generated cut.
434 if (max_reachable_capacity_in_current_strip < capacity) {
435 use_subset_sum = true;
436 if (window_end <= makespan_min) {
437 use_subset_sum_before_makespan_min = true;
438 }
439 }
440
441 const IntegerValue energy_in_strip =
442 (window_end - strip_start) * max_reachable_capacity_in_current_strip;
443 cumulated_max_energy += energy_in_strip;
444 if (window_end <= makespan_min) {
445 cumulated_max_energy_before_makespan_min += energy_in_strip;
446 }
447
448 if (window_start >= makespan_min) {
449 DCHECK_EQ(cumulated_max_energy_before_makespan_min, 0);
450 }
451 DCHECK_LE(cumulated_max_energy, capacity * (window_end - window_start));
452 const double max_energy_up_to_makespan_lp =
453 strip_start >= makespan_min
454 ? ToDouble(cumulated_max_energy_before_makespan_min) +
455 (makespan_lp - makespan_min_lp) * capacity_lp
456 : std::numeric_limits<double>::infinity();
457
458 // We prefer using the makespan as the cut will tighten itself when the
459 // objective value is improved.
460 //
461 // We reuse the min cut violation to allow some slack in the comparison
462 // between the two computed energy values.
463 bool cut_generated = true;
464 bool add_opt_to_name = false;
465 bool add_lifted_to_name = false;
466 bool add_quadratic_to_name = false;
467 bool add_energy_to_name = false;
468
469 temp_builder.Clear();
470 const bool use_makespan =
471 max_energy_up_to_makespan_lp <=
472 ToDouble(cumulated_max_energy) + kMinCutViolation;
473 const bool use_subset_sum_in_cut =
474 use_makespan ? use_subset_sum_before_makespan_min : use_subset_sum;
475
476 if (use_makespan) { // Add the energy from makespan_min to makespan.
477 temp_builder.AddConstant(makespan_min * capacity);
478 temp_builder.AddTerm(makespan, -capacity);
479 }
480
481 // Add contributions from all events.
482 for (const std::unique_ptr<EnergyEvent>& event : events) {
483 if (!AddOneEvent(*event, window_start, window_end, temp_builder,
484 &add_energy_to_name, &add_quadratic_to_name,
485 &add_opt_to_name, &add_lifted_to_name)) {
486 cut_generated = false;
487 break; // Exit the event loop.
488 }
489 }
490
491 // We can break here as any further iteration on j will hit the same
492 // issue.
493 if (!cut_generated) break;
494
495 // Build the cut to evaluate its efficacy.
496 const IntegerValue energy_rhs =
497 use_makespan ? cumulated_max_energy_before_makespan_min
498 : cumulated_max_energy;
499 LinearConstraint potential_cut =
500 temp_builder.BuildConstraint(kMinIntegerValue, energy_rhs);
501
502 if (potential_cut.NormalizedViolation(lp_values) >= kMinCutViolation) {
503 std::string full_name(cut_name);
504 if (add_energy_to_name) full_name.append("_energy");
505 if (add_lifted_to_name) full_name.append("_lifted");
506 if (use_makespan) full_name.append("_makespan");
507 if (add_opt_to_name) full_name.append("_optional");
508 if (add_quadratic_to_name) full_name.append("_quadratic");
509 if (use_subset_sum_in_cut) full_name.append("_subsetsum");
510 top_n_cuts.AddCut(std::move(potential_cut), full_name, lp_values);
511 }
512 }
513 }
514}
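// Illustrative sketch (not part of the original file): the "reachable
// capacity" bound used above, restated with a plain boolean subset-sum DP
// instead of MaxBoundedSubsetSum. ReachableCapacity is a hypothetical helper
// name; for simplicity each task contributes a single fixed demand, while the
// real code also handles sets of possible demand values via AddChoices().
int64_t ReachableCapacity(const std::vector<int64_t>& demands,
                          int64_t capacity) {
  std::vector<bool> reachable(capacity + 1, false);
  reachable[0] = true;
  for (const int64_t d : demands) {
    if (d <= 0 || d > capacity) continue;
    for (int64_t v = capacity; v >= d; --v) {
      if (reachable[v - d]) reachable[v] = true;
    }
  }
  for (int64_t v = capacity; v > 0; --v) {
    if (reachable[v]) return v;
  }
  return 0;
}
// Example: tasks with demands {3, 3, 5} on a cumulative of capacity 7 can
// never use more than 6 units at once (no subset of demands sums to 7), so a
// strip of length L contributes at most 6 * L, instead of 7 * L, to the
// energy available on the right-hand side of the cut.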
515
516void GenerateCumulativeEnergeticCuts(
517 absl::string_view cut_name,
519 absl::Span<std::unique_ptr<EnergyEvent>> events,
520 const AffineExpression& capacity, TimeLimit* time_limit, Model* model,
521 TopNCuts& top_n_cuts) {
522 double max_possible_energy_lp = 0.0;
523 for (const std::unique_ptr<EnergyEvent>& event : events) {
524 max_possible_energy_lp += event->linearized_energy_lp_value;
525 }
526
527 // Currently, we look at all the possible time windows, and will push all cuts
528 // in the TopNCuts object. From our observations, this generator creates only
529 // a few cuts for a given run.
530 //
531 // The complexity of this loop is n^3. If we follow the latest research, we
532 // could implement this in n log^2(n). Still, this is not visible in the
533 // profile as we only call this method at the root node.
534 const double capacity_lp = capacity.LpValue(lp_values);
535
536 // Compute relevant time points.
537 // TODO(user): We could reduce this set.
538 absl::btree_set<IntegerValue> time_points_set;
539 IntegerValue max_end_min = kMinIntegerValue;
540 for (const std::unique_ptr<EnergyEvent>& event : events) {
541 time_points_set.insert(event->start_min);
542 time_points_set.insert(event->start_max);
543 time_points_set.insert(event->end_min);
544 time_points_set.insert(event->end_max);
545 max_end_min = std::max(max_end_min, event->end_min);
546 }
547 const std::vector<IntegerValue> time_points(time_points_set.begin(),
548 time_points_set.end());
549 const int num_time_points = time_points.size();
550
551 LinearConstraintBuilder temp_builder(model);
552 for (int i = 0; i + 1 < num_time_points; ++i) {
553 // Checks the time limit if the problem is too big.
554 if (events.size() > 50 && time_limit->LimitReached()) return;
555
556 const IntegerValue window_start = time_points[i];
557 // After max_end_min, all tasks can fit before window_start.
558 if (window_start >= max_end_min) break;
559
560 for (int j = i + 1; j < num_time_points; ++j) {
561 const IntegerValue window_end = time_points[j];
562 const double available_energy_lp =
563 ToDouble(window_end - window_start) * capacity_lp;
564 if (available_energy_lp >= max_possible_energy_lp) break;
565
566 bool cut_generated = true;
567 bool add_opt_to_name = false;
568 bool add_lifted_to_name = false;
569 bool add_quadratic_to_name = false;
570 bool add_energy_to_name = false;
571 temp_builder.Clear();
572
573 // Compute the max energy available for the tasks.
574 temp_builder.AddTerm(capacity, window_start - window_end);
575
576 // Add all contributions.
577 for (const std::unique_ptr<EnergyEvent>& event : events) {
578 if (!AddOneEvent(*event, window_start, window_end, temp_builder,
579 &add_energy_to_name, &add_quadratic_to_name,
580 &add_opt_to_name, &add_lifted_to_name)) {
581 cut_generated = false;
582 break; // Exit the event loop.
583 }
584 }
585
586 // We can break here as any further iteration on j will hit the same
587 // issue.
588 if (!cut_generated) break;
589
590 // Build the cut to evaluate its efficacy.
591 LinearConstraint potential_cut =
592 temp_builder.BuildConstraint(kMinIntegerValue, 0);
593
594 if (potential_cut.NormalizedViolation(lp_values) >= kMinCutViolation) {
595 std::string full_name(cut_name);
596 if (add_energy_to_name) full_name.append("_energy");
597 if (add_lifted_to_name) full_name.append("_lifted");
598 if (add_opt_to_name) full_name.append("_optional");
599 if (add_quadratic_to_name) full_name.append("_quadratic");
600 top_n_cuts.AddCut(std::move(potential_cut), full_name, lp_values);
601 }
602 }
603 }
604}
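// Illustrative sketch (not part of the original file): the shape of the basic
// energetic cut built above. For a window [t1, t2), a valid cut is
//   sum_i contribution_i(t1, t2) <= capacity * (t2 - t1),
// and the real code only keeps it when its violation, normalized by the cut
// norm, is at least kMinCutViolation. The helper below evaluates the
// unnormalized test on plain doubles; its name is hypothetical.
bool EnergeticCutIsViolatedAtLpPoint(
    double capacity_lp, int64_t t1, int64_t t2,
    const std::vector<double>& contributions_lp) {
  double lhs = 0.0;
  for (const double c : contributions_lp) lhs += c;
  return lhs >= capacity_lp * static_cast<double>(t2 - t1) + kMinCutViolation;
}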
605
606CutGenerator CreateCumulativeEnergyCutGenerator(
607 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
608 const AffineExpression& capacity,
609 const std::optional<AffineExpression>& makespan, Model* model) {
610 CutGenerator result;
611 result.only_run_at_level_zero = true;
612 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
613 &result.vars);
615 helper, model, &result.vars,
617 if (makespan.has_value() && !makespan.value().IsConstant()) {
618 result.vars.push_back(makespan.value().var);
619 }
621 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
622 TimeLimit* time_limit = model->GetOrCreate<TimeLimit>();
623 SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
624
625 result.generate_cuts = [makespan, capacity, demands_helper, helper,
626 integer_trail, time_limit, sat_solver,
627 model](LinearConstraintManager* manager) {
628 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
629 if (!demands_helper->CacheAllEnergyValues()) return true;
630
631 const auto& lp_values = manager->LpValues();
632 const VariablesAssignment& assignment = sat_solver->Assignment();
633
634 std::vector<std::unique_ptr<EnergyEvent>> events;
635 for (int i = 0; i < helper->NumTasks(); ++i) {
636 if (helper->IsAbsent(i)) continue;
637 // TODO(user): use level 0 bounds ?
638 if (demands_helper->DemandMax(i) == 0 || helper->SizeMin(i) == 0) {
639 continue;
640 }
641 if (!DecomposedEnergyIsPropagated(assignment, i, helper,
642 demands_helper)) {
643 return true; // Propagation did not reach a fixed point. Abort.
644 }
645
646 auto e = std::make_unique<EnergyEvent>(i, helper);
647 e->demand = demands_helper->Demands()[i];
648 e->demand_min = demands_helper->DemandMin(i);
649 e->decomposed_energy = demands_helper->DecomposedEnergies()[i];
650 if (e->decomposed_energy.size() == 1) {
651 // We know it was propagated correctly. We can remove this field.
652 e->decomposed_energy.clear();
653 }
654 e->energy_min = demands_helper->EnergyMin(i);
655 e->energy_is_quadratic = demands_helper->EnergyIsQuadratic(i);
656 if (!helper->IsPresent(i)) {
657 e->presence_literal_index = helper->PresenceLiteral(i).Index();
658 }
659 // We can always skip events.
660 if (!e->FillEnergyLp(helper->Sizes()[i], lp_values, model)) continue;
661 events.push_back(std::move(e));
662 }
663
664 TopNCuts top_n_cuts(5);
665 std::vector<absl::Span<std::unique_ptr<EnergyEvent>>> disjoint_events =
666 SplitEventsInIndendentSets(events);
667 // Can we pass cluster as const. It would mean sorting before.
668 for (const absl::Span<std::unique_ptr<EnergyEvent>> cluster :
669 disjoint_events) {
670 if (makespan.has_value() && integer_trail->IsFixed(capacity)) {
671 GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
672 "CumulativeEnergyM", lp_values, cluster,
673 integer_trail->FixedValue(capacity), makespan.value(), time_limit,
674 model, top_n_cuts);
675
676 } else {
677 GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, cluster,
678 capacity, time_limit, model,
679 top_n_cuts);
680 }
681 }
682 top_n_cuts.TransferToManager(manager);
683 return true;
684 };
685
686 return result;
687}
688
689CutGenerator CreateNoOverlapEnergyCutGenerator(
690 SchedulingConstraintHelper* helper,
691 const std::optional<AffineExpression>& makespan, Model* model) {
692 CutGenerator result;
693 result.only_run_at_level_zero = true;
695 helper, model, &result.vars,
697 if (makespan.has_value() && !makespan.value().IsConstant()) {
698 result.vars.push_back(makespan.value().var);
699 }
701 TimeLimit* time_limit = model->GetOrCreate<TimeLimit>();
702
703 result.generate_cuts = [makespan, helper, time_limit,
704 model](LinearConstraintManager* manager) {
705 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
706
707 const auto& lp_values = manager->LpValues();
708 std::vector<std::unique_ptr<EnergyEvent>> events;
709 for (int i = 0; i < helper->NumTasks(); ++i) {
710 if (helper->IsAbsent(i)) continue;
711 if (helper->SizeMin(i) == 0) continue;
712
713 auto e = std::make_unique<EnergyEvent>(i, helper);
714 e->demand = IntegerValue(1);
715 e->demand_min = IntegerValue(1);
716 e->energy_min = e->size_min;
717 if (!helper->IsPresent(i)) {
718 e->presence_literal_index = helper->PresenceLiteral(i).Index();
719 }
720 // We can always skip events.
721 if (!e->FillEnergyLp(helper->Sizes()[i], lp_values, model)) continue;
722 events.push_back(std::move(e));
723 }
724
725 TopNCuts top_n_cuts(5);
726 std::vector<absl::Span<std::unique_ptr<EnergyEvent>>> disjoint_events =
727 SplitEventsInIndendentSets(events);
728 for (const absl::Span<std::unique_ptr<EnergyEvent>> cluster :
729 disjoint_events) {
730 if (makespan.has_value()) {
731 GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
732 "NoOverlapEnergyM", lp_values, cluster,
733 /*capacity=*/IntegerValue(1), makespan.value(), time_limit, model,
734 top_n_cuts);
735 } else {
736 GenerateCumulativeEnergeticCuts("NoOverlapEnergy", lp_values, cluster,
737 /*capacity=*/IntegerValue(1),
738 time_limit, model, top_n_cuts);
739 }
740 }
741 top_n_cuts.TransferToManager(manager);
742 return true;
743 };
744 return result;
745}
746
747CutGenerator CreateCumulativeTimeTableCutGenerator(
748 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
749 const AffineExpression& capacity, Model* model) {
750 CutGenerator result;
751 result.only_run_at_level_zero = true;
752 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
753 &result.vars);
754 AddIntegerVariableFromIntervals(helper, model, &result.vars,
757
758 struct TimeTableEvent {
759 int interval_index;
760 IntegerValue time;
761 LinearExpression demand;
762 double demand_lp = 0.0;
763 bool is_positive = false;
764 bool use_decomposed_energy_min = false;
765 bool is_optional = false;
766 };
767
768 result.generate_cuts = [helper, capacity, demands_helper,
769 model](LinearConstraintManager* manager) {
770 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
771 if (!demands_helper->CacheAllEnergyValues()) return true;
772
773 TopNCuts top_n_cuts(5);
774 std::vector<std::unique_ptr<TimeTableEvent>> events;
775 const auto& lp_values = manager->LpValues();
776 if (lp_values.empty()) return true; // No linear relaxation.
777 const double capacity_lp = capacity.LpValue(lp_values);
778
779 // Iterate through the intervals. If start_max < end_min, the demand
780 // is mandatory.
781 for (int i = 0; i < helper->NumTasks(); ++i) {
782 if (helper->IsAbsent(i)) continue;
783
784 const IntegerValue start_max = helper->StartMax(i);
785 const IntegerValue end_min = helper->EndMin(i);
786
787 if (start_max >= end_min) continue;
788
789 auto e1 = std::make_unique<TimeTableEvent>();
790 e1->interval_index = i;
791 e1->time = start_max;
792 {
793 LinearConstraintBuilder builder(model);
794 // Ignore the interval if the linearized demand fails.
795 if (!demands_helper->AddLinearizedDemand(i, &builder)) continue;
796 e1->demand = builder.BuildExpression();
797 }
798 e1->demand_lp = e1->demand.LpValue(lp_values);
799 e1->is_positive = true;
800 e1->use_decomposed_energy_min =
801 !demands_helper->DecomposedEnergies()[i].empty();
802 e1->is_optional = !helper->IsPresent(i);
803
804 auto e2 = std::make_unique<TimeTableEvent>(*e1);
805 e2->time = end_min;
806 e2->is_positive = false;
807
808 events.push_back(std::move(e1));
809 events.push_back(std::move(e2));
810 }
811
812 // Sort events by time.
813 // It is also important that all positive events with the same time as
814 // negative events appear after them, for the correctness of the algo below.
815 absl::c_stable_sort(events, [](const std::unique_ptr<TimeTableEvent>& i,
816 const std::unique_ptr<TimeTableEvent>& j) {
817 return std::tie(i->time, i->is_positive) <
818 std::tie(j->time, j->is_positive);
819 });
820
821 double sum_of_demand_lp = 0.0;
822 bool positive_event_added_since_last_check = false;
823 for (int i = 0; i < events.size(); ++i) {
824 const TimeTableEvent* e = events[i].get();
825 if (e->is_positive) {
826 positive_event_added_since_last_check = true;
827 sum_of_demand_lp += e->demand_lp;
828 continue;
829 }
830
831 if (positive_event_added_since_last_check) {
832 // Reset positive event added. We do not want to create cuts for
833 // each negative event in sequence.
834 positive_event_added_since_last_check = false;
835
836 if (sum_of_demand_lp >= capacity_lp + kMinCutViolation) {
837 // Create cut.
838 bool use_decomposed_energy_min = false;
839 bool use_optional = false;
840 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
841 cut.AddTerm(capacity, IntegerValue(-1));
842 // The i-th event, which is a negative event, follows a positive
843 // event. We must ignore it in our cut generation.
844 DCHECK(!events[i]->is_positive);
845 const IntegerValue time_point = events[i - 1]->time;
846
847 for (int j = 0; j < i; ++j) {
848 const TimeTableEvent* cut_event = events[j].get();
849 const int t = cut_event->interval_index;
850 DCHECK_LE(helper->StartMax(t), time_point);
851 if (!cut_event->is_positive || helper->EndMin(t) <= time_point) {
852 continue;
853 }
854
855 cut.AddLinearExpression(cut_event->demand, IntegerValue(1));
856 use_decomposed_energy_min |= cut_event->use_decomposed_energy_min;
857 use_optional |= cut_event->is_optional;
858 }
859
860 std::string cut_name = "CumulativeTimeTable";
861 if (use_optional) cut_name += "_optional";
862 if (use_decomposed_energy_min) cut_name += "_energy";
863 top_n_cuts.AddCut(cut.Build(), cut_name, lp_values);
864 }
865 }
866
867 // The demand_lp was added in case of a positive event. We need to
868 // remove it for a negative event.
869 sum_of_demand_lp -= e->demand_lp;
870 }
871 top_n_cuts.TransferToManager(manager);
872 return true;
873 };
874 return result;
875}
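// Illustrative sketch (not part of the original file): the sweep performed by
// the time-table generator above, on plain doubles. Each task with a
// mandatory part contributes a +demand event at start_max and a -demand event
// at end_min; whenever the running demand exceeds the capacity at a negative
// event, the time of the preceding event is a candidate for a cut. The names
// SweepEvent and OverloadedTimePoints are hypothetical.
struct SweepEvent {
  int64_t time = 0;
  bool is_positive = false;
  double demand_lp = 0.0;
};

std::vector<int64_t> OverloadedTimePoints(std::vector<SweepEvent> events,
                                          double capacity_lp) {
  std::stable_sort(events.begin(), events.end(),
                   [](const SweepEvent& a, const SweepEvent& b) {
                     return std::tie(a.time, a.is_positive) <
                            std::tie(b.time, b.is_positive);
                   });
  std::vector<int64_t> result;
  double sum_of_demand_lp = 0.0;
  bool positive_event_added_since_last_check = false;
  for (int i = 0; i < static_cast<int>(events.size()); ++i) {
    if (events[i].is_positive) {
      positive_event_added_since_last_check = true;
      sum_of_demand_lp += events[i].demand_lp;
      continue;
    }
    if (positive_event_added_since_last_check &&
        sum_of_demand_lp >= capacity_lp + kMinCutViolation) {
      result.push_back(events[i - 1].time);  // Same choice as the code above.
    }
    positive_event_added_since_last_check = false;
    sum_of_demand_lp -= events[i].demand_lp;
  }
  return result;
}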
876
877// Cached Information about one interval.
878// Note that everything must correspond to level zero bounds, otherwise the
879// generated cuts are not valid.
880
881struct CachedIntervalData {
882 CachedIntervalData(int t, SchedulingConstraintHelper* helper)
883 : start_min(helper->StartMin(t)),
884 start_max(helper->StartMax(t)),
885 start(helper->Starts()[t]),
886 end_min(helper->EndMin(t)),
887 end_max(helper->EndMax(t)),
888 end(helper->Ends()[t]),
889 size_min(helper->SizeMin(t)) {}
890
891 IntegerValue start_min;
892 IntegerValue start_max;
893 AffineExpression start;
894 IntegerValue end_min;
895 IntegerValue end_max;
896 AffineExpression end;
897 IntegerValue size_min;
898
899 IntegerValue demand_min;
900};
901
902void GenerateCutsBetweenPairOfNonOverlappingTasks(
903 absl::string_view cut_name, bool ignore_zero_size_intervals,
905 absl::Span<std::unique_ptr<CachedIntervalData>> events,
906 IntegerValue capacity_max, Model* model, TopNCuts& top_n_cuts) {
907 const int num_events = events.size();
908 if (num_events <= 1) return;
909
910 absl::c_stable_sort(events,
911 [](const std::unique_ptr<CachedIntervalData>& e1,
912 const std::unique_ptr<CachedIntervalData>& e2) {
913 return std::tie(e1->start_min, e1->end_max) <
914 std::tie(e2->start_min, e2->end_max);
915 });
916
917 // Balas disjunctive cuts on 2 tasks a and b:
918 // start_1 * (duration_1 + start_min_1 - start_min_2) +
919 // start_2 * (duration_2 + start_min_2 - start_min_1) >=
920 // duration_1 * duration_2 +
921 // start_min_1 * duration_2 +
922 // start_min_2 * duration_1
923 // From: David L. Applegate, William J. Cook:
924 // A Computational Study of the Job-Shop Scheduling Problem. 149-156
925 // INFORMS Journal on Computing, Volume 3, Number 1, Winter 1991
926 const auto add_balas_disjunctive_cut =
927 [&](absl::string_view local_cut_name, IntegerValue start_min_1,
928 IntegerValue duration_min_1, AffineExpression start_1,
929 IntegerValue start_min_2, IntegerValue duration_min_2,
930 AffineExpression start_2) {
931 // Checks hypothesis from the cut.
932 if (start_min_2 >= start_min_1 + duration_min_1 ||
933 start_min_1 >= start_min_2 + duration_min_2) {
934 return;
935 }
936 const IntegerValue coeff_1 = duration_min_1 + start_min_1 - start_min_2;
937 const IntegerValue coeff_2 = duration_min_2 + start_min_2 - start_min_1;
938 const IntegerValue rhs = duration_min_1 * duration_min_2 +
939 duration_min_1 * start_min_2 +
940 duration_min_2 * start_min_1;
941
942 if (ToDouble(coeff_1) * start_1.LpValue(lp_values) +
943 ToDouble(coeff_2) * start_2.LpValue(lp_values) <=
944 ToDouble(rhs) - kMinCutViolation) {
945 LinearConstraintBuilder cut(model, rhs, kMaxIntegerValue);
946 cut.AddTerm(start_1, coeff_1);
947 cut.AddTerm(start_2, coeff_2);
948 top_n_cuts.AddCut(cut.Build(), local_cut_name, lp_values);
949 }
950 };
951
952 for (int i = 0; i + 1 < num_events; ++i) {
953 const CachedIntervalData* e1 = events[i].get();
954 for (int j = i + 1; j < num_events; ++j) {
955 const CachedIntervalData* e2 = events[j].get();
956 if (e2->start_min >= e1->end_max) break; // Break out of the index2 loop.
957
958 // Encode only the interesting pairs.
959 if (e1->demand_min + e2->demand_min <= capacity_max) continue;
960
961 if (ignore_zero_size_intervals &&
962 (e1->size_min <= 0 || e2->size_min <= 0)) {
963 continue;
964 }
965
966 const bool interval_1_can_precede_2 = e1->end_min <= e2->start_max;
967 const bool interval_2_can_precede_1 = e2->end_min <= e1->start_max;
968
969 if (interval_1_can_precede_2 && !interval_2_can_precede_1 &&
970 e1->end.LpValue(lp_values) >=
971 e2->start.LpValue(lp_values) + kMinCutViolation) {
972 // interval_1.end <= interval_2.start
973 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
974 cut.AddTerm(e1->end, IntegerValue(1));
975 cut.AddTerm(e2->start, IntegerValue(-1));
976 top_n_cuts.AddCut(cut.Build(),
977 absl::StrCat(cut_name, "DetectedPrecedence"),
978 lp_values);
979 } else if (interval_2_can_precede_1 && !interval_1_can_precede_2 &&
980 e2->end.LpValue(lp_values) >=
981 e1->start.LpValue(lp_values) + kMinCutViolation) {
982 // interval_2.end <= interval_1.start
983 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
984 cut.AddTerm(e2->end, IntegerValue(1));
985 cut.AddTerm(e1->start, IntegerValue(-1));
986 top_n_cuts.AddCut(cut.Build(),
987 absl::StrCat(cut_name, "DetectedPrecedence"),
988 lp_values);
989 } else {
990 add_balas_disjunctive_cut(absl::StrCat(cut_name, "DisjunctionOnStart"),
991 e1->start_min, e1->size_min, e1->start,
992 e2->start_min, e2->size_min, e2->start);
993 add_balas_disjunctive_cut(absl::StrCat(cut_name, "DisjunctionOnEnd"),
994 -e1->end_max, e1->size_min, e1->end.Negated(),
995 -e2->end_max, e2->size_min,
996 e2->end.Negated());
997 }
998 }
999 }
1000}
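// Illustrative sketch (not part of the original file): a brute-force check of
// the Balas disjunctive inequality quoted above, for two tasks that can never
// overlap. Under the hypothesis start_min_2 < start_min_1 + duration_1 and
// start_min_1 < start_min_2 + duration_2, every pair of starts in which one
// task completely precedes the other satisfies the inequality. The helper name
// is hypothetical and the enumeration window of 20 time units is arbitrary.
bool BalasCutHoldsOnSmallGrid(int64_t s1, int64_t d1, int64_t s2, int64_t d2) {
  const int64_t coeff_1 = d1 + s1 - s2;
  const int64_t coeff_2 = d2 + s2 - s1;
  const int64_t rhs = d1 * d2 + s1 * d2 + s2 * d1;
  for (int64_t start1 = s1; start1 <= s1 + 20; ++start1) {
    for (int64_t start2 = s2; start2 <= s2 + 20; ++start2) {
      const bool one_before_two = start1 + d1 <= start2;
      const bool two_before_one = start2 + d2 <= start1;
      if (!one_before_two && !two_before_one) continue;  // Tasks overlap.
      if (coeff_1 * start1 + coeff_2 * start2 < rhs) return false;
    }
  }
  return true;
}
// Example: BalasCutHoldsOnSmallGrid(/*s1=*/0, /*d1=*/3, /*s2=*/1, /*d2=*/4)
// returns true; the corresponding cut 2 * start_1 + 5 * start_2 >= 15 is tight
// for start_1 = 0, start_2 = 3 and for start_1 = 5, start_2 = 1.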
1001
1002CutGenerator CreateCumulativePrecedenceCutGenerator(
1003 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
1004 const AffineExpression& capacity, Model* model) {
1005 CutGenerator result;
1006 result.only_run_at_level_zero = true;
1007 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
1008 &result.vars);
1010 helper, model, &result.vars,
1013
1014 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
1015 result.generate_cuts = [integer_trail, helper, demands_helper, capacity,
1016 model](LinearConstraintManager* manager) {
1017 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1018
1019 std::vector<std::unique_ptr<CachedIntervalData>> events;
1020 for (int t = 0; t < helper->NumTasks(); ++t) {
1021 if (!helper->IsPresent(t)) continue;
1022 auto event = std::make_unique<CachedIntervalData>(t, helper);
1023 event->demand_min = demands_helper->DemandMin(t);
1024 events.push_back(std::move(event));
1025 }
1026
1027 const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
1028
1029 TopNCuts top_n_cuts(5);
1030 std::vector<absl::Span<std::unique_ptr<CachedIntervalData>>>
1031 disjoint_events = SplitEventsInIndendentSets(events);
1032 for (absl::Span<std::unique_ptr<CachedIntervalData>> cluster :
1033 disjoint_events) {
1034 GenerateCutsBetweenPairOfNonOverlappingTasks(
1035 "Cumulative", /* ignore_zero_size_intervals= */ true,
1036 manager->LpValues(), cluster, capacity_max, model, top_n_cuts);
1037 }
1038 top_n_cuts.TransferToManager(manager);
1039 return true;
1040 };
1041 return result;
1042}
1043
1044CutGenerator CreateNoOverlapPrecedenceCutGenerator(
1045 SchedulingConstraintHelper* helper, Model* model) {
1046 CutGenerator result;
1047 result.only_run_at_level_zero = true;
1049 helper, model, &result.vars,
1052
1053 result.generate_cuts = [helper, model](LinearConstraintManager* manager) {
1054 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1055
1056 std::vector<std::unique_ptr<CachedIntervalData>> events;
1057 for (int t = 0; t < helper->NumTasks(); ++t) {
1058 if (!helper->IsPresent(t)) continue;
1059 auto event = std::make_unique<CachedIntervalData>(t, helper);
1060 event->demand_min = IntegerValue(1);
1061 events.push_back(std::move(event));
1062 }
1063
1064 TopNCuts top_n_cuts(5);
1065 std::vector<absl::Span<std::unique_ptr<CachedIntervalData>>>
1066 disjoint_events = SplitEventsInIndendentSets(events);
1067 for (absl::Span<std::unique_ptr<CachedIntervalData>> cluster :
1068 disjoint_events) {
1069 GenerateCutsBetweenPairOfNonOverlappingTasks(
1070 "NoOverlap", /* ignore_zero_size_intervals= */ false,
1071 manager->LpValues(), cluster, IntegerValue(1), model, top_n_cuts);
1072 }
1073 top_n_cuts.TransferToManager(manager);
1074 return true;
1075 };
1076
1077 return result;
1078}
1079
1080CompletionTimeEvent::CompletionTimeEvent(
1081 int t, SchedulingConstraintHelper* helper,
1082 SchedulingDemandHelper* demands_helper)
1083 : task_index(t),
1084 start_min(helper->StartMin(t)),
1085 start_max(helper->StartMax(t)),
1086 end_min(helper->EndMin(t)),
1087 end_max(helper->EndMax(t)),
1088 size_min(helper->SizeMin(t)),
1089 start(helper->Starts()[t]),
1090 end(helper->Ends()[t]) {
1091 if (demands_helper == nullptr) {
1092 demand_min = 1;
1093 demand_is_fixed = true;
1096 } else {
1097 demand_min = demands_helper->DemandMin(t);
1098 demand_is_fixed = demands_helper->DemandIsFixed(t);
1099 // Default values for energy. Will be updated if decomposed energy is
1100 // not empty.
1103 decomposed_energy = demands_helper->DecomposedEnergies()[t];
1104 if (decomposed_energy.size() == 1) {
1105 // We know everything is propagated, we can remove this field.
1106 decomposed_energy.clear();
1107 }
1108 }
1109}
1110
1111std::string CompletionTimeEvent::DebugString() const {
1112 return absl::StrCat(
1113 "CompletionTimeEvent(task_index = ", task_index,
1114 ", start_min = ", start_min, ", start_max = ", start_max,
1115 ", size_min = ", size_min, ", end = ", end, ", lp_end = ", lp_end,
1116 ", size_min = ", size_min, " demand_min = ", demand_min,
1117 ", demand_is_fixed = ", demand_is_fixed, ", energy_min = ", energy_min,
1118 ", use_decomposed_energy_min = ", use_decomposed_energy_min,
1119 ", lifted = ", lifted, ", decomposed_energy = [",
1120 absl::StrJoin(decomposed_energy, ", ",
1121 [](std::string* out, const LiteralValueValue& e) {
1122 absl::StrAppend(out, e.left_value, " * ", e.right_value);
1123 }),
1124 "]");
1125}
1126
1128 absl::Span<std::unique_ptr<CompletionTimeEvent>> events, Model* model) {
1129 max_task_index_ = 0;
1130 if (events.empty()) return;
1131
1132 // We compute the max_task_index_ from the events early to avoid sorting
1133 // the events if there are too many of them.
1134 for (const auto& event : events) {
1135 max_task_index_ = std::max(max_task_index_, event->task_index);
1136 }
1137 BuildPredecessors(events, model);
1138 VLOG(2) << "num_tasks:" << max_task_index_ + 1
1139 << " num_precedences:" << predecessors_.num_entries()
1140 << " predecessors size:" << predecessors_.size();
1141}
1142
1143void CtExhaustiveHelper::BuildPredecessors(
1144 absl::Span<std::unique_ptr<CompletionTimeEvent>> events, Model* model) {
1145 predecessors_.clear();
1146 if (events.size() > 100) return;
1147
1148 RootLevelLinear2Bounds* root_level_bounds =
1149 model->GetOrCreate<RootLevelLinear2Bounds>();
1150
1151 std::vector<const CompletionTimeEvent*> sorted_events;
1152 sorted_events.reserve(events.size());
1153 for (const auto& event : events) {
1154 sorted_events.push_back(event.get());
1155 }
1156 std::sort(sorted_events.begin(), sorted_events.end(),
1157 [](const CompletionTimeEvent* a, const CompletionTimeEvent* b) {
1158 return a->task_index < b->task_index;
1159 });
1160
1161 predecessors_.reserve(max_task_index_ + 1);
1162 for (const auto* e1 : sorted_events) {
1163 for (const auto* e2 : sorted_events) {
1164 if (e2->task_index == e1->task_index) continue;
1165 const auto [expr, ub] = EncodeDifferenceLowerThan(e2->end, e1->start, 0);
1166 if (root_level_bounds->GetLevelZeroStatus(expr, kMinIntegerValue, ub) ==
1168 while (predecessors_.size() <= e1->task_index) predecessors_.Add({});
1169 predecessors_.AppendToLastVector(e2->task_index);
1170 }
1171 }
1172 }
1173}
1174
1176 absl::Span<std::unique_ptr<CompletionTimeEvent>> events,
1177 absl::Span<const int> permutation) {
1178 if (predecessors_.num_entries() == 0) return true;
1179 visited_.assign(max_task_index_ + 1, false);
1180 for (int i = permutation.size() - 1; i >= 0; --i) {
1181 const CompletionTimeEvent* event = events[permutation[i]].get();
1182 if (event->task_index >= predecessors_.size()) continue;
1183 for (const int predecessor : predecessors_[event->task_index]) {
1184 if (visited_[predecessor]) return false;
1185 }
1186 visited_[event->task_index] = true;
1187 }
1188 return true;
1189}
1190
1191namespace {
1192
1193bool ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap(
1194 absl::Span<CompletionTimeEvent*> events, absl::Span<const int> permutation,
1195 IntegerValue& sum_of_ends, IntegerValue& sum_of_weighted_ends) {
1196 // Reset the two sums.
1197 sum_of_ends = 0;
1198 sum_of_weighted_ends = 0;
1199
1200 // Loop over the permutation.
1201 IntegerValue end_min_of_previous_task = kMinIntegerValue;
1202 for (const int index : permutation) {
1203 const CompletionTimeEvent* event = events[index];
1204 const IntegerValue task_start_min =
1205 std::max(event->start_min, end_min_of_previous_task);
1206 if (event->start_max < task_start_min) return false; // Infeasible.
1207
1208 end_min_of_previous_task = task_start_min + event->size_min;
1209 sum_of_ends += end_min_of_previous_task;
1210 sum_of_weighted_ends += event->energy_min * end_min_of_previous_task;
1211 }
1212 return true;
1213}
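// Illustrative sketch (not part of the original file): how a helper like the
// one above is typically used. Enumerating the permutations of a small set of
// tasks and keeping the best packing gives a valid lower bound on sum(end_i),
// which becomes the right-hand side of a completion-time cut. The name
// MinSumOfEndsByEnumeration is hypothetical; start_max and optional tasks are
// ignored here, while the code above also rejects infeasible permutations.
int64_t MinSumOfEndsByEnumeration(const std::vector<int64_t>& start_min,
                                  const std::vector<int64_t>& size_min) {
  const int n = static_cast<int>(start_min.size());
  std::vector<int> perm(n);
  for (int i = 0; i < n; ++i) perm[i] = i;
  int64_t best = std::numeric_limits<int64_t>::max();
  do {
    int64_t previous_end = std::numeric_limits<int64_t>::min();
    int64_t sum_of_ends = 0;
    for (const int t : perm) {
      const int64_t start = std::max(start_min[t], previous_end);
      previous_end = start + size_min[t];
      sum_of_ends += previous_end;
    }
    best = std::min(best, sum_of_ends);
  } while (std::next_permutation(perm.begin(), perm.end()));
  return best;
}
// Example: three tasks released at time 0 with sizes {2, 3, 4} give a minimum
// sum of ends of 2 + 5 + 9 = 16 (shortest processing time first).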
1214
1215// This function packs all events in a cumulative of capacity 'capacity_max'
1216// following the given permutation. It returns the sum of end mins and the sum
1217// of end mins weighted by event->energy_min.
1218//
1219// It ensures that if event_j is after event_i in the permutation, then
1220// event_j starts exactly at the same time or after event_i.
1221//
1222// It returns false if one event cannot start before event->start_max.
1223bool ComputeWeightedSumOfEndMinsOfOnePermutation(
1224 absl::Span<CompletionTimeEvent*> events, absl::Span<const int> permutation,
1225 IntegerValue capacity_max, CtExhaustiveHelper& helper,
1226 IntegerValue& sum_of_ends, IntegerValue& sum_of_weighted_ends,
1227 bool& cut_use_precedences) {
1228 DCHECK_EQ(permutation.size(), events.size());
1229
1230 if (capacity_max == 1) {
1231 return ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap(
1232 events, permutation, sum_of_ends, sum_of_weighted_ends);
1233 }
1234
1235 // Reset the two sums.
1236 sum_of_ends = 0;
1237 sum_of_weighted_ends = 0;
1238
1239 // Quick check to see if the permutation is feasible:
1240 // ei = events[permutation[i]], ej = events[permutation[j]], i < j
1241 // - start_max(ej) >= start_min(ei)
1242 IntegerValue demand_min_of_previous_task = 0;
1243 IntegerValue start_min_of_previous_task = kMinIntegerValue;
1244 IntegerValue end_min_of_previous_task = kMinIntegerValue;
1245 for (const int index : permutation) {
1246 const CompletionTimeEvent* event = events[index];
1247 const IntegerValue threshold = std::max(
1248 event->start_min,
1249 (event->demand_min + demand_min_of_previous_task > capacity_max)
1250 ? end_min_of_previous_task
1251 : start_min_of_previous_task);
1252 if (event->start_max < threshold) {
1253 return false;
1254 }
1255
1256 start_min_of_previous_task = threshold;
1257 end_min_of_previous_task = threshold + event->size_min;
1258 demand_min_of_previous_task = event->demand_min;
1259 }
1260
1261 // The profile (and new profile) is a set of (time, capa_left) pairs,
1262 // ordered by increasing time and capa_left.
1263 helper.profile_.clear();
1264 helper.profile_.emplace_back(kMinIntegerValue, capacity_max);
1265 helper.profile_.emplace_back(kMaxIntegerValue, capacity_max);
1266
1267 // Loop over the permutation.
1268 helper.assigned_ends_.assign(helper.max_task_index() + 1, kMinIntegerValue);
1269 IntegerValue start_of_previous_task = kMinIntegerValue;
1270 for (const int index : permutation) {
1271 const CompletionTimeEvent* event = events[index];
1272 const IntegerValue start_min =
1273 std::max(event->start_min, start_of_previous_task);
1274
1275 // Iterate on the profile to find the step that contains start_min.
1276 // Then push until we find a step with enough capacity.
1277 auto profile_it = helper.profile_.begin();
1278 while ((profile_it + 1)->first <= start_min ||
1279 profile_it->second < event->demand_min) {
1280 ++profile_it;
1281 }
1282
1283 IntegerValue actual_start = std::max(start_min, profile_it->first);
1284 const IntegerValue initial_start_min = actual_start;
1285
1286 // Propagate precedences.
1287 //
1288 // helper.predecessors() can be truncated. We need to be careful here.
1289 if (event->task_index < helper.predecessors().size()) {
1290 for (const int predecessor : helper.predecessors()[event->task_index]) {
1291 if (helper.assigned_ends_[predecessor] == kMinIntegerValue) continue;
1292 actual_start =
1293 std::max(actual_start, helper.assigned_ends_[predecessor]);
1294 }
1295 }
1296
1297 if (actual_start > initial_start_min) {
1298 cut_use_precedences = true;
1299 // Catch up the position on the profile w.r.t. the actual start.
1300 while ((profile_it + 1)->first <= actual_start) ++profile_it;
1301 VLOG(3) << "push from " << initial_start_min << " to " << actual_start;
1302 }
1303
1304 // Compatible with the event->start_max ?
1305 if (actual_start > event->start_max) {
1306 return false;
1307 }
1308
1309 const IntegerValue actual_end = actual_start + event->size_min;
1310
1311 // Bookkeeping.
1312 helper.assigned_ends_[event->task_index] = actual_end;
1313 sum_of_ends += actual_end;
1314 sum_of_weighted_ends += event->energy_min * actual_end;
1315 start_of_previous_task = actual_start;
1316
1317 // No need to update the profile on the last loop.
1318 if (event->task_index == events[permutation.back()]->task_index) break;
1319
1320 // Update the profile.
1321 helper.new_profile_.clear();
1322 const IntegerValue demand_min = event->demand_min;
1323
1324 // Insert the start of the shifted profile.
1325 helper.new_profile_.push_back(
1326 {actual_start, profile_it->second - demand_min});
1327 ++profile_it;
1328
1329 // Copy and modify the part of the profile impacted by the current event.
1330 while (profile_it->first < actual_end) {
1331 helper.new_profile_.push_back(
1332 {profile_it->first, profile_it->second - demand_min});
1333 ++profile_it;
1334 }
1335
1336 // Insert a new event in the profile at the end of the task if needed.
1337 if (profile_it->first > actual_end) {
1338 helper.new_profile_.push_back({actual_end, (profile_it - 1)->second});
1339 }
1340
1341 // Insert the tail of the current profile.
1342 helper.new_profile_.insert(helper.new_profile_.end(), profile_it,
1343 helper.profile_.end());
1344
1345 helper.profile_.swap(helper.new_profile_);
1346 }
1347 return true;
1348}
1349
1350const int kCtExhaustiveTargetSize = 6;
1351// This corresponds to the number of permutations the system will explore when
1352// fully exploring all possible sizes and all possible permutations for up to 6
1353// tasks, without any precedence.
1354const int kExplorationLimit = 873; // 1! + 2! + 3! + 4! + 5! + 6!
1355
1356} // namespace
1357
1358CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins(
1359 absl::Span<CompletionTimeEvent*> events, IntegerValue capacity_max,
1360 double unweighted_threshold, double weighted_threshold,
1361 CtExhaustiveHelper& helper, double& min_sum_of_ends,
1362 double& min_sum_of_weighted_ends, bool& cut_use_precedences,
1363 int& exploration_credit) {
1364 // Reset the events based sums.
1365 min_sum_of_ends = std::numeric_limits<double>::max();
1366 min_sum_of_weighted_ends = std::numeric_limits<double>::max();
1367 helper.task_to_index_.assign(helper.max_task_index() + 1, -1);
1368 for (int i = 0; i < events.size(); ++i) {
1369 helper.task_to_index_[events[i]->task_index] = i;
1370 }
1371 helper.valid_permutation_iterator_.Reset(events.size());
1372 const auto& predecessors = helper.predecessors();
1373 for (int i = 0; i < events.size(); ++i) {
1374 const int task_i = events[i]->task_index;
1375 if (task_i >= predecessors.size()) continue;
1376 for (const int task_j : predecessors[task_i]) {
1377 const int j = helper.task_to_index_[task_j];
1378 if (j != -1) {
1380 }
1381 }
1382 }
1383
1384 bool is_dag = false;
1385 int num_valid_permutations = 0;
1386 for (const auto& permutation : helper.valid_permutation_iterator_) {
1387 is_dag = true;
1388 if (--exploration_credit < 0) break;
1389
1390 IntegerValue sum_of_ends = 0;
1391 IntegerValue sum_of_weighted_ends = 0;
1392
1393 if (ComputeWeightedSumOfEndMinsOfOnePermutation(
1394 events, permutation, capacity_max, helper, sum_of_ends,
1395 sum_of_weighted_ends, cut_use_precedences)) {
1396 min_sum_of_ends = std::min(ToDouble(sum_of_ends), min_sum_of_ends);
1397 min_sum_of_weighted_ends =
1398 std::min(ToDouble(sum_of_weighted_ends), min_sum_of_weighted_ends);
1399 ++num_valid_permutations;
1400
1401 if (min_sum_of_ends <= unweighted_threshold &&
1402 min_sum_of_weighted_ends <= weighted_threshold) {
1403 break;
1404 }
1405 }
1406 }
1407 if (!is_dag) {
1409 }
1410 const CompletionTimeExplorationStatus status =
1411 exploration_credit < 0 ? CompletionTimeExplorationStatus::ABORTED
1412 : num_valid_permutations > 0
1415 VLOG(2) << "DP: size:" << events.size()
1416 << ", num_valid_permutations:" << num_valid_permutations
1417 << ", min_sum_of_end_mins:" << min_sum_of_ends
1418 << ", min_sum_of_weighted_end_mins:" << min_sum_of_weighted_ends
1419 << ", unweighted_threshold:" << unweighted_threshold
1420 << ", weighted_threshold:" << weighted_threshold
1421 << ", exploration_credit:" << exploration_credit
1422 << ", status:" << status;
1423 return status;
1424}
1425
1426// TODO(user): Improve performance
1427// - detect disjoint tasks (no need to crossover to the second part)
1428// - better caching of explored states
1430 absl::string_view cut_name,
1432 absl::Span<std::unique_ptr<CompletionTimeEvent>> events,
1433 IntegerValue capacity_max, CtExhaustiveHelper& helper, Model* model,
1434 TopNCuts& top_n_cuts,
1435 std::vector<CompletionTimeEvent>& residual_event_storage) {
1436 // Sort by start min to bucketize by start_min.
1437 std::sort(events.begin(), events.end(),
1438 [](const std::unique_ptr<CompletionTimeEvent>& e1,
1439 const std::unique_ptr<CompletionTimeEvent>& e2) {
1440 return std::tie(e1->start_min, e1->energy_min, e1->lp_end,
1441 e1->task_index) <
1442 std::tie(e2->start_min, e2->energy_min, e2->lp_end,
1443 e2->task_index);
1444 });
1445 for (int start = 0; start + 1 < events.size(); ++start) {
1446 // Skip to the next start_min value.
1447 if (start > 0 && events[start]->start_min == events[start - 1]->start_min) {
1448 continue;
1449 }
1450
1451 bool cut_use_precedences = false; // Used for naming the cut.
1452 const IntegerValue sequence_start_min = events[start]->start_min;
1453
1454 // We look at events that start before sequence_start_min, but are forced
1455 // to cross this time point.
1456 residual_event_storage.clear();
1457 helper.residual_events_.clear();
1458 for (int before = 0; before < start; ++before) {
1459 if (events[before]->start_min + events[before]->size_min >
1460 sequence_start_min) {
1461 residual_event_storage.push_back(*events[before]); // Copy.
1462 residual_event_storage.back().lifted = true;
1463 }
1464 }
1465 for (int i = 0; i < residual_event_storage.size(); ++i) {
1466 helper.residual_events_.push_back(&residual_event_storage[i]);
1467 }
1468
1469 // Then we add the events starting at or after sequence_start_min.
1470 for (int i = start; i < events.size(); ++i) {
1471 helper.residual_events_.push_back(events[i].get());
1472 }
1473
1474 std::sort(helper.residual_events_.begin(), helper.residual_events_.end(),
1475 [](const CompletionTimeEvent* e1, const CompletionTimeEvent* e2) {
1476 return std::tie(e1->lp_end, e1->task_index) <
1477 std::tie(e2->lp_end, e2->task_index);
1478 });
1479
1480 double sum_of_ends_lp = 0.0;
1481 double sum_of_weighted_ends_lp = 0.0;
1482 IntegerValue sum_of_demands = 0;
1483 double sum_of_square_energies = 0;
1484 double min_sum_of_ends = std::numeric_limits<double>::max();
1485 double min_sum_of_weighted_ends = std::numeric_limits<double>::max();
1486 int exploration_limit = kExplorationLimit;
1487 const int kMaxSize = std::min<int>(helper.residual_events_.size(), 12);
1488
1489 for (int i = 0; i < kMaxSize; ++i) {
1490 const CompletionTimeEvent* event = helper.residual_events_[i];
1491 const double energy = ToDouble(event->energy_min);
1492 sum_of_ends_lp += event->lp_end;
1493 sum_of_weighted_ends_lp += event->lp_end * energy;
1494 sum_of_demands += event->demand_min;
1495 sum_of_square_energies += energy * energy;
1496
1497 // Both cases with 1 or 2 tasks are trivial and independent of the order.
1498 // Also, if the sum of demands is less than or equal to the capacity,
1499 // pushing all ends left is a valid LP assignment. And this assignment
1500 // should be propagated by the lp model.
1501 if (i <= 1 || sum_of_demands <= capacity_max) continue;
1502
1503 absl::Span<CompletionTimeEvent*> tasks_to_explore =
1504 absl::MakeSpan(helper.residual_events_).first(i + 1);
1505 const CompletionTimeExplorationStatus status =
1506 ComputeMinSumOfWeightedEndMins(
1507 tasks_to_explore, capacity_max,
1508 /* unweighted_threshold= */ sum_of_ends_lp + kMinCutViolation,
1509 /* weighted_threshold= */ sum_of_weighted_ends_lp +
1510 kMinCutViolation,
1511 helper, min_sum_of_ends, min_sum_of_weighted_ends,
1512 cut_use_precedences, exploration_limit);
1514 // TODO(user): We should return false here but there is a bug.
1515 break;
1516 } else if (status == CompletionTimeExplorationStatus::ABORTED) {
1517 break;
1518 }
1519
1520 const double unweigthed_violation =
1521 (min_sum_of_ends - sum_of_ends_lp) / std::sqrt(ToDouble(i + 1));
1522 const double weighted_violation =
1523 (min_sum_of_weighted_ends - sum_of_weighted_ends_lp) /
1524 std::sqrt(sum_of_square_energies);
1525
1526 // Unweighted cuts.
1527 if (unweigthed_violation > weighted_violation &&
1528 unweigthed_violation > kMinCutViolation) {
1529 LinearConstraintBuilder cut(model, min_sum_of_ends, kMaxIntegerValue);
1530 bool is_lifted = false;
1531 for (int j = 0; j <= i; ++j) {
1532 const CompletionTimeEvent* event = helper.residual_events_[j];
1533 is_lifted |= event->lifted;
1534 cut.AddTerm(event->end, IntegerValue(1));
1535 }
1536 std::string full_name(cut_name);
1537 if (cut_use_precedences) full_name.append("_prec");
1538 if (is_lifted) full_name.append("_lifted");
1539 top_n_cuts.AddCut(cut.Build(), full_name, lp_values);
1540 }
1541
1542 // Weighted cuts.
1543 if (weighted_violation >= unweigthed_violation &&
1544 weighted_violation > kMinCutViolation) {
1545 LinearConstraintBuilder cut(model, min_sum_of_weighted_ends,
1546 kMaxIntegerValue);
1547 bool is_lifted = false;
1548 for (int j = 0; j <= i; ++j) {
1549 const CompletionTimeEvent* event = helper.residual_events_[j];
1550 is_lifted |= event->lifted;
1551 cut.AddTerm(event->end, event->energy_min);
1552 }
1553 std::string full_name(cut_name);
1554 if (is_lifted) full_name.append("_lifted");
1555 if (cut_use_precedences) full_name.append("_prec");
1556 full_name.append("_weighted");
1557 top_n_cuts.AddCut(cut.Build(), full_name, lp_values);
1558 }
1559 }
1560 }
1561 return true;
1562}
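For reference, the two scores computed above normalize the cut violation by the Euclidean norm of the cut coefficients: the unweighted cut has one unit coefficient per selected task, while the weighted cut uses the energy_min values as coefficients. Below is a minimal standalone sketch of that scoring on plain doubles; ScoreCandidate and its parameter names are illustrative and not part of OR-Tools.

#include <cmath>
#include <vector>

// Violation of a candidate cut divided by the L2 norm of its coefficients:
// all-ones for the unweighted cut, the energies for the weighted cut.
struct CandidateScores {
  double unweighted;
  double weighted;
};

CandidateScores ScoreCandidate(double min_sum_of_ends, double sum_of_ends_lp,
                               double min_sum_of_weighted_ends,
                               double sum_of_weighted_ends_lp,
                               const std::vector<double>& energies) {
  double sum_of_square_energies = 0.0;
  for (const double e : energies) sum_of_square_energies += e * e;
  return {(min_sum_of_ends - sum_of_ends_lp) /
              std::sqrt(static_cast<double>(energies.size())),
          (min_sum_of_weighted_ends - sum_of_weighted_ends_lp) /
              std::sqrt(sum_of_square_energies)};
}

A cut is only emitted when the larger of the two scores exceeds kMinCutViolation, mirroring the checks above.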
1563
1564namespace {
1565 // Copies the event, with its start time increased to `time` and its energies
1566 // (min and decomposed) updated accordingly, and appends it to `events` if its energy remains positive.
1567void CopyAndTrimEventAfterAndAppendIfPositiveEnergy(
1568 const CompletionTimeEvent* old_event, IntegerValue time,
1569 std::vector<CompletionTimeEvent>& events) {
1570 CHECK_GT(time, old_event->start_min);
1571 CHECK_GT(old_event->start_min + old_event->size_min, time);
1572 CompletionTimeEvent event(*old_event);
1573 event.lifted = true;
1574
1575 // Trim the decomposed energy and compute the energy min in the window.
1576 const IntegerValue shift = time - event.start_min;
1577 CHECK_GT(shift, IntegerValue(0));
1578 event.size_min -= shift;
1579 event.energy_min = event.size_min * event.demand_min;
1580 if (!event.decomposed_energy.empty()) {
1581 // Trim fixed sizes and recompute the decomposed energy min.
1582 IntegerValue propagated_energy_min = kMaxIntegerValue;
1583 for (auto& [literal, fixed_size, fixed_demand] : event.decomposed_energy) {
1584 CHECK_GT(fixed_size, shift);
1585 fixed_size -= shift;
1586 propagated_energy_min =
1587 std::min(propagated_energy_min, fixed_demand * fixed_size);
1588 }
1589
1590 DCHECK_GT(propagated_energy_min, 0);
1591 if (propagated_energy_min > event.energy_min) {
1592 event.use_decomposed_energy_min = true;
1593 event.energy_min = propagated_energy_min;
1594 } else {
1595 event.use_decomposed_energy_min = false;
1596 }
1597 }
1598 event.start_min = time;
1599 if (event.energy_min > 0) {
1600 events.push_back(std::move(event));
1601 }
1602}
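As a sanity check of the trimming above, here is a minimal standalone version of the same arithmetic on plain integers, assuming the preconditions enforced by the CHECKs (time lies strictly inside [start_min, start_min + size_min)). TrimmedEnergyMin and its parameters are hypothetical names.

#include <algorithm>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>

// The trimmed energy lower bound is the max of (size_min - shift) * demand_min
// and the smallest (fixed_size - shift) * fixed_demand over the still-possible
// (size, demand) alternatives of the decomposed energy, if any.
int64_t TrimmedEnergyMin(
    int64_t start_min, int64_t size_min, int64_t demand_min, int64_t time,
    const std::vector<std::pair<int64_t, int64_t>>& alternatives) {
  const int64_t shift = time - start_min;  // > 0 by assumption.
  int64_t energy_min = (size_min - shift) * demand_min;
  if (!alternatives.empty()) {
    int64_t decomposed_min = std::numeric_limits<int64_t>::max();
    for (const auto& [fixed_size, fixed_demand] : alternatives) {
      decomposed_min =
          std::min(decomposed_min, (fixed_size - shift) * fixed_demand);
    }
    energy_min = std::max(energy_min, decomposed_min);
  }
  return energy_min;
}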
1603
1604// Collects all possible demand values for the event, and adds them to the
1605// subset sum of the reachable capacity of the cumulative constraint.
1606void AddEventDemandsToCapacitySubsetSum(
1607 const CompletionTimeEvent* event, const VariablesAssignment& assignment,
1608 IntegerValue capacity_max, std::vector<int64_t>& tmp_possible_demands,
1609 MaxBoundedSubsetSum& dp) {
1610 if (dp.CurrentMax() != capacity_max) {
1611 if (event->demand_is_fixed) {
1612 dp.Add(event->demand_min.value());
1613 } else if (!event->decomposed_energy.empty()) {
1614 tmp_possible_demands.clear();
1615 for (const auto& [literal, size, demand] : event->decomposed_energy) {
1616 if (assignment.LiteralIsFalse(literal)) continue;
1617 if (assignment.LiteralIsTrue(literal)) {
1618 tmp_possible_demands.assign({demand.value()});
1619 break;
1620 }
1621 tmp_possible_demands.push_back(demand.value());
1622 }
1623 dp.AddChoices(tmp_possible_demands);
1624 } else { // Demand not fixed and no decomposed energy: saturate the subset sum.
1625 // TODO(user): We could still add all events in the range if the range
1626 // is small.
1627 dp.Add(capacity_max.value());
1628 }
1629 }
1630}
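The helper above is used to tighten the capacity term of the cut: if no subset of compatible demands can reach capacity_max, the best reachable sum can stand in for capacity_max. A minimal standalone dynamic program illustrating that bound follows; MaxReachableCapacity is a hypothetical name, unrelated to the MaxBoundedSubsetSum class used above, which additionally bounds its own work.

#include <cstdint>
#include <utility>
#include <vector>

// Largest value <= capacity_max that is a sum of demands, picking at most one
// demand value per task. Each task is described by the demand values it may
// still take.
int64_t MaxReachableCapacity(
    int64_t capacity_max,
    const std::vector<std::vector<int64_t>>& demand_choices_per_task) {
  std::vector<bool> reachable(capacity_max + 1, false);
  reachable[0] = true;
  for (const std::vector<int64_t>& choices : demand_choices_per_task) {
    std::vector<bool> next = reachable;  // Skipping this task is allowed.
    for (int64_t sum = 0; sum <= capacity_max; ++sum) {
      if (!reachable[sum]) continue;
      for (const int64_t demand : choices) {
        if (sum + demand <= capacity_max) next[sum + demand] = true;
      }
    }
    reachable = std::move(next);
  }
  for (int64_t best = capacity_max; best >= 0; --best) {
    if (reachable[best]) return best;
  }
  return 0;
}

For example, with capacity_max = 10 and two tasks whose demand is either 4 or 7, the result is 8, which can then play the role of capacity_max in the energetic term of the cut.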
1631
1632} // namespace
1633
1634 // We generate the cut from Smith's rule, as described in:
1635// M. Queyranne, Structure of a simple scheduling polyhedron,
1636// Mathematical Programming 58 (1993), 263–285
1637//
1638// The original cut is:
1639// sum(end_min_i * size_min_i) >=
1640// (sum(size_min_i^2) + sum(size_min_i)^2) / 2
1641 // We strengthen this cut by noticing that if all tasks start after S,
1642 // then replacing end_min_i by (end_min_i - S) is still valid.
1643//
1644 // A second difference is that we lift intervals that start before a given
1645 // value, but are forced to cross it. This lifting procedure trims the
1646 // interval to its part that lies after the given value.
1647//
1648 // In the case of a cumulative constraint with a capacity of C, we derive a
1649 // valid inequality by splitting the task (size_min si, demand_min di) into di
1650 // tasks of size si and demand 1, which we spread across C no_overlap
1651 // constraints. When doing so, the lhs of the inequality is unchanged, and so
1652 // is the first term of the rhs. A lower bound of the second term of the
1653 // rhs is reached when the split is exact (each no_overlap sees a total load of
1654 // sum(si * di) / C). Thus the second term is greater than or equal to
1655 // (sum(si * di) ^ 2) / (2 * C)
1656//
1657 // Sometimes, the min energy of task i is greater than si * di.
1658 // Let's introduce ai, the minimum energy of the task, and rewrite the previous
1659 // inequality. In that new setting, we can rewrite the cumulative transformation
1660 // by splitting each task into at least di tasks of size at least si and demand
1661 // 1.
1662//
1663// In that setting, the lhs is rewritten as sum(ai * ei) and the second term of
1664// the rhs is rewritten as sum(ai) ^ 2 / (2 * C).
1665//
1666 // The question is how to rewrite the term sum(di * si * si). The minimum
1667 // contribution occurs when the task has size si and demand ai / si (note that
1668 // si is the minimum size of the task, and di its minimum demand). We can
1669 // thus replace the small-rectangle area term by ai * si.
1670//
1671// sum (ai * ei) - sum (ai) * current_start_min >=
1672// sum(si * ai) / 2 + (sum (ai) ^ 2) / (2 * C)
1673//
1674 // The separation procedure is implemented using two loops:
1675 // - first, we loop over potential start times in increasing order;
1676 // - second, we add the tasks that must contribute after this start time,
1677 // ordered by increasing end time in the LP relaxation.
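To make the resulting bound concrete, here is a minimal standalone evaluation of its right-hand side, mirroring the computation in the loop below (ai = energy_min, si = size_min, C = reachable capacity, S = smallest start_min of the selected tasks). MinimumRhs and TaskData are hypothetical names, and overflow checks are omitted.

#include <cstdint>
#include <vector>

// rhs = ceil((sum(ai * si) + ceil(sum(ai)^2 / C)) / 2) + sum(ai) * S
struct TaskData {
  int64_t size_min;
  int64_t energy_min;
};

int64_t MinimumRhs(const std::vector<TaskData>& tasks, int64_t capacity,
                   int64_t start_min) {
  int64_t sum_energy = 0;
  int64_t sum_event_contributions = 0;
  for (const TaskData& task : tasks) {
    sum_energy += task.energy_min;
    sum_event_contributions += task.energy_min * task.size_min;
  }
  const int64_t mean_large_rectangle_contrib =
      (sum_energy * sum_energy + capacity - 1) / capacity;  // CeilRatio.
  const int64_t min_contrib =
      (sum_event_contributions + mean_large_rectangle_contrib + 1) / 2;
  return min_contrib + sum_energy * start_min;
}

For two unit-demand tasks of sizes 3 and 4 with S = 0 and C = 1, this yields ceil((9 + 16 + 49) / 2) = 37, i.e. 3 * e1 + 4 * e2 >= 37, which matches Queyranne's original bound (sum(si^2) + (sum(si))^2) / 2 = (25 + 49) / 2 = 37 and is tight for both orderings of the two tasks.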
1678 void GenerateCompletionTimeCutsWithEnergy(
1679 absl::string_view cut_name,
1680 const util_intops::StrongVector<IntegerVariable, double>& lp_values,
1681 absl::Span<std::unique_ptr<CompletionTimeEvent>> events,
1682 IntegerValue capacity_max, Model* model, TopNCuts& top_n_cuts,
1683 std::vector<CompletionTimeEvent>& residual_event_storage) {
1684 const VariablesAssignment& assignment =
1685 model->GetOrCreate<Trail>()->Assignment();
1686 std::vector<int64_t> tmp_possible_demands;
1687
1688 // Sort by start min to bucketize by start_min.
1689 absl::c_stable_sort(
1690 events, [](const std::unique_ptr<CompletionTimeEvent>& e1,
1691 const std::unique_ptr<CompletionTimeEvent>& e2) {
1692 return std::tie(e1->start_min, e1->demand_min, e1->lp_end) <
1693 std::tie(e2->start_min, e2->demand_min, e2->lp_end);
1694 });
1695
1696 // First loop: we loop on potential start times.
1697 MaxBoundedSubsetSum dp;
1698 std::vector<CompletionTimeEvent*> residual_tasks;
1699 for (int start = 0; start + 1 < events.size(); ++start) {
1700 // Skip to the next start_min value.
1701 if (start > 0 && events[start]->start_min == events[start - 1]->start_min) {
1702 continue;
1703 }
1704
1705 const IntegerValue sequence_start_min = events[start]->start_min;
1706 residual_tasks.clear();
1707 for (int i = start; i < events.size(); ++i) {
1708 residual_tasks.push_back(events[i].get());
1709 }
1710
1711 // We look at events that start before sequence_start_min, but are forced
1712 // to cross this time point. In that case, we replace this event by a
1713 // truncated event starting at sequence_start_min. To do this, we reduce
1714 // the size_min, align the start_min with the sequence_start_min, and
1715 // scale the energy down accordingly.
1716 residual_event_storage.clear();
1717 for (int before = 0; before < start; ++before) {
1718 if (events[before]->start_min + events[before]->size_min >
1719 sequence_start_min) {
1720 CopyAndTrimEventAfterAndAppendIfPositiveEnergy(
1721 events[before].get(), sequence_start_min, residual_event_storage);
1722 }
1723 }
1724 for (int i = 0; i < residual_event_storage.size(); ++i) {
1725 residual_tasks.push_back(&residual_event_storage[i]);
1726 }
1727
1728 // If we have at most kCtExhaustiveTargetSize tasks, we are already
1729 // covered by the exhaustive cut generator.
1730 if (residual_tasks.size() <= kCtExhaustiveTargetSize) continue;
1731
1732 // Best cut so far for this loop.
1733 int best_end = -1;
1734 double best_efficacy = 0.01;
1735 IntegerValue best_min_contrib = 0;
1736 bool best_uses_subset_sum = false;
1737
1738 // Used in the first term of the rhs of the equation.
1739 IntegerValue sum_event_contributions = 0;
1740 // Used in the second term of the rhs of the equation.
1741 IntegerValue sum_energy = 0;
1742 // For normalization.
1743 IntegerValue sum_square_energy = 0;
1744
1745 double lp_contrib = 0.0;
1746 IntegerValue current_start_min(kMaxIntegerValue);
1747
1748 dp.Reset(capacity_max.value());
1749
1750 // We will add tasks one by one, sorted by end time, and evaluate the
1751 // potential cut at each step.
1752 absl::c_stable_sort(residual_tasks, [](const CompletionTimeEvent* e1,
1753 const CompletionTimeEvent* e2) {
1754 return e1->lp_end < e2->lp_end;
1755 });
1756
1757 // Second loop: we add tasks one by one.
1758 for (int i = 0; i < residual_tasks.size(); ++i) {
1759 const CompletionTimeEvent* event = residual_tasks[i];
1760 DCHECK_GE(event->start_min, sequence_start_min);
1761
1762 // Make sure we do not overflow.
1763 if (!AddTo(event->energy_min, &sum_energy)) break;
1764 // In the no_overlap case, we have:
1765 // area = event->size_min ^ 2
1766 // In the simple cumulative case, we split the task
1767 // (demand_min, size_min) into demand_min no_overlap tasks of size size_min:
1768 // area = event->demand_min * event->size_min * event->size_min
1769 // In the general cumulative case, we can have energy_min > size_min * demand_min.
1770 // In that case, we use energy_min * size_min.
1771 if (!AddProductTo(event->energy_min, event->size_min,
1772 &sum_event_contributions)) {
1773 break;
1774 }
1775 if (!AddSquareTo(event->energy_min, &sum_square_energy)) break;
1776
1777 lp_contrib += event->lp_end * ToDouble(event->energy_min);
1778 current_start_min = std::min(current_start_min, event->start_min);
1779
1780 // Maintain the reachable capacity with a bounded complexity subset sum.
1781 AddEventDemandsToCapacitySubsetSum(event, assignment, capacity_max,
1782 tmp_possible_demands, dp);
1783
1784 // Ignore cuts covered by the exhaustive cut generator.
1785 if (i < kCtExhaustiveTargetSize) continue;
1786
1787 const IntegerValue reachable_capacity = dp.CurrentMax();
1788
1789 // Do we have a violated cut?
1790 const IntegerValue large_rectangle_contrib =
1791 CapProdI(sum_energy, sum_energy);
1792 if (AtMinOrMaxInt64I(large_rectangle_contrib)) break;
1793 const IntegerValue mean_large_rectangle_contrib =
1794 CeilRatio(large_rectangle_contrib, reachable_capacity);
1795
1796 IntegerValue min_contrib =
1797 CapAddI(sum_event_contributions, mean_large_rectangle_contrib);
1798 if (AtMinOrMaxInt64I(min_contrib)) break;
1799 min_contrib = CeilRatio(min_contrib, 2);
1800
1801 // shift contribution by current_start_min.
1802 if (!AddProductTo(sum_energy, current_start_min, &min_contrib)) break;
1803
1804 // The efficacy of the cut is the normalized violation of the above
1805 // equation. We will normalize by the sqrt of the sum of squared energies.
1806 const double efficacy = (ToDouble(min_contrib) - lp_contrib) /
1807 std::sqrt(ToDouble(sum_square_energy));
1808
1809 // For a given start time, we only keep the best cut.
1810 // The reason is that if the cut is strongly violated, we can get a
1811 // sequence of violated cuts as we add more tasks. These new cuts will
1812 // be less violated, but will not bring anything useful to the LP
1813 // relaxation. At the same time, this sequence of cuts can push out
1814 // other cuts from a disjoint set of tasks.
1815 if (efficacy > best_efficacy) {
1816 best_efficacy = efficacy;
1817 best_end = i;
1818 best_min_contrib = min_contrib;
1819 best_uses_subset_sum = reachable_capacity < capacity_max;
1820 }
1821 }
1822
1823 // We have inserted all tasks. Have we found a violated cut?
1824 // If so, add the most violated one to the top_n cut container.
1825 if (best_end != -1) {
1826 LinearConstraintBuilder cut(model, best_min_contrib, kMaxIntegerValue);
1827 bool is_lifted = false;
1828 bool add_energy_to_name = false;
1829 for (int i = 0; i <= best_end; ++i) {
1830 const CompletionTimeEvent* event = residual_tasks[i];
1831 is_lifted |= event->lifted;
1832 add_energy_to_name |= event->use_decomposed_energy_min;
1833 cut.AddTerm(event->end, event->energy_min);
1834 }
1835 std::string full_name(cut_name);
1836 if (add_energy_to_name) full_name.append("_energy");
1837 if (is_lifted) full_name.append("_lifted");
1838 if (best_uses_subset_sum) full_name.append("_subsetsum");
1839 top_n_cuts.AddCut(cut.Build(), full_name, lp_values);
1840 }
1841 }
1842}
1843
1844 CutGenerator CreateNoOverlapCompletionTimeCutGenerator(
1845 SchedulingConstraintHelper* helper, Model* model) {
1846 CutGenerator result;
1847 result.only_run_at_level_zero = true;
1848 AddIntegerVariableFromIntervals(helper, model, &result.vars,
1851
1852 result.generate_cuts = [helper, model](LinearConstraintManager* manager) {
1853 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1854
1855 auto generate_cuts = [model, manager,
1856 helper](bool time_is_forward) -> bool {
1857 if (!helper->SynchronizeAndSetTimeDirection(time_is_forward)) {
1858 return false;
1859 }
1860 std::vector<std::unique_ptr<CompletionTimeEvent>> events;
1861 const auto& lp_values = manager->LpValues();
1862 for (int index = 0; index < helper->NumTasks(); ++index) {
1863 if (!helper->IsPresent(index)) continue;
1864 const IntegerValue size_min = helper->SizeMin(index);
1865 if (size_min > 0) {
1866 auto event =
1867 std::make_unique<CompletionTimeEvent>(index, helper, nullptr);
1868 event->lp_end = event->end.LpValue(lp_values);
1869 events.push_back(std::move(event));
1870 }
1871 }
1872
1873 CtExhaustiveHelper helper;
1874 helper.Init(absl::MakeSpan(events), model);
1875
1876 TopNCuts top_n_cuts(5);
1877 std::vector<CompletionTimeEvent> residual_event_storage;
1878 std::vector<absl::Span<std::unique_ptr<CompletionTimeEvent>>>
1879 disjoint_events = SplitEventsInIndendentSets(events);
1880 for (const absl::Span<std::unique_ptr<CompletionTimeEvent>>& cluster :
1881 disjoint_events) {
1883 "NoOverlapCompletionTimeExhaustive", lp_values, cluster,
1884 /*capacity_max=*/IntegerValue(1), helper, model, top_n_cuts,
1885 residual_event_storage)) {
1886 return false;
1887 }
1888
1890 "NoOverlapCompletionTimeQueyrane", lp_values, cluster,
1891 /*capacity_max=*/IntegerValue(1), model, top_n_cuts,
1892 residual_event_storage);
1893 }
1894 top_n_cuts.TransferToManager(manager);
1895 return true;
1896 };
1897 if (!generate_cuts(/*time_is_forward=*/true)) return false;
1898 if (!generate_cuts(/*time_is_forward=*/false)) return false;
1899 return true;
1900 };
1901 return result;
1902}
1903
1904 CutGenerator CreateCumulativeCompletionTimeCutGenerator(
1905 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
1906 const AffineExpression& capacity, Model* model) {
1907 CutGenerator result;
1908 result.only_run_at_level_zero = true;
1909 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
1910 &result.vars);
1911 AddIntegerVariableFromIntervals(helper, model, &result.vars,
1914
1915 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
1916 SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
1917 result.generate_cuts = [integer_trail, helper, demands_helper, capacity,
1918 sat_solver,
1919 model](LinearConstraintManager* manager) -> bool {
1920 auto generate_cuts = [integer_trail, sat_solver, model, manager, helper,
1921 demands_helper,
1922 capacity](bool time_is_forward) -> bool {
1923 DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0);
1924 if (!helper->SynchronizeAndSetTimeDirection(time_is_forward)) {
1925 return false;
1926 }
1927 if (!demands_helper->CacheAllEnergyValues()) return true;
1928
1929 std::vector<std::unique_ptr<CompletionTimeEvent>> events;
1930 const auto& lp_values = manager->LpValues();
1931 const VariablesAssignment& assignment = sat_solver->Assignment();
1932 for (int index = 0; index < helper->NumTasks(); ++index) {
1933 if (!helper->IsPresent(index)) continue;
1934 if (!DecomposedEnergyIsPropagated(assignment, index, helper,
1935 demands_helper)) {
1936 return true; // Propagation did not reach a fixed point. Abort.
1937 }
1938 if (helper->SizeMin(index) > 0 &&
1939 demands_helper->DemandMin(index) > 0) {
1940 auto event = std::make_unique<CompletionTimeEvent>(index, helper,
1941 demands_helper);
1942 event->lp_end = event->end.LpValue(lp_values);
1943 events.push_back(std::move(event));
1944 }
1945 }
1946
1947 CtExhaustiveHelper helper;
1948 helper.Init(absl::MakeSpan(events), model);
1949
1950 const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
1951 TopNCuts top_n_cuts(5);
1952 std::vector<absl::Span<std::unique_ptr<CompletionTimeEvent>>>
1953 disjoint_events = SplitEventsInIndendentSets(events);
1954 std::vector<CompletionTimeEvent> residual_event_storage;
1955 for (absl::Span<std::unique_ptr<CompletionTimeEvent>> cluster :
1956 disjoint_events) {
1958 "CumulativeCompletionTimeExhaustive", lp_values, cluster,
1959 capacity_max, helper, model, top_n_cuts,
1960 residual_event_storage)) {
1961 return false;
1962 }
1963
1965 "CumulativeCompletionTimeQueyrane", lp_values, cluster,
1966 capacity_max, model, top_n_cuts, residual_event_storage);
1967 }
1968 top_n_cuts.TransferToManager(manager);
1969 return true;
1970 };
1971
1972 if (!generate_cuts(/*time_is_forward=*/true)) return false;
1973 if (!generate_cuts(/*time_is_forward=*/false)) return false;
1974 return true;
1975 };
1976 return result;
1977}
1978
1979} // namespace sat
1980} // namespace operations_research