Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
scheduling_cuts.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <stdint.h>
17
18#include <algorithm>
19#include <cmath>
20#include <functional>
21#include <limits>
22#include <optional>
23#include <string>
24#include <tuple>
25#include <utility>
26#include <vector>
27
28#include "absl/base/attributes.h"
29#include "absl/container/btree_set.h"
30#include "absl/container/flat_hash_map.h"
31#include "absl/log/check.h"
32#include "absl/strings/str_cat.h"
33#include "absl/strings/string_view.h"
34#include "absl/types/span.h"
38#include "ortools/sat/cuts.h"
39#include "ortools/sat/integer.h"
44#include "ortools/sat/model.h"
47#include "ortools/sat/util.h"
51
52namespace operations_research {
53namespace sat {
54
55namespace {
56// Minimum amount of violation of the cut constraint by the solution. This
57// is needed to avoid numerical issues and adding cuts with minor effect.
58const double kMinCutViolation = 1e-4;
59} // namespace
60
62 : x_start_min(x_helper->StartMin(t)),
63 x_start_max(x_helper->StartMax(t)),
64 x_end_min(x_helper->EndMin(t)),
65 x_end_max(x_helper->EndMax(t)),
66 x_size_min(x_helper->SizeMin(t)) {}
67
70 : BaseEvent(t, x_helper) {}
71
72 // We need this for linearizing the energy in some cases.
74
75 // If set, this event is optional and its presence is controlled by this.
77
78 // A linear expression which is a valid lower bound on the total energy of
79 // this event. We also cache the activity of the expression to not recompute
80 // it all the time.
83
84  // True if linearized_energy is not exact and is a McCormick relaxation.
85 bool energy_is_quadratic = false;
86
87 // The actual value of the presence literal of the interval(s) is checked
88 // when the event is created. A value of kNoLiteralIndex indicates that either
89 // the interval was not optional, or that its presence literal is true at
90 // level zero.
92
93 // Computes the mandatory minimal overlap of the interval with the time window
94 // [start, end].
95 IntegerValue GetMinOverlap(IntegerValue start, IntegerValue end) const {
96 return std::max(std::min({x_end_min - start, end - x_start_max, x_size_min,
97 end - start}),
98 IntegerValue(0));
99 }
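  // Illustration (example values, not from the original source): for a task
  // with x_end_min = 5, x_start_max = 7, x_size_min = 3 and the time window
  // [start, end) = [4, 10), the call GetMinOverlap(4, 10) returns
  //   max(min(5 - 4, 10 - 7, 3, 10 - 4), 0) = 1,
  // i.e. wherever the task is placed within its bounds, at least one unit of
  // it lies inside the window (the tightest placement ends at time 5).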
100
101 // This method expects all the other fields to have been filled before.
102 // It must be called before the EnergyEvent is used.
103 ABSL_MUST_USE_RESULT bool FillEnergyLp(
104 AffineExpression x_size,
106 Model* model) {
107 LinearConstraintBuilder tmp_energy(model);
108 if (IsPresent()) {
109 if (!decomposed_energy.empty()) {
110 if (!tmp_energy.AddDecomposedProduct(decomposed_energy)) return false;
111 } else {
112 tmp_energy.AddQuadraticLowerBound(x_size, y_size,
113 model->GetOrCreate<IntegerTrail>(),
115 }
116 } else {
118 energy_min)) {
119 return false;
120 }
121 }
122 linearized_energy = tmp_energy.BuildExpression();
124 return true;
125 }
126
127 std::string DebugString() const {
128 return absl::StrCat(
129 "EnergyEvent(x_start_min = ", x_start_min.value(),
130 ", x_start_max = ", x_start_max.value(),
131 ", x_end_min = ", x_end_min.value(),
132 ", x_end_max = ", x_end_max.value(),
133 ", y_size = ", y_size.DebugString(), ", energy = ",
134 decomposed_energy.empty()
135 ? "{}"
136 : absl::StrCat(decomposed_energy.size(), " terms"),
137 ", presence_literal_index = ", presence_literal_index.value(), ")");
138 }
139};
140
141namespace {
142
143// Compute the energetic contribution of a task in a given time window, and
144// add it to the cut. It returns false if it tried to generate the cut, and
145// failed.
146ABSL_MUST_USE_RESULT bool AddOneEvent(
147 const EnergyEvent& event, IntegerValue window_start,
148 IntegerValue window_end, LinearConstraintBuilder* cut,
149 bool* add_energy_to_name = nullptr, bool* add_quadratic_to_name = nullptr,
150 bool* add_opt_to_name = nullptr, bool* add_lifted_to_name = nullptr) {
151 DCHECK(cut != nullptr);
152
153 if (event.x_end_min <= window_start || event.x_start_max >= window_end) {
154 return true; // Event can move outside the time window.
155 }
156
157 if (event.x_start_min >= window_start && event.x_end_max <= window_end) {
158 // Event is always contained by the time window.
159 cut->AddLinearExpression(event.linearized_energy);
160
161 if (event.energy_is_quadratic && add_quadratic_to_name != nullptr) {
162 *add_quadratic_to_name = true;
163 }
164 if (add_energy_to_name != nullptr &&
165 event.energy_min > event.x_size_min * event.y_size_min) {
166 *add_energy_to_name = true;
167 }
168 if (!event.IsPresent() && add_opt_to_name != nullptr) {
169 *add_opt_to_name = true;
170 }
171 } else { // The event has a mandatory overlap with the time window.
172 const IntegerValue min_overlap =
173 event.GetMinOverlap(window_start, window_end);
174 if (min_overlap <= 0) return true;
175 if (add_lifted_to_name != nullptr) *add_lifted_to_name = true;
176
177 if (event.IsPresent()) {
178 const std::vector<LiteralValueValue>& energy = event.decomposed_energy;
179 if (energy.empty()) {
180 cut->AddTerm(event.y_size, min_overlap);
181 } else {
182 const IntegerValue window_size = window_end - window_start;
183 for (const auto [lit, fixed_size, fixed_demand] : energy) {
184 const IntegerValue alt_end_min =
185 std::max(event.x_end_min, event.x_start_min + fixed_size);
186 const IntegerValue alt_start_max =
187 std::min(event.x_start_max, event.x_end_max - fixed_size);
188 const IntegerValue energy_min =
189 fixed_demand *
190 std::min({alt_end_min - window_start, window_end - alt_start_max,
191 fixed_size, window_size});
192 if (energy_min == 0) continue;
193 if (!cut->AddLiteralTerm(lit, energy_min)) return false;
194 }
195 if (add_energy_to_name != nullptr) *add_energy_to_name = true;
196 }
197 } else {
198 if (add_opt_to_name != nullptr) *add_opt_to_name = true;
199 const IntegerValue min_energy = ComputeEnergyMinInWindow(
200 event.x_start_min, event.x_start_max, event.x_end_min,
201 event.x_end_max, event.x_size_min, event.y_size_min,
202 event.decomposed_energy, window_start, window_end);
203 if (min_energy > event.x_size_min * event.y_size_min &&
204 add_energy_to_name != nullptr) {
205 *add_energy_to_name = true;
206 }
207 if (!cut->AddLiteralTerm(Literal(event.presence_literal_index),
208 min_energy)) {
209 return false;
210 }
211 }
212 }
213 return true;
214}
215
216// Returns the list of all possible demand values for the given event.
217// It returns an empty vector if the number of values is too large.
218std::vector<int64_t> FindPossibleDemands(const EnergyEvent& event,
219 const VariablesAssignment& assignment,
220 IntegerTrail* integer_trail) {
221 std::vector<int64_t> possible_demands;
222 if (event.decomposed_energy.empty()) {
223 if (integer_trail->IsFixed(event.y_size)) {
224 possible_demands.push_back(
225 integer_trail->FixedValue(event.y_size).value());
226 } else {
227 if (integer_trail->InitialVariableDomain(event.y_size.var).Size() >
228 1000000) {
229 return {};
230 }
231 for (const int64_t var_value :
232 integer_trail->InitialVariableDomain(event.y_size.var).Values()) {
233 possible_demands.push_back(event.y_size.ValueAt(var_value).value());
234 }
235 }
236 } else {
237 for (const auto [lit, fixed_size, fixed_demand] : event.decomposed_energy) {
238 if (assignment.LiteralIsFalse(lit)) continue;
239 possible_demands.push_back(fixed_demand.value());
240 }
241 }
242 return possible_demands;
243}
244
245// This generates the actual cut and computes its activity vs the
246// available_energy_lp.
247bool CutIsEfficient(
248 absl::Span<const EnergyEvent> events, IntegerValue window_start,
249 IntegerValue window_end, double available_energy_lp,
250 const util_intops::StrongVector<IntegerVariable, double>& lp_values,
251 LinearConstraintBuilder* temp_builder) {
252 temp_builder->Clear();
253 for (const EnergyEvent& event : events) {
254 if (!AddOneEvent(event, window_start, window_end, temp_builder)) {
255 return false;
256 }
257 }
258 return temp_builder->BuildExpression().LpValue(lp_values) >=
259 available_energy_lp * (1.0 + kMinCutViolation);
260}
261
262} // namespace
263
264// This cumulative energetic cut generator will split the cumulative span in 2
265// regions.
266//
267// In the region before the min of the makespan, we will compute a more
268// precise reachable profile and have a better estimation of the energy
269// available between two time points. The improvement can come from two sources:
270// - subset sum indicates that the max capacity cannot be reached.
271// - sum of demands < max capacity.
272//
273// In the region after the min of the makespan, we will use
274// fixed_capacity * (makespan - makespan_min)
275// as the available energy.
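// Illustration (hypothetical numbers, not part of the original source): with a
// fixed capacity of 4, makespan_min = 10 and a makespan LP value of 12.5, the
// second region contributes 4 * (12.5 - 10) = 10 units of energy in the LP,
// while the first region is bounded strip by strip by the reachable profile
// (at most 4 per time unit, and possibly less when the subset sum argument
// below shows that a capacity of 4 cannot be reached).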
277 absl::string_view cut_name,
279 std::vector<EnergyEvent> events, IntegerValue capacity,
280 AffineExpression makespan, TimeLimit* time_limit, Model* model,
281 LinearConstraintManager* manager) {
282 // Checks the precondition of the code.
283 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
284 DCHECK(integer_trail->IsFixed(capacity));
285
286 const VariablesAssignment& assignment =
287 model->GetOrCreate<Trail>()->Assignment();
288
289 // Currently, we look at all the possible time windows, and will push all cuts
290 // in the TopNCuts object. From our observations, this generator creates only
291 // a few cuts for a given run.
292 //
293// The complexity of this loop is n^3. If we follow the latest research, we
294// could implement this in n log^2(n). Still, this is not visible in the
295// profile as we only call this method at the root node.
296 struct OverloadedTimeWindowWithMakespan {
297 IntegerValue start;
298 IntegerValue end;
299 IntegerValue fixed_energy_rhs; // Can be complemented by the makespan.
300 bool use_makespan = false;
301 bool use_subset_sum = false;
302 };
303
304 std::vector<OverloadedTimeWindowWithMakespan> overloaded_time_windows;
305 // Compute relevant time points.
306 // TODO(user): We could reduce this set.
307 // TODO(user): we can compute the max usage between makespan_min and
308 // makespan_max.
309 std::vector<IntegerValue> time_points;
310 std::vector<std::vector<int64_t>> possible_demands(events.size());
311 const IntegerValue makespan_min = integer_trail->LowerBound(makespan);
312 IntegerValue max_end_min = kMinIntegerValue; // Used to abort early.
313 IntegerValue max_end_max = kMinIntegerValue; // Used as a sentinel.
314 for (int i = 0; i < events.size(); ++i) {
315 const EnergyEvent& event = events[i];
316 if (event.x_start_min < makespan_min) {
317 time_points.push_back(event.x_start_min);
318 }
319 if (event.x_start_max < makespan_min) {
320 time_points.push_back(event.x_start_max);
321 }
322 if (event.x_end_min < makespan_min) {
323 time_points.push_back(event.x_end_min);
324 }
325 if (event.x_end_max < makespan_min) {
326 time_points.push_back(event.x_end_max);
327 }
328 max_end_min = std::max(max_end_min, event.x_end_min);
329 max_end_max = std::max(max_end_max, event.x_end_max);
330 possible_demands[i] = FindPossibleDemands(event, assignment, integer_trail);
331 }
332 time_points.push_back(makespan_min);
333 time_points.push_back(max_end_max);
335
336 const int num_time_points = time_points.size();
337 absl::flat_hash_map<IntegerValue, IntegerValue> reachable_capacity_ending_at;
338
339 MaxBoundedSubsetSum reachable_capacity_subset_sum(capacity.value());
340 for (int i = 1; i < num_time_points; ++i) {
341 const IntegerValue window_start = time_points[i - 1];
342 const IntegerValue window_end = time_points[i];
343 reachable_capacity_subset_sum.Reset(capacity.value());
344 for (int i = 0; i < events.size(); ++i) {
345 const EnergyEvent& event = events[i];
346 if (event.x_start_min >= window_end || event.x_end_max <= window_start) {
347 continue;
348 }
349 if (possible_demands[i].empty()) { // Number of values was too large.
350 // In practice, it stops the DP as the upper bound is reached.
351 reachable_capacity_subset_sum.Add(capacity.value());
352 } else {
353 reachable_capacity_subset_sum.AddChoices(possible_demands[i]);
354 }
355 if (reachable_capacity_subset_sum.CurrentMax() == capacity.value()) break;
356 }
357 reachable_capacity_ending_at[window_end] =
358 reachable_capacity_subset_sum.CurrentMax();
359 }
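  // Illustration (hypothetical numbers, not from the original source): if the
  // fixed capacity is 5 and the only tasks overlapping a window have possible
  // demands {2} and {2}, the subset sum reaches at most 4, so the energy
  // available in that window is later counted as 4 per time unit instead of 5.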
360
361 const double capacity_lp = ToDouble(capacity);
362 const double makespan_lp = makespan.LpValue(lp_values);
363 const double makespan_min_lp = ToDouble(makespan_min);
364 LinearConstraintBuilder temp_builder(model);
365 for (int i = 0; i + 1 < num_time_points; ++i) {
366 // Checks the time limit if the problem is too big.
367 if (events.size() > 50 && time_limit->LimitReached()) return;
368
369 const IntegerValue window_start = time_points[i];
370 // After max_end_min, all tasks can fit before window_start.
371 if (window_start >= max_end_min) break;
372
373 IntegerValue cumulated_max_energy = 0;
374 IntegerValue cumulated_max_energy_before_makespan_min = 0;
375 bool use_subset_sum = false;
376 bool use_subset_sum_before_makespan_min = false;
377
378 for (int j = i + 1; j < num_time_points; ++j) {
379 const IntegerValue strip_start = time_points[j - 1];
380 const IntegerValue window_end = time_points[j];
381 const IntegerValue max_reachable_capacity_in_current_strip =
382 reachable_capacity_ending_at[window_end];
383 DCHECK_LE(max_reachable_capacity_in_current_strip, capacity);
384
385 // Update states for the name of the generated cut.
386 if (max_reachable_capacity_in_current_strip < capacity) {
387 use_subset_sum = true;
388 if (window_end <= makespan_min) {
389 use_subset_sum_before_makespan_min = true;
390 }
391 }
392
393 const IntegerValue energy_in_strip =
394 (window_end - strip_start) * max_reachable_capacity_in_current_strip;
395 cumulated_max_energy += energy_in_strip;
396 if (window_end <= makespan_min) {
397 cumulated_max_energy_before_makespan_min += energy_in_strip;
398 }
399
400 if (window_start >= makespan_min) {
401 DCHECK_EQ(cumulated_max_energy_before_makespan_min, 0);
402 }
403 DCHECK_LE(cumulated_max_energy, capacity * (window_end - window_start));
404 const double max_energy_up_to_makespan_lp =
405 strip_start >= makespan_min
406 ? ToDouble(cumulated_max_energy_before_makespan_min) +
407 (makespan_lp - makespan_min_lp) * capacity_lp
408 : std::numeric_limits<double>::infinity();
409
410 // We prefer using the makespan as the cut will tighten itself when the
411 // objective value is improved.
412 //
413 // We reuse the min cut violation to allow some slack in the comparison
414 // between the two computed energy values.
415 const bool use_makespan =
416 max_energy_up_to_makespan_lp <=
417 ToDouble(cumulated_max_energy) + kMinCutViolation;
418 const double available_energy_lp = use_makespan
419 ? max_energy_up_to_makespan_lp
420 : ToDouble(cumulated_max_energy);
421 if (CutIsEfficient(events, window_start, window_end, available_energy_lp,
422 lp_values, &temp_builder)) {
423 OverloadedTimeWindowWithMakespan w;
424 w.start = window_start;
425 w.end = window_end;
426 w.fixed_energy_rhs = use_makespan
427 ? cumulated_max_energy_before_makespan_min
428 : cumulated_max_energy;
429 w.use_makespan = use_makespan;
430 w.use_subset_sum =
431 use_makespan ? use_subset_sum_before_makespan_min : use_subset_sum;
432 overloaded_time_windows.push_back(std::move(w));
433 }
434 }
435 }
436
437 if (overloaded_time_windows.empty()) return;
438
439 VLOG(3) << "GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity: "
440 << events.size() << " events, " << time_points.size()
441 << " time points, " << overloaded_time_windows.size()
442 << " overloads detected";
443
444 TopNCuts top_n_cuts(5);
445 for (const auto& w : overloaded_time_windows) {
446 bool cut_generated = true;
447 bool add_opt_to_name = false;
448 bool add_lifted_to_name = false;
449 bool add_quadratic_to_name = false;
450 bool add_energy_to_name = false;
451 LinearConstraintBuilder cut(model, kMinIntegerValue, w.fixed_energy_rhs);
452
453 if (w.use_makespan) { // Add the energy from makespan_min to makespan.
454 cut.AddConstant(makespan_min * capacity);
455 cut.AddTerm(makespan, -capacity);
456 }
457
458 // Add contributions from all events.
459 for (const EnergyEvent& event : events) {
460 if (!AddOneEvent(event, w.start, w.end, &cut, &add_energy_to_name,
461 &add_quadratic_to_name, &add_opt_to_name,
462 &add_lifted_to_name)) {
463 cut_generated = false;
464 break; // Exit the event loop.
465 }
466 }
467
468 if (cut_generated) {
469 std::string full_name(cut_name);
470 if (add_opt_to_name) full_name.append("_optional");
471 if (add_quadratic_to_name) full_name.append("_quadratic");
472 if (add_lifted_to_name) full_name.append("_lifted");
473 if (add_energy_to_name) full_name.append("_energy");
474 if (w.use_makespan) full_name.append("_makespan");
475 if (w.use_subset_sum) full_name.append("_subsetsum");
476 top_n_cuts.AddCut(cut.Build(), full_name, lp_values);
477 }
478 }
479
480 top_n_cuts.TransferToManager(manager);
481}
482
484 absl::string_view cut_name,
486 std::vector<EnergyEvent> events, const AffineExpression& capacity,
488 double max_possible_energy_lp = 0.0;
489 for (const EnergyEvent& event : events) {
490 max_possible_energy_lp += event.linearized_energy_lp_value;
491 }
492
493 // Currently, we look at all the possible time windows, and will push all cuts
494 // in the TopNCuts object. From our observations, this generator creates only
495 // a few cuts for a given run.
496 //
497// The complexity of this loop is n^3. If we follow the latest research, we
498// could implement this in n log^2(n). Still, this is not visible in the
499// profile as we only call this method at the root node.
500 struct OverloadedTimeWindow {
501 IntegerValue start;
502 IntegerValue end;
503 };
504 std::vector<OverloadedTimeWindow> overloaded_time_windows;
505 const double capacity_lp = capacity.LpValue(lp_values);
506
507 // Compute relevant time points.
508 // TODO(user): We could reduce this set.
509 absl::btree_set<IntegerValue> time_points_set;
510 IntegerValue max_end_min = kMinIntegerValue;
511 for (const EnergyEvent& event : events) {
512 time_points_set.insert(event.x_start_min);
513 time_points_set.insert(event.x_start_max);
514 time_points_set.insert(event.x_end_min);
515 time_points_set.insert(event.x_end_max);
516 max_end_min = std::max(max_end_min, event.x_end_min);
517 }
518 const std::vector<IntegerValue> time_points(time_points_set.begin(),
519 time_points_set.end());
520 const int num_time_points = time_points.size();
521
522 LinearConstraintBuilder temp_builder(model);
523 for (int i = 0; i + 1 < num_time_points; ++i) {
524 // Checks the time limit if the problem is too big.
525 if (events.size() > 50 && time_limit->LimitReached()) return;
526
527 const IntegerValue window_start = time_points[i];
528 // After max_end_min, all tasks can fit before window_start.
529 if (window_start >= max_end_min) break;
530
531 for (int j = i + 1; j < num_time_points; ++j) {
532 const IntegerValue window_end = time_points[j];
533 const double available_energy_lp =
534 ToDouble(window_end - window_start) * capacity_lp;
535 if (available_energy_lp >= max_possible_energy_lp) break;
536 if (CutIsEfficient(events, window_start, window_end, available_energy_lp,
537 lp_values, &temp_builder)) {
538 overloaded_time_windows.push_back({window_start, window_end});
539 }
540 }
541 }
542
543 if (overloaded_time_windows.empty()) return;
544
545 VLOG(3) << "GenerateCumulativeEnergeticCuts: " << events.size() << " events, "
546 << time_points.size() << " time points, "
547 << overloaded_time_windows.size() << " overloads detected";
548
549 TopNCuts top_n_cuts(5);
550 for (const auto& [window_start, window_end] : overloaded_time_windows) {
551 bool cut_generated = true;
552 bool add_opt_to_name = false;
553 bool add_lifted_to_name = false;
554 bool add_quadratic_to_name = false;
555 bool add_energy_to_name = false;
556 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
557
558 // Compute the max energy available for the tasks.
559 cut.AddTerm(capacity, window_start - window_end);
560
561 // Add all contributions.
562 for (const EnergyEvent& event : events) {
563 if (!AddOneEvent(event, window_start, window_end, &cut,
564 &add_energy_to_name, &add_quadratic_to_name,
565 &add_opt_to_name, &add_lifted_to_name)) {
566 cut_generated = false;
567 break; // Exit the event loop.
568 }
569 }
570
571 if (cut_generated) {
572 std::string full_name(cut_name);
573 if (add_opt_to_name) full_name.append("_optional");
574 if (add_quadratic_to_name) full_name.append("_quadratic");
575 if (add_lifted_to_name) full_name.append("_lifted");
576 if (add_energy_to_name) full_name.append("_energy");
577 top_n_cuts.AddCut(cut.Build(), full_name, lp_values);
578 }
579 }
580
581 top_n_cuts.TransferToManager(manager);
582}
583
585 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
586 const AffineExpression& capacity,
587 const std::optional<AffineExpression>& makespan, Model* model) {
588 CutGenerator result;
589 result.only_run_at_level_zero = true;
590 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
591 &result.vars);
593 helper, model, &result.vars,
595 if (makespan.has_value() && !makespan.value().IsConstant()) {
596 result.vars.push_back(makespan.value().var);
597 }
599 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
601
602 result.generate_cuts = [makespan, capacity, demands_helper, helper,
603 integer_trail, time_limit,
604 model](LinearConstraintManager* manager) {
605 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
606 if (!demands_helper->CacheAllEnergyValues()) return true;
607
608 const auto& lp_values = manager->LpValues();
609 std::vector<EnergyEvent> events;
610 for (int i = 0; i < helper->NumTasks(); ++i) {
611 if (helper->IsAbsent(i)) continue;
612 // TODO(user): use level 0 bounds ?
613 if (demands_helper->DemandMax(i) == 0 || helper->SizeMin(i) == 0) {
614 continue;
615 }
616
617 EnergyEvent e(i, helper);
618 e.y_size = demands_helper->Demands()[i];
619 e.y_size_min = demands_helper->DemandMin(i);
620 e.decomposed_energy = demands_helper->DecomposedEnergies()[i];
621 e.energy_min = demands_helper->EnergyMin(i);
622 e.energy_is_quadratic = demands_helper->EnergyIsQuadratic(i);
623 if (!helper->IsPresent(i)) {
625 }
626 // We can always skip events.
627 if (!e.FillEnergyLp(helper->Sizes()[i], lp_values, model)) continue;
628 events.push_back(e);
629 }
630
631 if (makespan.has_value() && integer_trail->IsFixed(capacity)) {
633 "CumulativeEnergyM", lp_values, events,
634 integer_trail->FixedValue(capacity), makespan.value(), time_limit,
635 model, manager);
636
637 } else {
638 GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, events,
639 capacity, time_limit, model, manager);
640 }
641 return true;
642 };
643
644 return result;
645}
646
649 const std::optional<AffineExpression>& makespan, Model* model) {
650 CutGenerator result;
651 result.only_run_at_level_zero = true;
653 helper, model, &result.vars,
655 if (makespan.has_value() && !makespan.value().IsConstant()) {
656 result.vars.push_back(makespan.value().var);
657 }
660
661 result.generate_cuts = [makespan, helper, time_limit,
662 model](LinearConstraintManager* manager) {
663 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
664
665 const auto& lp_values = manager->LpValues();
666 std::vector<EnergyEvent> events;
667 for (int i = 0; i < helper->NumTasks(); ++i) {
668 if (helper->IsAbsent(i)) continue;
669 if (helper->SizeMin(i) == 0) continue;
670
671 EnergyEvent e(i, helper);
672 e.y_size = IntegerValue(1);
673 e.y_size_min = IntegerValue(1);
675 if (!helper->IsPresent(i)) {
677 }
678 // We can always skip events.
679 if (!e.FillEnergyLp(helper->Sizes()[i], lp_values, model)) continue;
680 events.push_back(e);
681 }
682
683 if (makespan.has_value()) {
685 "NoOverlapEnergyM", lp_values, events,
686 /*capacity=*/IntegerValue(1), makespan.value(), time_limit, model,
687 manager);
688 } else {
689 GenerateCumulativeEnergeticCuts("NoOverlapEnergy", lp_values, events,
690 /*capacity=*/IntegerValue(1), time_limit,
691 model, manager);
692 }
693 return true;
694 };
695 return result;
696}
697
699 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
700 const AffineExpression& capacity, Model* model) {
701 CutGenerator result;
702 result.only_run_at_level_zero = true;
703 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
704 &result.vars);
705 AddIntegerVariableFromIntervals(helper, model, &result.vars,
708
709 struct TimeTableEvent {
710 int interval_index;
711 IntegerValue time;
712 LinearExpression demand;
713 double demand_lp = 0.0;
714 bool is_positive = false;
715 bool use_energy = false;
716 bool is_optional = false;
717 };
718
719 result.generate_cuts = [helper, capacity, demands_helper,
720 model](LinearConstraintManager* manager) {
721 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
722 if (!demands_helper->CacheAllEnergyValues()) return true;
723
724 TopNCuts top_n_cuts(5);
725 std::vector<TimeTableEvent> events;
726 const auto& lp_values = manager->LpValues();
727 if (lp_values.empty()) return true; // No linear relaxation.
728 const double capacity_lp = capacity.LpValue(lp_values);
729
730 // Iterate through the intervals. If start_max < end_min, the demand
731 // is mandatory.
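    // Illustration (example values, not from the original source): a task with
    // start_max = 3 and end_min = 5 must occupy [3, 5) in every solution, so
    // its demand is counted on that mandatory part; a task with
    // start_max >= end_min has no mandatory part and is skipped below.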
732 for (int i = 0; i < helper->NumTasks(); ++i) {
733 if (helper->IsAbsent(i)) continue;
734
735 const IntegerValue start_max = helper->StartMax(i);
736 const IntegerValue end_min = helper->EndMin(i);
737
738 if (start_max >= end_min) continue;
739
740 TimeTableEvent e1;
741 e1.interval_index = i;
742 e1.time = start_max;
743 {
744 LinearConstraintBuilder builder(model);
745 // Ignore the interval if the linearized demand fails.
746 if (!demands_helper->AddLinearizedDemand(i, &builder)) continue;
747 e1.demand = builder.BuildExpression();
748 }
749 e1.demand_lp = e1.demand.LpValue(lp_values);
750 e1.is_positive = true;
751 e1.use_energy = !demands_helper->DecomposedEnergies()[i].empty();
752 e1.is_optional = !helper->IsPresent(i);
753
754 TimeTableEvent e2 = e1;
755 e2.time = end_min;
756 e2.is_positive = false;
757
758 events.push_back(e1);
759 events.push_back(e2);
760 }
761
762 // Sort events by time.
763    // It is also important that all positive events with the same time as
764    // negative events appear after them, for the correctness of the algo below.
765 std::sort(events.begin(), events.end(),
766 [](const TimeTableEvent& i, const TimeTableEvent& j) {
767 if (i.time == j.time) {
768 if (i.is_positive == j.is_positive) {
769 return i.interval_index < j.interval_index;
770 }
771 return !i.is_positive;
772 }
773 return i.time < j.time;
774 });
775
776 double sum_of_demand_lp = 0.0;
777 bool positive_event_added_since_last_check = false;
778 for (int i = 0; i < events.size(); ++i) {
779 const TimeTableEvent& e = events[i];
780 if (e.is_positive) {
781 positive_event_added_since_last_check = true;
782 sum_of_demand_lp += e.demand_lp;
783 continue;
784 }
785
786 if (positive_event_added_since_last_check) {
787 // Reset positive event added. We do not want to create cuts for
788 // each negative event in sequence.
789 positive_event_added_since_last_check = false;
790
791 if (sum_of_demand_lp >= capacity_lp + kMinCutViolation) {
792 // Create cut.
793 bool use_energy = false;
794 bool use_optional = false;
795 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
796 cut.AddTerm(capacity, IntegerValue(-1));
797 // The i-th event, which is a negative event, follows a positive
798 // event. We must ignore it in our cut generation.
799 DCHECK(!events[i].is_positive);
800 const IntegerValue time_point = events[i - 1].time;
801
802 for (int j = 0; j < i; ++j) {
803 const TimeTableEvent& cut_event = events[j];
804 const int t = cut_event.interval_index;
805 DCHECK_LE(helper->StartMax(t), time_point);
806 if (!cut_event.is_positive || helper->EndMin(t) <= time_point) {
807 continue;
808 }
809
810 cut.AddLinearExpression(cut_event.demand, IntegerValue(1));
811 use_energy |= cut_event.use_energy;
812 use_optional |= cut_event.is_optional;
813 }
814
815 std::string cut_name = "CumulativeTimeTable";
816 if (use_optional) cut_name += "_optional";
817 if (use_energy) cut_name += "_energy";
818 top_n_cuts.AddCut(cut.Build(), cut_name, lp_values);
819 }
820 }
821
822 // The demand_lp was added in case of a positive event. We need to
823 // remove it for a negative event.
824 sum_of_demand_lp -= e.demand_lp;
825 }
826 top_n_cuts.TransferToManager(manager);
827 return true;
828 };
829 return result;
830}
831
832// Cached Information about one interval.
833// Note that everything must correspond to level zero bounds, otherwise the
834// generated cuts are not valid.
835
838 : start_min(helper->StartMin(t)),
839 start_max(helper->StartMax(t)),
840 start(helper->Starts()[t]),
841 end_min(helper->EndMin(t)),
842 end_max(helper->EndMax(t)),
843 end(helper->Ends()[t]),
844 size_min(helper->SizeMin(t)) {}
845
846 IntegerValue start_min;
847 IntegerValue start_max;
849 IntegerValue end_min;
850 IntegerValue end_max;
852 IntegerValue size_min;
853
854 IntegerValue demand_min;
855};
856
858 absl::string_view cut_name,
860 std::vector<CachedIntervalData> events, IntegerValue capacity_max,
861 Model* model, LinearConstraintManager* manager) {
862 TopNCuts top_n_cuts(5);
863 const int num_events = events.size();
864 if (num_events <= 1) return;
865
866 std::sort(events.begin(), events.end(),
867 [](const CachedIntervalData& e1, const CachedIntervalData& e2) {
868 return e1.start_min < e2.start_min ||
869 (e1.start_min == e2.start_min && e1.end_max < e2.end_max);
870 });
871
872 // Balas disjunctive cuts on 2 tasks a and b:
873 // start_1 * (duration_1 + start_min_1 - start_min_2) +
874 // start_2 * (duration_2 + start_min_2 - start_min_1) >=
875 // duration_1 * duration_2 +
876 // start_min_1 * duration_2 +
877 // start_min_2 * duration_1
878 // From: David L. Applegate, William J. Cook:
879 // A Computational Study of the Job-Shop Scheduling Problem. 149-156
880 // INFORMS Journal on Computing, Volume 3, Number 1, Winter 1991
881 const auto add_balas_disjunctive_cut =
882 [&](absl::string_view local_cut_name, IntegerValue start_min_1,
883 IntegerValue duration_min_1, AffineExpression start_1,
884 IntegerValue start_min_2, IntegerValue duration_min_2,
885 AffineExpression start_2) {
886        // Checks the hypothesis of the cut.
887 if (start_min_2 >= start_min_1 + duration_min_1 ||
888 start_min_1 >= start_min_2 + duration_min_2) {
889 return;
890 }
891 const IntegerValue coeff_1 = duration_min_1 + start_min_1 - start_min_2;
892 const IntegerValue coeff_2 = duration_min_2 + start_min_2 - start_min_1;
893 const IntegerValue rhs = duration_min_1 * duration_min_2 +
894 duration_min_1 * start_min_2 +
895 duration_min_2 * start_min_1;
896
897 if (ToDouble(coeff_1) * start_1.LpValue(lp_values) +
898 ToDouble(coeff_2) * start_2.LpValue(lp_values) <=
899 ToDouble(rhs) - kMinCutViolation) {
901 cut.AddTerm(start_1, coeff_1);
902 cut.AddTerm(start_2, coeff_2);
903 top_n_cuts.AddCut(cut.Build(), local_cut_name, lp_values);
904 }
905 };
906
907 for (int i = 0; i + 1 < num_events; ++i) {
908 const CachedIntervalData& e1 = events[i];
909 for (int j = i + 1; j < num_events; ++j) {
910 const CachedIntervalData& e2 = events[j];
911 if (e2.start_min >= e1.end_max) break; // Break out of the index2 loop.
912
913 // Encode only the interesting pairs.
914 if (e1.demand_min + e2.demand_min <= capacity_max) continue;
915
916 const bool interval_1_can_precede_2 = e1.end_min <= e2.start_max;
917 const bool interval_2_can_precede_1 = e2.end_min <= e1.start_max;
918
919 if (interval_1_can_precede_2 && !interval_2_can_precede_1 &&
920 e1.end.LpValue(lp_values) >=
921 e2.start.LpValue(lp_values) + kMinCutViolation) {
922 // interval_1.end <= interval_2.start
923 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
924 cut.AddTerm(e1.end, IntegerValue(1));
925 cut.AddTerm(e2.start, IntegerValue(-1));
926 top_n_cuts.AddCut(cut.Build(),
927 absl::StrCat(cut_name, "DetectedPrecedence"),
928 lp_values);
929 } else if (interval_2_can_precede_1 && !interval_1_can_precede_2 &&
930 e2.end.LpValue(lp_values) >=
931 e1.start.LpValue(lp_values) + kMinCutViolation) {
932 // interval_2.end <= interval_1.start
933 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
934 cut.AddTerm(e2.end, IntegerValue(1));
935 cut.AddTerm(e1.start, IntegerValue(-1));
936 top_n_cuts.AddCut(cut.Build(),
937 absl::StrCat(cut_name, "DetectedPrecedence"),
938 lp_values);
939 } else {
940 add_balas_disjunctive_cut(absl::StrCat(cut_name, "DisjunctionOnStart"),
941 e1.start_min, e1.size_min, e1.start,
942 e2.start_min, e2.size_min, e2.start);
943 add_balas_disjunctive_cut(absl::StrCat(cut_name, "DisjunctionOnEnd"),
944 -e1.end_max, e1.size_min, e1.end.Negated(),
945 -e2.end_max, e2.size_min, e2.end.Negated());
946 }
947 }
948 }
949
950 top_n_cuts.TransferToManager(manager);
951}
952
954 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
955 const AffineExpression& capacity, Model* model) {
956 CutGenerator result;
957 result.only_run_at_level_zero = true;
958 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
959 &result.vars);
961 helper, model, &result.vars,
964
965 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
966 result.generate_cuts = [integer_trail, helper, demands_helper, capacity,
967 model](LinearConstraintManager* manager) {
968 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
969
970 std::vector<CachedIntervalData> events;
971 for (int t = 0; t < helper->NumTasks(); ++t) {
972 if (!helper->IsPresent(t)) continue;
973 CachedIntervalData event(t, helper);
974 event.demand_min = demands_helper->DemandMin(t);
975 events.push_back(event);
976 }
977
978 const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
980 "Cumulative", manager->LpValues(), std::move(events), capacity_max,
981 model, manager);
982 return true;
983 };
984 return result;
985}
986
988 SchedulingConstraintHelper* helper, Model* model) {
989 CutGenerator result;
990 result.only_run_at_level_zero = true;
992 helper, model, &result.vars,
995
996 result.generate_cuts = [helper, model](LinearConstraintManager* manager) {
997 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
998
999 std::vector<CachedIntervalData> events;
1000 for (int t = 0; t < helper->NumTasks(); ++t) {
1001 if (!helper->IsPresent(t)) continue;
1002 CachedIntervalData event(t, helper);
1003 event.demand_min = IntegerValue(1);
1004 events.push_back(event);
1005 }
1006
1008 "NoOverlap", manager->LpValues(), std::move(events), IntegerValue(1),
1009 model, manager);
1010 return true;
1011 };
1012
1013 return result;
1014}
1015
1017 : BaseEvent(t, x_helper) {}
1018
1019std::string CtEvent::DebugString() const {
1020 return absl::StrCat("CtEvent(x_end = ", x_end.DebugString(),
1021 ", x_start_min = ", x_start_min.value(),
1022 ", x_start_max = ", x_start_max.value(),
1023 ", x_size_min = ", x_size_min.value(),
1024 ", x_lp_end = ", x_lp_end,
1025 ", y_size_min = ", y_size_min.value(),
1026 ", energy_min = ", energy_min.value(),
1027 ", use_energy = ", use_energy, ", lifted = ", lifted);
1028}
1029
1030namespace {
1031
1032// This function packs all events in a cumulative of capacity 'capacity_max'
1033// following the given permutation. It returns the sum of end mins and the sum
1034// of end mins weighted by event.weight.
1035//
1036// It ensures that if event_j is after event_i in the permutation, then event_j
1037// starts exactly at the same time or after event_i.
1038//
1039// It returns false if one event cannot start at or before its start_max.
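// Illustration (hypothetical numbers, not from the original source): with
// capacity_max = 3, placing an event of demand 2 and size 3 at
// actual_start = 2 updates the initial profile {(kMin, 3), (kMax, 3)} to
// {(2, 1), (5, 3), (kMax, 3)}: one unit of capacity remains available on
// [2, 5) and the full capacity is available again from time 5 on.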
1040bool ComputeWeightedSumOfEndMinsForOnePermutation(
1041 absl::Span<const PermutableEvent> events, IntegerValue capacity_max,
1042 IntegerValue& sum_of_ends, IntegerValue& sum_of_weighted_ends,
1043 std::vector<std::pair<IntegerValue, IntegerValue>>& profile,
1044 std::vector<std::pair<IntegerValue, IntegerValue>>& new_profile) {
1045 sum_of_ends = 0;
1046 sum_of_weighted_ends = 0;
1047
1048 // The profile (and new profile) is a set of (time, capa_left) pairs, ordered
1049 // by increasing time and capa_left.
1050 profile.clear();
1051 profile.emplace_back(kMinIntegerValue, capacity_max);
1052 profile.emplace_back(kMaxIntegerValue, capacity_max);
1053 IntegerValue start_of_previous_task = kMinIntegerValue;
1054 for (const PermutableEvent& event : events) {
1055 const IntegerValue start_min =
1056 std::max(event.start_min, start_of_previous_task);
1057
1058 // Iterate on the profile to find the step that contains start_min.
1059 // Then push until we find a step with enough capacity.
1060 int current = 0;
1061 while (profile[current + 1].first <= start_min ||
1062 profile[current].second < event.demand) {
1063 ++current;
1064 }
1065
1066 const IntegerValue actual_start =
1067 std::max(start_min, profile[current].first);
1068 start_of_previous_task = actual_start;
1069
1070 // Compatible with the event.start_max ?
1071 if (actual_start > event.start_max) return false;
1072
1073 const IntegerValue actual_end = actual_start + event.size;
1074 sum_of_ends += actual_end;
1075 sum_of_weighted_ends += event.weight * actual_end;
1076
1077 // No need to update the profile on the last loop.
1078 if (&event == &events.back()) break;
1079
1080 // Update the profile.
1081 new_profile.clear();
1082 new_profile.push_back(
1083 {actual_start, profile[current].second - event.demand});
1084 ++current;
1085
1086 while (profile[current].first < actual_end) {
1087 new_profile.push_back(
1088 {profile[current].first, profile[current].second - event.demand});
1089 ++current;
1090 }
1091
1092 if (profile[current].first > actual_end) {
1093 new_profile.push_back(
1094 {actual_end, new_profile.back().second + event.demand});
1095 }
1096 while (current < profile.size()) {
1097 new_profile.push_back(profile[current]);
1098 ++current;
1099 }
1100 profile.swap(new_profile);
1101 }
1102 return true;
1103}
1104
1105} // namespace
1106
1107bool ComputeMinSumOfWeightedEndMins(std::vector<PermutableEvent>& events,
1108 IntegerValue capacity_max,
1109 IntegerValue& min_sum_of_end_mins,
1110 IntegerValue& min_sum_of_weighted_end_mins,
1111 IntegerValue unweighted_threshold,
1112 IntegerValue weighted_threshold) {
1113 int num_explored = 0;
1114 int num_pruned = 0;
1115 min_sum_of_end_mins = kMaxIntegerValue;
1116 min_sum_of_weighted_end_mins = kMaxIntegerValue;
1117
1118 // Reusable storage for ComputeWeightedSumOfEndMinsForOnePermutation().
1119 std::vector<std::pair<IntegerValue, IntegerValue>> profile;
1120 std::vector<std::pair<IntegerValue, IntegerValue>> new_profile;
1121 do {
1122 IntegerValue sum_of_ends(0);
1123 IntegerValue sum_of_weighted_ends(0);
1124 if (ComputeWeightedSumOfEndMinsForOnePermutation(
1125 events, capacity_max, sum_of_ends, sum_of_weighted_ends, profile,
1126 new_profile)) {
1127 min_sum_of_end_mins = std::min(sum_of_ends, min_sum_of_end_mins);
1128 min_sum_of_weighted_end_mins =
1129 std::min(sum_of_weighted_ends, min_sum_of_weighted_end_mins);
1130 num_explored++;
1131 if (min_sum_of_end_mins <= unweighted_threshold &&
1132 min_sum_of_weighted_end_mins <= weighted_threshold) {
1133 break;
1134 }
1135 } else {
1136 num_pruned++;
1137 }
1138 } while (std::next_permutation(events.begin(), events.end()));
1139 VLOG(3) << "DP: size=" << events.size() << ", explored = " << num_explored
1140 << ", pruned = " << num_pruned
1141 << ", min_sum_of_end_mins = " << min_sum_of_end_mins
1142 << ", min_sum_of_weighted_end_mins = "
1143 << min_sum_of_weighted_end_mins;
1144 return num_explored > 0;
1145}
1146
1147// TODO(user): Improve performance
1148// - detect disjoint tasks (no need to crossover to the second part)
1149// - better caching of explored states
1151 const std::string& cut_name, std::vector<CtEvent> events,
1152 IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) {
1153 TopNCuts top_n_cuts(5);
1154 // Sort by start min to bucketize by start_min.
1155 std::sort(events.begin(), events.end(),
1156 [](const CtEvent& e1, const CtEvent& e2) {
1157 return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) <
1158 std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end);
1159 });
1160 std::vector<PermutableEvent> permutable_events;
1161 for (int start = 0; start + 1 < events.size(); ++start) {
1162 // Skip to the next start_min value.
1163 if (start > 0 &&
1164 events[start].x_start_min == events[start - 1].x_start_min) {
1165 continue;
1166 }
1167
1168 const IntegerValue sequence_start_min = events[start].x_start_min;
1169 std::vector<CtEvent> residual_tasks(events.begin() + start, events.end());
1170
1171 // We look at events that start before sequence_start_min, but are forced
1172 // to cross this time point. In that case, we replace this event by a
1173 // truncated event starting at sequence_start_min. To do this, we reduce
1174 // the size_min, and align the start_min with the sequence_start_min.
1175 for (int before = 0; before < start; ++before) {
1176 if (events[before].x_start_min + events[before].x_size_min >
1177 sequence_start_min) {
1178 residual_tasks.push_back(events[before]); // Copy.
1179 residual_tasks.back().lifted = true;
1180 }
1181 }
1182
1183 std::sort(residual_tasks.begin(), residual_tasks.end(),
1184 [](const CtEvent& e1, const CtEvent& e2) {
1185 return e1.x_lp_end < e2.x_lp_end;
1186 });
1187
1188 IntegerValue sum_of_durations(0);
1189 IntegerValue sum_of_energies(0);
1190 double sum_of_ends_lp = 0.0;
1191 double sum_of_weighted_ends_lp = 0.0;
1192 IntegerValue sum_of_demands(0);
1193
1194 permutable_events.clear();
1195 for (int i = 0; i < std::min<int>(residual_tasks.size(), 7); ++i) {
1196 const CtEvent& event = residual_tasks[i];
1197 permutable_events.emplace_back(i, event);
1198 sum_of_ends_lp += event.x_lp_end;
1199 sum_of_weighted_ends_lp += event.x_lp_end * ToDouble(event.y_size_min);
1200 sum_of_demands += event.y_size_min;
1201 sum_of_durations += event.x_size_min;
1202 sum_of_energies += event.x_size_min * event.y_size_min;
1203
1204 // Both cases with 1 or 2 tasks are trivial and independent of the order.
1205 // Also, if capacity is not exceeded, pushing all ends left is a valid LP
1206 // assignment.
1207 if (i <= 1 || sum_of_demands <= capacity_max) continue;
1208
1209 IntegerValue min_sum_of_end_mins = kMaxIntegerValue;
1210 IntegerValue min_sum_of_weighted_end_mins = kMaxIntegerValue;
1211 for (int j = 0; j <= i; ++j) {
1212 // We re-index the elements, so we will start enumerating the
1213 // permutation from there. Note that if the previous i caused an abort
1214 // because of the threshold, we might abort right away again!
1215 permutable_events[j].index = j;
1216 }
1218 permutable_events, capacity_max, min_sum_of_end_mins,
1219 min_sum_of_weighted_end_mins,
1220 /*unweighted_threshold=*/
1221 std::floor(sum_of_ends_lp + kMinCutViolation),
1222 /*weighted_threshold=*/
1223 std::floor(sum_of_weighted_ends_lp + kMinCutViolation))) {
1224 break;
1225 }
1226
1227 const double unweigthed_violation =
1228 (ToDouble(min_sum_of_end_mins) - sum_of_ends_lp) /
1229 ToDouble(sum_of_durations);
1230 const double weighted_violation =
1231 (ToDouble(min_sum_of_weighted_end_mins) - sum_of_weighted_ends_lp) /
1232 ToDouble(sum_of_energies);
1233
1234 // Unweighted cuts.
1235 if (unweigthed_violation > weighted_violation &&
1236 unweigthed_violation > kMinCutViolation) {
1237 LinearConstraintBuilder cut(model, min_sum_of_end_mins,
1239 bool is_lifted = false;
1240 for (int j = 0; j <= i; ++j) {
1241 const CtEvent& event = residual_tasks[j];
1242 is_lifted |= event.lifted;
1243 cut.AddTerm(event.x_end, IntegerValue(1));
1244 }
1245 std::string full_name = cut_name;
1246 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1247 }
1248
1249 // Weighted cuts.
1250 if (weighted_violation >= unweigthed_violation &&
1251 weighted_violation > kMinCutViolation) {
1252 LinearConstraintBuilder cut(model, min_sum_of_weighted_end_mins,
1254 bool is_lifted = false;
1255 for (int j = 0; j <= i; ++j) {
1256 const CtEvent& event = residual_tasks[j];
1257 is_lifted |= event.lifted;
1258 cut.AddTerm(event.x_end, event.y_size_min);
1259 }
1260 std::string full_name = cut_name + "_weighted";
1261 if (is_lifted) full_name.append("_lifted");
1262 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1263 }
1264 }
1265 }
1266 top_n_cuts.TransferToManager(manager);
1267}
1268
1269namespace {
1270
1271// Returns a copy of the event with the start time increased to time.
1272// Energy (min and decomposed) are updated accordingly.
1273CtEvent TrimEventAfter(IntegerValue time, const CtEvent& old_event) {
1274 DCHECK_GT(time, old_event.x_start_min);
1275 CtEvent event = old_event; // Copy.
1276 event.lifted = true;
1277 // Build the vector of energies as the vector of sizes.
1278 event.energy_min = ComputeEnergyMinInWindow(
1279 event.x_start_min, event.x_start_max, event.x_end_min, event.x_end_max,
1280 event.x_size_min, event.y_size_min, event.decomposed_energy, time,
1281 event.x_end_max);
1282 event.x_size_min = event.x_size_min + event.x_start_min - time;
1283 event.x_start_min = time;
1284 if (event.energy_min > event.x_size_min * event.y_size_min) {
1285 event.use_energy = true;
1286 }
1287 DCHECK_GE(event.energy_min, event.x_size_min * event.y_size_min);
1288 return event;
1289}
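// Illustration (example values, not from the original source): trimming an
// event with x_start_min = 0 and x_size_min = 4 at time = 2 yields
// x_start_min = 2 and x_size_min = 4 + 0 - 2 = 2; energy_min is recomputed on
// the window [2, x_end_max], and use_energy is set if it exceeds the trimmed
// x_size_min * y_size_min.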
1290
1291// Collects all possible demand values for the event, and adds them to the
1292// subset sum of the reachable capacity of the cumulative constraint.
1293void AddEventDemandsToCapacitySubsetSum(
1294 const CtEvent& event, const VariablesAssignment& assignment,
1295 IntegerValue capacity_max, std::vector<int64_t>& tmp_possible_demands,
1296 MaxBoundedSubsetSum& dp) {
1297 if (dp.CurrentMax() != capacity_max) {
1298 if (event.y_size_is_fixed) {
1299 dp.Add(event.y_size_min.value());
1300 } else if (!event.decomposed_energy.empty()) {
1301 tmp_possible_demands.clear();
1302 for (const auto& [literal, size, demand] : event.decomposed_energy) {
1303 if (assignment.LiteralIsFalse(literal)) continue;
1304 if (assignment.LiteralIsTrue(literal)) {
1305 tmp_possible_demands.assign({demand.value()});
1306 break;
1307 }
1308 tmp_possible_demands.push_back(demand.value());
1309 }
1310 dp.AddChoices(tmp_possible_demands);
1311 } else { // event.y_size_min is not fixed, we abort.
1312 // TODO(user): We could still add all events in the range if the range
1313 // is small.
1314 dp.Add(capacity_max.value());
1315 }
1316 }
1317}
1318
1319} // namespace
1320
1321// We generate the cut from Smith's rule, described in:
1322// M. Queyranne, Structure of a simple scheduling polyhedron,
1323// Mathematical Programming 58 (1993), 263–285
1324//
1325// The original cut is:
1326// sum(end_min_i * size_min_i) >=
1327// (sum(size_min_i^2) + sum(size_min_i)^2) / 2
1328// We strengthen this cut by noticing that if all tasks start after S,
1329// then replacing end_min_i by (end_min_i - S) is still valid.
1330//
1331// A second difference is that we lift intervals that start before a given
1332// value, but are forced to cross it. This lifting procedure implies trimming
1333// the interval to its part that is after the given value.
1334//
1335// In the case of a cumulative constraint with a capacity of C, we compute a
1336// valid equation by splitting the task (size_min si, demand_min di) into di
1337// tasks of size si and demand 1, that we spread across C no_overlap
1338// constraints. When doing so, the lhs of the equation is the same, and the
1339// first term of the rhs is also unchanged. A lower bound of the second term of
1340// the rhs is reached when the split is exact (each no_overlap sees a total load
1341// of sum(si * di) / C). Thus the second term is greater than or equal to
1342// (sum (si * di) ^ 2) / (2 * C)
1343//
1344// Sometimes, the min energy of the task i is greater than si * di.
1345// Let's introduce ai, the minimum energy of task i, and rewrite the previous
1346// equation. In that new setting, we can rewrite the cumulative transformation
1347// by splitting each task into at least di tasks of size at least si and demand
1348// 1.
1349//
1350// In that setting, the lhs is rewritten as sum(ai * ei) and the second term of
1351// the rhs is rewritten as sum(ai) ^ 2 / (2 * C).
1352//
1353// The question is how to rewrite the term sum(di * si * si). The minimum
1354// contribution is when the task has size si and demand ai / si (note that si
1355// is the minimum size of the task, and di its minimum demand). We can
1356// replace the small rectangle area term by ai * si.
1357//
1358// sum (ai * ei) - sum (ai) * current_start_min >=
1359// sum(si * ai) / 2 + (sum (ai) ^ 2) / (2 * C)
1360//
1361// The separation procedure is implemented using two loops:
1362// - first, we loop on potential start times in increasing order.
1363//   - second, we add tasks that must contribute after this start time,
1364// ordered by increasing end time in the LP relaxation.
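// Worked example of the base inequality (illustrative, not from the original
// source): two tasks of sizes 2 and 3 on a single no_overlap resource give
//   sum(end_min_i * size_min_i) >= (4 + 9 + (2 + 3)^2) / 2 = 19.
// Scheduling the size-2 task first yields ends 2 and 5, hence 2*2 + 5*3 = 19;
// the reverse order yields ends 3 and 5, hence 3*3 + 5*2 = 19, so the bound is
// tight for either order.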
1365void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
1366 std::vector<CtEvent> events,
1367 IntegerValue capacity_max,
1368 bool skip_low_sizes, Model* model,
1369 LinearConstraintManager* manager) {
1370 TopNCuts top_n_cuts(5);
1371 const VariablesAssignment& assignment =
1372 model->GetOrCreate<Trail>()->Assignment();
1373 std::vector<int64_t> tmp_possible_demands;
1374
1375 // Sort by start min to bucketize by start_min.
1376 std::sort(events.begin(), events.end(),
1377 [](const CtEvent& e1, const CtEvent& e2) {
1378 return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) <
1379 std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end);
1380 });
1381
1382 // First loop: we loop on potential start times.
1383 for (int start = 0; start + 1 < events.size(); ++start) {
1384 // Skip to the next start_min value.
1385 if (start > 0 &&
1386 events[start].x_start_min == events[start - 1].x_start_min) {
1387 continue;
1388 }
1389
1390 const IntegerValue sequence_start_min = events[start].x_start_min;
1391 std::vector<CtEvent> residual_tasks(events.begin() + start, events.end());
1392
1393 // We look at events that start before sequence_start_min, but are forced
1394 // to cross this time point. In that case, we replace this event by a
1395 // truncated event starting at sequence_start_min. To do this, we reduce
1396 // the size_min, align the start_min with the sequence_start_min, and
1397 // scale the energy down accordingly.
1398 for (int before = 0; before < start; ++before) {
1399 if (events[before].x_start_min + events[before].x_size_min >
1400 sequence_start_min) {
1401 CtEvent event = TrimEventAfter(sequence_start_min, events[before]);
1402 if (event.energy_min <= 0) continue;
1403 residual_tasks.push_back(std::move(event));
1404 }
1405 }
1406
1407 // Best cut so far for this loop.
1408 int best_end = -1;
1409 double best_efficacy = 0.01;
1410 IntegerValue best_min_contrib = 0;
1411 IntegerValue best_capacity = 0;
1412
1413 // Used in the first term of the rhs of the equation.
1414 IntegerValue sum_event_contributions = 0;
1415 // Used in the second term of the rhs of the equation.
1416 IntegerValue sum_energy = 0;
1417 // For normalization.
1418 IntegerValue sum_square_energy = 0;
1419
1420 double lp_contrib = 0.0;
1421 IntegerValue current_start_min(kMaxIntegerValue);
1422
1423 MaxBoundedSubsetSum dp(capacity_max.value());
1424
1425 // We will add tasks one by one, sorted by end time, and evaluate the
1426 // potential cut at each step.
1427 std::sort(residual_tasks.begin(), residual_tasks.end(),
1428 [](const CtEvent& e1, const CtEvent& e2) {
1429 return e1.x_lp_end < e2.x_lp_end;
1430 });
1431
1432 // Second loop: we add tasks one by one.
1433 for (int i = 0; i < residual_tasks.size(); ++i) {
1434 const CtEvent& event = residual_tasks[i];
1435 DCHECK_GE(event.x_start_min, sequence_start_min);
1436
1437 // Make sure we do not overflow.
1438 if (!AddTo(event.energy_min, &sum_energy)) break;
1439 // In the no_overlap case, we have:
1440 // area = event.x_size_min ^ 2
1441      // In the simple cumulative case, we split the task
1442      // (demand_min, size_min) into demand_min tasks in the no_overlap case.
1443      //   area = event.y_size_min * event.x_size_min * event.x_size_min
1444      // In the cumulative case, we can have energy_min > size_min * demand_min.
1445 // In that case, we use energy_min * size_min.
1446 if (!AddProductTo(event.energy_min, event.x_size_min,
1447 &sum_event_contributions)) {
1448 break;
1449 }
1450 if (!AddSquareTo(event.energy_min, &sum_square_energy)) break;
1451
1452 lp_contrib += event.x_lp_end * ToDouble(event.energy_min);
1453 current_start_min = std::min(current_start_min, event.x_start_min);
1454
1455 // Maintain the reachable capacity with a bounded complexity subset sum.
1456 AddEventDemandsToCapacitySubsetSum(event, assignment, capacity_max,
1457 tmp_possible_demands, dp);
1458
1459 // This is competing with the brute force approach. Skip cases covered
1460 // by the other code.
1461 if (skip_low_sizes && i < 7) continue;
1462
1463 const IntegerValue reachable_capacity = dp.CurrentMax();
1464
1465 // Do we have a violated cut ?
1466 const IntegerValue large_rectangle_contrib =
1467 CapProdI(sum_energy, sum_energy);
1468 if (AtMinOrMaxInt64I(large_rectangle_contrib)) break;
1469 const IntegerValue mean_large_rectangle_contrib =
1470 CeilRatio(large_rectangle_contrib, reachable_capacity);
1471
1472 IntegerValue min_contrib =
1473 CapAddI(sum_event_contributions, mean_large_rectangle_contrib);
1474 if (AtMinOrMaxInt64I(min_contrib)) break;
1475 min_contrib = CeilRatio(min_contrib, 2);
1476
1477 // shift contribution by current_start_min.
1478 if (!AddProductTo(sum_energy, current_start_min, &min_contrib)) break;
1479
1480 // The efficacy of the cut is the normalized violation of the above
1481 // equation. We will normalize by the sqrt of the sum of squared energies.
1482 const double efficacy = (ToDouble(min_contrib) - lp_contrib) /
1483 std::sqrt(ToDouble(sum_square_energy));
1484
1485 // For a given start time, we only keep the best cut.
1486      // The reason is that if the cut is strongly violated, we can get a
1487 // sequence of violated cuts as we add more tasks. These new cuts will
1488 // be less violated, but will not bring anything useful to the LP
1489 // relaxation. At the same time, this sequence of cuts can push out
1490 // other cuts from a disjoint set of tasks.
1491 if (efficacy > best_efficacy) {
1492 best_efficacy = efficacy;
1493 best_end = i;
1494 best_min_contrib = min_contrib;
1495 best_capacity = reachable_capacity;
1496 }
1497 }
1498
1499 // We have inserted all tasks. Have we found a violated cut ?
1500 // If so, add the most violated one to the top_n cut container.
1501 if (best_end != -1) {
1502 LinearConstraintBuilder cut(model, best_min_contrib, kMaxIntegerValue);
1503 bool is_lifted = false;
1504 bool add_energy_to_name = false;
1505 for (int i = 0; i <= best_end; ++i) {
1506 const CtEvent& event = residual_tasks[i];
1507 is_lifted |= event.lifted;
1508 add_energy_to_name |= event.use_energy;
1509 cut.AddTerm(event.x_end, event.energy_min);
1510 }
1511 std::string full_name(cut_name);
1512 if (is_lifted) full_name.append("_lifted");
1513 if (add_energy_to_name) full_name.append("_energy");
1514 if (best_capacity < capacity_max) {
1515 full_name.append("_subsetsum");
1516 }
1517 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1518 }
1519 }
1520 top_n_cuts.TransferToManager(manager);
1521}
1522
1524 SchedulingConstraintHelper* helper, Model* model) {
1525 CutGenerator result;
1526 result.only_run_at_level_zero = true;
1527 AddIntegerVariableFromIntervals(helper, model, &result.vars,
1530
1531 result.generate_cuts = [helper, model](LinearConstraintManager* manager) {
1532 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1533
1534 auto generate_cuts = [model, manager, helper](bool mirror) {
1535 std::vector<CtEvent> events;
1536 const auto& lp_values = manager->LpValues();
1537 for (int index = 0; index < helper->NumTasks(); ++index) {
1538 if (!helper->IsPresent(index)) continue;
1539 const IntegerValue size_min = helper->SizeMin(index);
1540 if (size_min > 0) {
1541 const AffineExpression end_expr = helper->Ends()[index];
1542 CtEvent event(index, helper);
1543 event.x_end = end_expr;
1544 event.x_lp_end = end_expr.LpValue(lp_values);
1545 event.y_size_min = IntegerValue(1);
1546 event.energy_min = size_min;
1547 events.push_back(event);
1548 }
1549 }
1550
1551 const std::string mirror_str = mirror ? "_mirror" : "";
1552 GenerateShortCompletionTimeCutsWithExactBound(
1553 absl::StrCat("NoOverlapCompletionTimeExhaustive", mirror_str), events,
1554 /*capacity_max=*/IntegerValue(1), model, manager);
1555
1556 GenerateCompletionTimeCutsWithEnergy(
1557 absl::StrCat("NoOverlapCompletionTimeQueyrane", mirror_str),
1558 std::move(events), /*capacity_max=*/IntegerValue(1),
1559 /*skip_low_sizes=*/true, model, manager);
1560 };
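// Editor's note: the lambda above is run twice, once per time direction.
// SynchronizeAndSetTimeDirection(false) reverses time, so the "mirror" pass
// should produce the symmetric cuts expressed on the negated starts instead
// of the ends.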
1561 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1562 generate_cuts(false);
1563 if (!helper->SynchronizeAndSetTimeDirection(false)) return false;
1564 generate_cuts(true);
1565 return true;
1566 };
1567 return result;
1568}
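Editor's note: the factory above only builds the cut callback; to take effect,
the generator still has to be registered with the LP relaxation. Below is a
minimal, hypothetical sketch of that wiring; the LinearRelaxation container and
its cut_generators field are assumptions made for illustration, not something
defined in this file.

  // Hypothetical registration sketch (assumed container, illustration only):
  // the LP machinery then calls result.generate_cuts(manager) during cut
  // rounds at level zero, as requested by only_run_at_level_zero above.
  LinearRelaxation relaxation;
  relaxation.cut_generators.push_back(
      CreateNoOverlapCompletionTimeCutGenerator(helper, model));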
1569
1570 CutGenerator CreateCumulativeCompletionTimeCutGenerator(
1571 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
1572 const AffineExpression& capacity, Model* model) {
1573 CutGenerator result;
1574 result.only_run_at_level_zero = true;
1575 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
1576 &result.vars);
1577 AddIntegerVariableFromIntervals(helper, model, &result.vars,
1580
1581 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
1582 result.generate_cuts = [integer_trail, helper, demands_helper, capacity,
1583 model](LinearConstraintManager* manager) {
1584 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1585 if (!demands_helper->CacheAllEnergyValues()) return true;
1586
1587 auto generate_cuts = [integer_trail, model, manager, helper, demands_helper,
1588 capacity](bool mirror) {
1589 std::vector<CtEvent> events;
1590 const auto& lp_values = manager->LpValues();
1591 for (int index = 0; index < helper->NumTasks(); ++index) {
1592 if (!helper->IsPresent(index)) continue;
1593 if (helper->SizeMin(index) > 0 &&
1594 demands_helper->DemandMin(index) > 0) {
1595 CtEvent event(index, helper);
1596 event.x_end = helper->Ends()[index];
1597 event.x_lp_end = event.x_end.LpValue(lp_values);
1598 event.y_size_min = demands_helper->DemandMin(index);
1599 event.energy_min = demands_helper->EnergyMin(index);
1600 event.decomposed_energy = demands_helper->DecomposedEnergies()[index];
1601 event.y_size_is_fixed = demands_helper->DemandIsFixed(index);
1602 events.push_back(event);
1603 }
1604 }
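// Editor's note on the fields filled above: EnergyMin() may be stronger than
// SizeMin() * DemandMin() when a decomposed energy (presence literal =>
// fixed size/demand pair) is known, and the decomposition itself is copied
// onto the event so the downstream cut code can exploit it as well.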
1605
1606 const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
1607 const std::string mirror_str = mirror ? "_mirror" : "";
1608 GenerateShortCompletionTimeCutsWithExactBound(
1609 absl::StrCat("CumulativeCompletionTimeExhaustive", mirror_str),
1610 events, capacity_max, model, manager);
1611
1612 GenerateCompletionTimeCutsWithEnergy(
1613 absl::StrCat("CumulativeCompletionTimeQueyrane", mirror_str),
1614 std::move(events), capacity_max,
1615 /*skip_low_sizes=*/true, model, manager);
1616 };
1617 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1618 generate_cuts(false);
1619 if (!helper->SynchronizeAndSetTimeDirection(false)) return false;
1620 generate_cuts(true);
1621 return true;
1622 };
1623 return result;
1624}
1625
1626} // namespace sat
1627} // namespace operations_research