Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
scheduling_cuts.cc
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/scheduling_cuts.h"
15
16#include <stdint.h>
17
18#include <algorithm>
19#include <cmath>
20#include <functional>
21#include <limits>
22#include <optional>
23#include <string>
24#include <tuple>
25#include <utility>
26#include <vector>
27
28#include "absl/base/attributes.h"
29#include "absl/container/btree_set.h"
30#include "absl/container/flat_hash_map.h"
31#include "absl/log/check.h"
32#include "absl/strings/str_cat.h"
33#include "absl/strings/string_view.h"
34#include "absl/types/span.h"
38#include "ortools/sat/cuts.h"
39#include "ortools/sat/integer.h"
40#include "ortools/sat/intervals.h"
41#include "ortools/sat/linear_constraint.h"
42#include "ortools/sat/linear_constraint_manager.h"
43#include "ortools/sat/model.h"
44#include "ortools/sat/sat_base.h"
45#include "ortools/sat/util.h"
46#include "ortools/util/sorted_interval_list.h"
47#include "ortools/util/strong_integers.h"
48#include "ortools/util/time_limit.h"
49
50namespace operations_research {
51namespace sat {
52
53namespace {
54// Minimum amount of violation of the cut constraint by the solution. This
55// is needed to avoid numerical issues and adding cuts with minor effect.
56const double kMinCutViolation = 1e-4;
57} // namespace
58
59BaseEvent::BaseEvent(int t, SchedulingConstraintHelper* x_helper)
60 : x_start_min(x_helper->StartMin(t)),
61 x_start_max(x_helper->StartMax(t)),
62 x_end_min(x_helper->EndMin(t)),
63 x_end_max(x_helper->EndMax(t)),
64 x_size_min(x_helper->SizeMin(t)) {}
65
66struct EnergyEvent : BaseEvent {
67 EnergyEvent(int t, SchedulingConstraintHelper* x_helper)
68 : BaseEvent(t, x_helper) {}
69
70 // We need this for linearizing the energy in some cases.
71 AffineExpression y_size;
72
73 // If set, this event is optional and its presence is controlled by this.
74 LiteralIndex presence_literal_index = kNoLiteralIndex;
75
76 // A linear expression which is a valid lower bound on the total energy of
77 // this event. We also cache the activity of the expression to not recompute
78 // it all the time.
79 LinearExpression linearized_energy;
80 double linearized_energy_lp_value = 0.0;
81
82 // True if linearized_energy is not exact and a McCormick relaxation.
83 bool energy_is_quadratic = false;
84
85 // The actual value of the presence literal of the interval(s) is checked
86 // when the event is created. A value of kNoLiteralIndex indicates that either
87 // the interval was not optional, or that its presence literal is true at
88 // level zero.
89 bool IsPresent() const { return presence_literal_index == kNoLiteralIndex; }
90
91 // Computes the mandatory minimal overlap of the interval with the time window
92 // [start, end].
93 IntegerValue GetMinOverlap(IntegerValue start, IntegerValue end) const {
94 return std::max(std::min({x_end_min - start, end - x_start_max, x_size_min,
95 end - start}),
96 IntegerValue(0));
97 }
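  // Illustrative example (not part of the original source): with a time window
  // [start, end] = [10, 20], x_end_min = 14, x_start_max = 16 and
  // x_size_min = 6, the task must overlap the window by at least
  // min(14 - 10, 20 - 16, 6, 20 - 10) = 4, whatever its actual position.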
98
99 // This method expects all the other fields to have been filled before.
100 // It must be called before the EnergyEvent is used.
101 ABSL_MUST_USE_RESULT bool FillEnergyLp(
102 AffineExpression x_size,
103 const util_intops::StrongVector<IntegerVariable, double>& lp_values,
104 Model* model) {
105 LinearConstraintBuilder tmp_energy(model);
106 if (IsPresent()) {
107 if (!decomposed_energy.empty()) {
108 if (!tmp_energy.AddDecomposedProduct(decomposed_energy)) return false;
109 } else {
110 tmp_energy.AddQuadraticLowerBound(x_size, y_size,
111 model->GetOrCreate<IntegerTrail>(),
112 &energy_is_quadratic);
113 }
114 } else {
115 if (!tmp_energy.AddLiteralTerm(Literal(presence_literal_index),
116 energy_min)) {
117 return false;
118 }
119 }
120 linearized_energy = tmp_energy.BuildExpression();
121 linearized_energy_lp_value = linearized_energy.LpValue(lp_values);
122 return true;
123 }
124
125 std::string DebugString() const {
126 return absl::StrCat(
127 "EnergyEvent(x_start_min = ", x_start_min.value(),
128 ", x_start_max = ", x_start_max.value(),
129 ", x_end_min = ", x_end_min.value(),
130 ", x_end_max = ", x_end_max.value(),
131 ", y_size = ", y_size.DebugString(), ", energy = ",
132 decomposed_energy.empty()
133 ? "{}"
134 : absl::StrCat(decomposed_energy.size(), " terms"),
135 ", presence_literal_index = ", presence_literal_index.value(), ")");
136 }
137};
138
139namespace {
140
141// Compute the energetic contribution of a task in a given time window, and
142// add it to the cut. It returns false if it tried to generate the cut, and
143// failed.
144ABSL_MUST_USE_RESULT bool AddOneEvent(
145 const EnergyEvent& event, IntegerValue window_start,
146 IntegerValue window_end, LinearConstraintBuilder* cut,
147 bool* add_energy_to_name = nullptr, bool* add_quadratic_to_name = nullptr,
148 bool* add_opt_to_name = nullptr, bool* add_lifted_to_name = nullptr) {
149 DCHECK(cut != nullptr);
150
151 if (event.x_end_min <= window_start || event.x_start_max >= window_end) {
152 return true; // Event can move outside the time window.
153 }
154
155 if (event.x_start_min >= window_start && event.x_end_max <= window_end) {
156 // Event is always contained by the time window.
157 cut->AddLinearExpression(event.linearized_energy);
158
159 if (event.energy_is_quadratic && add_quadratic_to_name != nullptr) {
160 *add_quadratic_to_name = true;
161 }
162 if (add_energy_to_name != nullptr &&
163 event.energy_min > event.x_size_min * event.y_size_min) {
164 *add_energy_to_name = true;
165 }
166 if (!event.IsPresent() && add_opt_to_name != nullptr) {
167 *add_opt_to_name = true;
168 }
169 } else { // The event has a mandatory overlap with the time window.
170 const IntegerValue min_overlap =
171 event.GetMinOverlap(window_start, window_end);
172 if (min_overlap <= 0) return true;
173 if (add_lifted_to_name != nullptr) *add_lifted_to_name = true;
174
175 if (event.IsPresent()) {
176 const std::vector<LiteralValueValue>& energy = event.decomposed_energy;
177 if (energy.empty()) {
178 cut->AddTerm(event.y_size, min_overlap);
179 } else {
180 const IntegerValue window_size = window_end - window_start;
181 for (const auto [lit, fixed_size, fixed_demand] : energy) {
182 const IntegerValue alt_end_min =
183 std::max(event.x_end_min, event.x_start_min + fixed_size);
184 const IntegerValue alt_start_max =
185 std::min(event.x_start_max, event.x_end_max - fixed_size);
186 const IntegerValue energy_min =
187 fixed_demand *
188 std::min({alt_end_min - window_start, window_end - alt_start_max,
189 fixed_size, window_size});
190 if (energy_min == 0) continue;
191 if (!cut->AddLiteralTerm(lit, energy_min)) return false;
192 }
193 if (add_energy_to_name != nullptr) *add_energy_to_name = true;
194 }
195 } else {
196 if (add_opt_to_name != nullptr) *add_opt_to_name = true;
197 const IntegerValue min_energy = ComputeEnergyMinInWindow(
198 event.x_start_min, event.x_start_max, event.x_end_min,
199 event.x_end_max, event.x_size_min, event.y_size_min,
200 event.decomposed_energy, window_start, window_end);
201 if (min_energy > event.x_size_min * event.y_size_min &&
202 add_energy_to_name != nullptr) {
203 *add_energy_to_name = true;
204 }
205 if (!cut->AddLiteralTerm(Literal(event.presence_literal_index),
206 min_energy)) {
207 return false;
208 }
209 }
210 }
211 return true;
212}
213
214// Returns the list of all possible demand values for the given event.
215// It returns an empty vector if the number of values is too large.
216std::vector<int64_t> FindPossibleDemands(const EnergyEvent& event,
217 const VariablesAssignment& assignment,
218 IntegerTrail* integer_trail) {
219 std::vector<int64_t> possible_demands;
220 if (event.decomposed_energy.empty()) {
221 if (integer_trail->IsFixed(event.y_size)) {
222 possible_demands.push_back(
223 integer_trail->FixedValue(event.y_size).value());
224 } else {
225 if (integer_trail->InitialVariableDomain(event.y_size.var).Size() >
226 1000000) {
227 return {};
228 }
229 for (const int64_t var_value :
230 integer_trail->InitialVariableDomain(event.y_size.var).Values()) {
231 possible_demands.push_back(event.y_size.ValueAt(var_value).value());
232 }
233 }
234 } else {
235 for (const auto [lit, fixed_size, fixed_demand] : event.decomposed_energy) {
236 if (assignment.LiteralIsFalse(lit)) continue;
237 possible_demands.push_back(fixed_demand.value());
238 }
239 }
240 return possible_demands;
241}
242
243// This generates the actual cut and computes its activity vs the
244// available_energy_lp.
245bool CutIsEfficient(
246 absl::Span<const EnergyEvent> events, IntegerValue window_start,
247 IntegerValue window_end, double available_energy_lp,
248 const util_intops::StrongVector<IntegerVariable, double>& lp_values,
249 LinearConstraintBuilder* temp_builder) {
250 temp_builder->Clear();
251 for (const EnergyEvent& event : events) {
252 if (!AddOneEvent(event, window_start, window_end, temp_builder)) {
253 return false;
254 }
255 }
256 return temp_builder->BuildExpression().LpValue(lp_values) >=
257 available_energy_lp * (1.0 + kMinCutViolation);
258}
259
260} // namespace
261
262// This cumulative energetic cut generator will split the cumulative span in 2
263// regions.
264//
265// In the region before the min of the makespan, we will compute a more
266// precise reachable profile and have a better estimation of the energy
267// available between two time points. The improvement can come from two sources:
268// - subset sum indicates that the max capacity cannot be reached.
269// - sum of demands < max capacity.
270//
271// In the region after the min of the makespan, we will use
272// fixed_capacity * (makespan - makespan_min)
273// as the available energy.
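// Illustrative example (hypothetical numbers, not part of the original
// comment): with a fixed capacity of 3, makespan_min = 10 and a window
// [4, 10] in which the subset sum of the demands can only reach 2, the
// available energy is 2 * (10 - 4) = 12 before makespan_min, plus
// 3 * (makespan - 10) for the part of the window after makespan_min.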
274void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
275 absl::string_view cut_name,
276 const util_intops::StrongVector<IntegerVariable, double>& lp_values,
277 std::vector<EnergyEvent> events, IntegerValue capacity,
278 AffineExpression makespan, TimeLimit* time_limit, Model* model,
279 LinearConstraintManager* manager) {
280 // Checks the precondition of the code.
281 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
282 DCHECK(integer_trail->IsFixed(capacity));
283
284 const VariablesAssignment& assignment =
285 model->GetOrCreate<Trail>()->Assignment();
286
287 // Currently, we look at all the possible time windows, and will push all cuts
288 // in the TopNCuts object. From our observations, this generator creates only
289 // a few cuts for a given run.
290 //
291// The complexity of this loop is n^3. If we follow the latest research, we
292// could implement this in n log^2(n). Still, this is not visible in the
293// profile as we only run this method at the root node.
294 struct OverloadedTimeWindowWithMakespan {
295 IntegerValue start;
296 IntegerValue end;
297 IntegerValue fixed_energy_rhs; // Can be complemented by the makespan.
298 bool use_makespan = false;
299 bool use_subset_sum = false;
300 };
301
302 std::vector<OverloadedTimeWindowWithMakespan> overloaded_time_windows;
303 // Compute relevant time points.
304 // TODO(user): We could reduce this set.
305 // TODO(user): we can compute the max usage between makespan_min and
306 // makespan_max.
307 std::vector<IntegerValue> time_points;
308 std::vector<std::vector<int64_t>> possible_demands(events.size());
309 const IntegerValue makespan_min = integer_trail->LowerBound(makespan);
310 IntegerValue max_end_min = kMinIntegerValue; // Used to abort early.
311 IntegerValue max_end_max = kMinIntegerValue; // Used as a sentinel.
312 for (int i = 0; i < events.size(); ++i) {
313 const EnergyEvent& event = events[i];
314 if (event.x_start_min < makespan_min) {
315 time_points.push_back(event.x_start_min);
316 }
317 if (event.x_start_max < makespan_min) {
318 time_points.push_back(event.x_start_max);
319 }
320 if (event.x_end_min < makespan_min) {
321 time_points.push_back(event.x_end_min);
322 }
323 if (event.x_end_max < makespan_min) {
324 time_points.push_back(event.x_end_max);
325 }
326 max_end_min = std::max(max_end_min, event.x_end_min);
327 max_end_max = std::max(max_end_max, event.x_end_max);
328 possible_demands[i] = FindPossibleDemands(event, assignment, integer_trail);
329 }
330 time_points.push_back(makespan_min);
331 time_points.push_back(max_end_max);
332 gtl::STLSortAndRemoveDuplicates(&time_points);
333
334 const int num_time_points = time_points.size();
335 absl::flat_hash_map<IntegerValue, IntegerValue> reachable_capacity_ending_at;
336
337 MaxBoundedSubsetSum reachable_capacity_subset_sum(capacity.value());
338 for (int i = 1; i < num_time_points; ++i) {
339 const IntegerValue window_start = time_points[i - 1];
340 const IntegerValue window_end = time_points[i];
341 reachable_capacity_subset_sum.Reset(capacity.value());
342 for (int i = 0; i < events.size(); ++i) {
343 const EnergyEvent& event = events[i];
344 if (event.x_start_min >= window_end || event.x_end_max <= window_start) {
345 continue;
346 }
347 if (possible_demands[i].empty()) { // Number of values was too large.
348 // In practice, it stops the DP as the upper bound is reached.
349 reachable_capacity_subset_sum.Add(capacity.value());
350 } else {
351 reachable_capacity_subset_sum.AddChoices(possible_demands[i]);
352 }
353 if (reachable_capacity_subset_sum.CurrentMax() == capacity.value()) break;
354 }
355 reachable_capacity_ending_at[window_end] =
356 reachable_capacity_subset_sum.CurrentMax();
357 }
358
359 const double capacity_lp = ToDouble(capacity);
360 const double makespan_lp = makespan.LpValue(lp_values);
361 const double makespan_min_lp = ToDouble(makespan_min);
362 LinearConstraintBuilder temp_builder(model);
363 for (int i = 0; i + 1 < num_time_points; ++i) {
364 // Checks the time limit if the problem is too big.
365 if (events.size() > 50 && time_limit->LimitReached()) return;
366
367 const IntegerValue window_start = time_points[i];
368 // After max_end_min, all tasks can fit before window_start.
369 if (window_start >= max_end_min) break;
370
371 IntegerValue cumulated_max_energy = 0;
372 IntegerValue cumulated_max_energy_before_makespan_min = 0;
373 bool use_subset_sum = false;
374 bool use_subset_sum_before_makespan_min = false;
375
376 for (int j = i + 1; j < num_time_points; ++j) {
377 const IntegerValue strip_start = time_points[j - 1];
378 const IntegerValue window_end = time_points[j];
379 const IntegerValue max_reachable_capacity_in_current_strip =
380 reachable_capacity_ending_at[window_end];
381 DCHECK_LE(max_reachable_capacity_in_current_strip, capacity);
382
383 // Update states for the name of the generated cut.
384 if (max_reachable_capacity_in_current_strip < capacity) {
385 use_subset_sum = true;
386 if (window_end <= makespan_min) {
387 use_subset_sum_before_makespan_min = true;
388 }
389 }
390
391 const IntegerValue energy_in_strip =
392 (window_end - strip_start) * max_reachable_capacity_in_current_strip;
393 cumulated_max_energy += energy_in_strip;
394 if (window_end <= makespan_min) {
395 cumulated_max_energy_before_makespan_min += energy_in_strip;
396 }
397
398 if (window_start >= makespan_min) {
399 DCHECK_EQ(cumulated_max_energy_before_makespan_min, 0);
400 }
401 DCHECK_LE(cumulated_max_energy, capacity * (window_end - window_start));
402 const double max_energy_up_to_makespan_lp =
403 strip_start >= makespan_min
404 ? ToDouble(cumulated_max_energy_before_makespan_min) +
405 (makespan_lp - makespan_min_lp) * capacity_lp
406 : std::numeric_limits<double>::infinity();
407
408 // We prefer using the makespan as the cut will tighten itself when the
409 // objective value is improved.
410 //
411 // We reuse the min cut violation to allow some slack in the comparison
412 // between the two computed energy values.
413 const bool use_makespan =
414 max_energy_up_to_makespan_lp <=
415 ToDouble(cumulated_max_energy) + kMinCutViolation;
416 const double available_energy_lp = use_makespan
417 ? max_energy_up_to_makespan_lp
418 : ToDouble(cumulated_max_energy);
419 if (CutIsEfficient(events, window_start, window_end, available_energy_lp,
420 lp_values, &temp_builder)) {
421 OverloadedTimeWindowWithMakespan w;
422 w.start = window_start;
423 w.end = window_end;
424 w.fixed_energy_rhs = use_makespan
425 ? cumulated_max_energy_before_makespan_min
426 : cumulated_max_energy;
427 w.use_makespan = use_makespan;
428 w.use_subset_sum =
429 use_makespan ? use_subset_sum_before_makespan_min : use_subset_sum;
430 overloaded_time_windows.push_back(std::move(w));
431 }
432 }
433 }
434
435 if (overloaded_time_windows.empty()) return;
436
437 VLOG(3) << "GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity: "
438 << events.size() << " events, " << time_points.size()
439 << " time points, " << overloaded_time_windows.size()
440 << " overloads detected";
441
442 TopNCuts top_n_cuts(5);
443 for (const auto& w : overloaded_time_windows) {
444 bool cut_generated = true;
445 bool add_opt_to_name = false;
446 bool add_lifted_to_name = false;
447 bool add_quadratic_to_name = false;
448 bool add_energy_to_name = false;
449 LinearConstraintBuilder cut(model, kMinIntegerValue, w.fixed_energy_rhs);
450
451 if (w.use_makespan) { // Add the energy from makespan_min to makespan.
452 cut.AddConstant(makespan_min * capacity);
453 cut.AddTerm(makespan, -capacity);
454 }
455
456 // Add contributions from all events.
457 for (const EnergyEvent& event : events) {
458 if (!AddOneEvent(event, w.start, w.end, &cut, &add_energy_to_name,
459 &add_quadratic_to_name, &add_opt_to_name,
460 &add_lifted_to_name)) {
461 cut_generated = false;
462 break; // Exit the event loop.
463 }
464 }
465
466 if (cut_generated) {
467 std::string full_name(cut_name);
468 if (add_opt_to_name) full_name.append("_optional");
469 if (add_quadratic_to_name) full_name.append("_quadratic");
470 if (add_lifted_to_name) full_name.append("_lifted");
471 if (add_energy_to_name) full_name.append("_energy");
472 if (w.use_makespan) full_name.append("_makespan");
473 if (w.use_subset_sum) full_name.append("_subsetsum");
474 top_n_cuts.AddCut(cut.Build(), full_name, lp_values);
475 }
476 }
477
478 top_n_cuts.TransferToManager(manager);
479}
480
481void GenerateCumulativeEnergeticCuts(
482 const std::string& cut_name,
483 const util_intops::StrongVector<IntegerVariable, double>& lp_values,
484 std::vector<EnergyEvent> events, const AffineExpression& capacity,
485 TimeLimit* time_limit, Model* model, LinearConstraintManager* manager) {
486 double max_possible_energy_lp = 0.0;
487 for (const EnergyEvent& event : events) {
488 max_possible_energy_lp += event.linearized_energy_lp_value;
489 }
490
491 // Currently, we look at all the possible time windows, and will push all cuts
492 // in the TopNCuts object. From our observations, this generator creates only
493 // a few cuts for a given run.
494 //
495// The complexity of this loop is n^3. If we follow the latest research, we
496// could implement this in n log^2(n). Still, this is not visible in the
497// profile as we only run this method at the root node.
498 struct OverloadedTimeWindow {
499 IntegerValue start;
500 IntegerValue end;
501 };
502 std::vector<OverloadedTimeWindow> overloaded_time_windows;
503 const double capacity_lp = capacity.LpValue(lp_values);
504
505 // Compute relevant time points.
506 // TODO(user): We could reduce this set.
507 absl::btree_set<IntegerValue> time_points_set;
508 IntegerValue max_end_min = kMinIntegerValue;
509 for (const EnergyEvent& event : events) {
510 time_points_set.insert(event.x_start_min);
511 time_points_set.insert(event.x_start_max);
512 time_points_set.insert(event.x_end_min);
513 time_points_set.insert(event.x_end_max);
514 max_end_min = std::max(max_end_min, event.x_end_min);
515 }
516 const std::vector<IntegerValue> time_points(time_points_set.begin(),
517 time_points_set.end());
518 const int num_time_points = time_points.size();
519
520 LinearConstraintBuilder temp_builder(model);
521 for (int i = 0; i + 1 < num_time_points; ++i) {
522 // Checks the time limit if the problem is too big.
523 if (events.size() > 50 && time_limit->LimitReached()) return;
524
525 const IntegerValue window_start = time_points[i];
526 // After max_end_min, all tasks can fit before window_start.
527 if (window_start >= max_end_min) break;
528
529 for (int j = i + 1; j < num_time_points; ++j) {
530 const IntegerValue window_end = time_points[j];
531 const double available_energy_lp =
532 ToDouble(window_end - window_start) * capacity_lp;
533 if (available_energy_lp >= max_possible_energy_lp) break;
534 if (CutIsEfficient(events, window_start, window_end, available_energy_lp,
535 lp_values, &temp_builder)) {
536 overloaded_time_windows.push_back({window_start, window_end});
537 }
538 }
539 }
540
541 if (overloaded_time_windows.empty()) return;
542
543 VLOG(3) << "GenerateCumulativeEnergeticCuts: " << events.size() << " events, "
544 << time_points.size() << " time points, "
545 << overloaded_time_windows.size() << " overloads detected";
546
547 TopNCuts top_n_cuts(5);
548 for (const auto& [window_start, window_end] : overloaded_time_windows) {
549 bool cut_generated = true;
550 bool add_opt_to_name = false;
551 bool add_lifted_to_name = false;
552 bool add_quadratic_to_name = false;
553 bool add_energy_to_name = false;
554 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
555
556 // Compute the max energy available for the tasks.
557 cut.AddTerm(capacity, window_start - window_end);
558
559 // Add all contributions.
560 for (const EnergyEvent& event : events) {
561 if (!AddOneEvent(event, window_start, window_end, &cut,
562 &add_energy_to_name, &add_quadratic_to_name,
563 &add_opt_to_name, &add_lifted_to_name)) {
564 cut_generated = false;
565 break; // Exit the event loop.
566 }
567 }
568
569 if (cut_generated) {
570 std::string full_name = cut_name;
571 if (add_opt_to_name) full_name.append("_optional");
572 if (add_quadratic_to_name) full_name.append("_quadratic");
573 if (add_lifted_to_name) full_name.append("_lifted");
574 if (add_energy_to_name) full_name.append("_energy");
575 top_n_cuts.AddCut(cut.Build(), full_name, lp_values);
576 }
577 }
578
579 top_n_cuts.TransferToManager(manager);
580}
581
582CutGenerator CreateCumulativeEnergyCutGenerator(
583 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
584 const AffineExpression& capacity,
585 const std::optional<AffineExpression>& makespan, Model* model) {
586 CutGenerator result;
587 result.only_run_at_level_zero = true;
588 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
589 &result.vars);
590 AddIntegerVariableFromIntervals(helper, model, &result.vars);
591 if (makespan.has_value() && !makespan.value().IsConstant()) {
592 result.vars.push_back(makespan.value().var);
593 }
594 gtl::STLSortAndRemoveDuplicates(&result.vars);
595 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
596 TimeLimit* time_limit = model->GetOrCreate<TimeLimit>();
597
598 result.generate_cuts = [makespan, capacity, demands_helper, helper,
599 integer_trail, time_limit,
600 model](LinearConstraintManager* manager) {
601 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
602 demands_helper->CacheAllEnergyValues();
603
604 const auto& lp_values = manager->LpValues();
605 std::vector<EnergyEvent> events;
606 for (int i = 0; i < helper->NumTasks(); ++i) {
607 if (helper->IsAbsent(i)) continue;
608 // TODO(user): use level 0 bounds ?
609 if (demands_helper->DemandMax(i) == 0 || helper->SizeMin(i) == 0) {
610 continue;
611 }
612
613 EnergyEvent e(i, helper);
614 e.y_size = demands_helper->Demands()[i];
615 e.y_size_min = demands_helper->DemandMin(i);
616 e.decomposed_energy = demands_helper->DecomposedEnergies()[i];
617 e.energy_min = demands_helper->EnergyMin(i);
618 e.energy_is_quadratic = demands_helper->EnergyIsQuadratic(i);
619 if (!helper->IsPresent(i)) {
620 e.presence_literal_index = helper->PresenceLiteral(i).Index();
621 }
622 // We can always skip events.
623 if (!e.FillEnergyLp(helper->Sizes()[i], lp_values, model)) continue;
624 events.push_back(e);
625 }
626
627 if (makespan.has_value() && integer_trail->IsFixed(capacity)) {
628 GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
629 "CumulativeEnergyM", lp_values, events,
630 integer_trail->FixedValue(capacity), makespan.value(), time_limit,
631 model, manager);
632
633 } else {
634 GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, events,
635 capacity, time_limit, model, manager);
636 }
637 return true;
638 };
639
640 return result;
641}
642
643CutGenerator CreateNoOverlapEnergyCutGenerator(
644 SchedulingConstraintHelper* helper,
645 const std::optional<AffineExpression>& makespan, Model* model) {
646 CutGenerator result;
647 result.only_run_at_level_zero = true;
648 AddIntegerVariableFromIntervals(helper, model, &result.vars);
649 if (makespan.has_value() && !makespan.value().IsConstant()) {
650 result.vars.push_back(makespan.value().var);
651 }
652 gtl::STLSortAndRemoveDuplicates(&result.vars);
653 TimeLimit* time_limit = model->GetOrCreate<TimeLimit>();
654
655 result.generate_cuts = [makespan, helper, time_limit,
656 model](LinearConstraintManager* manager) {
657 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
658
659 const auto& lp_values = manager->LpValues();
660 std::vector<EnergyEvent> events;
661 for (int i = 0; i < helper->NumTasks(); ++i) {
662 if (helper->IsAbsent(i)) continue;
663 if (helper->SizeMin(i) == 0) continue;
664
665 EnergyEvent e(i, helper);
666 e.y_size = IntegerValue(1);
667 e.y_size_min = IntegerValue(1);
668 e.energy_min = helper->SizeMin(i);
669 if (!helper->IsPresent(i)) {
670 e.presence_literal_index = helper->PresenceLiteral(i).Index();
671 }
672 // We can always skip events.
673 if (!e.FillEnergyLp(helper->Sizes()[i], lp_values, model)) continue;
674 events.push_back(e);
675 }
676
677 if (makespan.has_value()) {
678 GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
679 "NoOverlapEnergyM", lp_values, events,
680 /*capacity=*/IntegerValue(1), makespan.value(), time_limit, model,
681 manager);
682 } else {
683 GenerateCumulativeEnergeticCuts("NoOverlapEnergy", lp_values, events,
684 /*capacity=*/IntegerValue(1), time_limit,
685 model, manager);
686 }
687 return true;
688 };
689 return result;
690}
691
692CutGenerator CreateCumulativeTimeTableCutGenerator(
693 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
694 const AffineExpression& capacity, Model* model) {
695 CutGenerator result;
696 result.only_run_at_level_zero = true;
697 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
698 &result.vars);
699 AddIntegerVariableFromIntervals(helper, model, &result.vars);
700 gtl::STLSortAndRemoveDuplicates(&result.vars);
701
702 struct TimeTableEvent {
703 int interval_index;
704 IntegerValue time;
705 LinearExpression demand;
706 double demand_lp = 0.0;
707 bool is_positive = false;
708 bool use_energy = false;
709 bool is_optional = false;
710 };
711
712 result.generate_cuts = [helper, capacity, demands_helper,
713 model](LinearConstraintManager* manager) {
714 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
715 demands_helper->CacheAllEnergyValues();
716
717 TopNCuts top_n_cuts(5);
718 std::vector<TimeTableEvent> events;
719 const auto& lp_values = manager->LpValues();
720 const double capacity_lp = capacity.LpValue(lp_values);
721
722 // Iterate through the intervals. If start_max < end_min, the demand
723 // is mandatory.
724 for (int i = 0; i < helper->NumTasks(); ++i) {
725 if (helper->IsAbsent(i)) continue;
726
727 const IntegerValue start_max = helper->StartMax(i);
728 const IntegerValue end_min = helper->EndMin(i);
729
730 if (start_max >= end_min) continue;
731
732 TimeTableEvent e1;
733 e1.interval_index = i;
734 e1.time = start_max;
735 {
736 LinearConstraintBuilder builder(model);
737 // Ignore the interval if the linearized demand fails.
738 if (!demands_helper->AddLinearizedDemand(i, &builder)) continue;
739 e1.demand = builder.BuildExpression();
740 }
741 e1.demand_lp = e1.demand.LpValue(lp_values);
742 e1.is_positive = true;
743 e1.use_energy = !demands_helper->DecomposedEnergies()[i].empty();
744 e1.is_optional = !helper->IsPresent(i);
745
746 TimeTableEvent e2 = e1;
747 e2.time = end_min;
748 e2.is_positive = false;
749
750 events.push_back(e1);
751 events.push_back(e2);
752 }
753
754 // Sort events by time.
756// It is also important that all positive events with the same time as
757// negative events appear after them, for the correctness of the algo below.
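    // Illustrative note (not part of the original comment): if one task ends
    // at time 5 and another starts at time 5, the end event is processed
    // first, so their demands are never summed together at that time point.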
757 std::sort(events.begin(), events.end(),
758 [](const TimeTableEvent& i, const TimeTableEvent& j) {
759 if (i.time == j.time) {
760 if (i.is_positive == j.is_positive) {
761 return i.interval_index < j.interval_index;
762 }
763 return !i.is_positive;
764 }
765 return i.time < j.time;
766 });
767
768 double sum_of_demand_lp = 0.0;
769 bool positive_event_added_since_last_check = false;
770 for (int i = 0; i < events.size(); ++i) {
771 const TimeTableEvent& e = events[i];
772 if (e.is_positive) {
773 positive_event_added_since_last_check = true;
774 sum_of_demand_lp += e.demand_lp;
775 continue;
776 }
777
778 if (positive_event_added_since_last_check) {
779 // Reset positive event added. We do not want to create cuts for
780 // each negative event in sequence.
781 positive_event_added_since_last_check = false;
782
783 if (sum_of_demand_lp >= capacity_lp + kMinCutViolation) {
784 // Create cut.
785 bool use_energy = false;
786 bool use_optional = false;
787 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
788 cut.AddTerm(capacity, IntegerValue(-1));
789 // The i-th event, which is a negative event, follows a positive
790 // event. We must ignore it in our cut generation.
791 DCHECK(!events[i].is_positive);
792 const IntegerValue time_point = events[i - 1].time;
793
794 for (int j = 0; j < i; ++j) {
795 const TimeTableEvent& cut_event = events[j];
796 const int t = cut_event.interval_index;
797 DCHECK_LE(helper->StartMax(t), time_point);
798 if (!cut_event.is_positive || helper->EndMin(t) <= time_point) {
799 continue;
800 }
801
802 cut.AddLinearExpression(cut_event.demand, IntegerValue(1));
803 use_energy |= cut_event.use_energy;
804 use_optional |= cut_event.is_optional;
805 }
806
807 std::string cut_name = "CumulativeTimeTable";
808 if (use_optional) cut_name += "_optional";
809 if (use_energy) cut_name += "_energy";
810 top_n_cuts.AddCut(cut.Build(), cut_name, lp_values);
811 }
812 }
813
814 // The demand_lp was added in case of a positive event. We need to
815 // remove it for a negative event.
816 sum_of_demand_lp -= e.demand_lp;
817 }
818 top_n_cuts.TransferToManager(manager);
819 return true;
820 };
821 return result;
822}
823
824// Cached Information about one interval.
825// Note that everything must correspond to level zero bounds, otherwise the
826// generated cuts are not valid.
827
828struct CachedIntervalData {
829 CachedIntervalData(int t, SchedulingConstraintHelper* helper)
830 : start_min(helper->StartMin(t)),
831 start_max(helper->StartMax(t)),
832 start(helper->Starts()[t]),
833 end_min(helper->EndMin(t)),
834 end_max(helper->EndMax(t)),
835 end(helper->Ends()[t]),
836 size_min(helper->SizeMin(t)) {}
837
838 IntegerValue start_min;
839 IntegerValue start_max;
840 AffineExpression start;
841 IntegerValue end_min;
842 IntegerValue end_max;
843 AffineExpression end;
844 IntegerValue size_min;
845
846 IntegerValue demand_min;
847};
848
849void GenerateCutsBetweenPairOfNonOverlappingTasks(
850 absl::string_view cut_name,
851 const util_intops::StrongVector<IntegerVariable, double>& lp_values,
852 std::vector<CachedIntervalData> events, IntegerValue capacity_max,
853 Model* model, LinearConstraintManager* manager) {
854 TopNCuts top_n_cuts(5);
855 const int num_events = events.size();
856 if (num_events <= 1) return;
857
858 std::sort(events.begin(), events.end(),
859 [](const CachedIntervalData& e1, const CachedIntervalData& e2) {
860 return e1.start_min < e2.start_min ||
861 (e1.start_min == e2.start_min && e1.end_max < e2.end_max);
862 });
863
864 // Balas disjunctive cuts on 2 tasks a and b:
865 // start_1 * (duration_1 + start_min_1 - start_min_2) +
866 // start_2 * (duration_2 + start_min_2 - start_min_1) >=
867 // duration_1 * duration_2 +
868 // start_min_1 * duration_2 +
869 // start_min_2 * duration_1
870 // From: David L. Applegate, William J. Cook:
871 // A Computational Study of the Job-Shop Scheduling Problem. 149-156
872 // INFORMS Journal on Computing, Volume 3, Number 1, Winter 1991
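  // Illustrative example (hypothetical values, not part of the original
  // comment): with start_min_1 = 0, duration_1 = 4, start_min_2 = 2 and
  // duration_2 = 3, the cut reads 2 * start_1 + 5 * start_2 >= 20, which is
  // tight both for task 1 first (start_1 = 0, start_2 = 4) and for task 2
  // first (start_2 = 2, start_1 = 5).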
873 const auto add_balas_disjunctive_cut =
874 [&](absl::string_view local_cut_name, IntegerValue start_min_1,
875 IntegerValue duration_min_1, AffineExpression start_1,
876 IntegerValue start_min_2, IntegerValue duration_min_2,
877 AffineExpression start_2) {
878 // Checks hypothesis from the cut.
879 if (start_min_2 >= start_min_1 + duration_min_1 ||
880 start_min_1 >= start_min_2 + duration_min_2) {
881 return;
882 }
883 const IntegerValue coeff_1 = duration_min_1 + start_min_1 - start_min_2;
884 const IntegerValue coeff_2 = duration_min_2 + start_min_2 - start_min_1;
885 const IntegerValue rhs = duration_min_1 * duration_min_2 +
886 duration_min_1 * start_min_2 +
887 duration_min_2 * start_min_1;
888
889 if (ToDouble(coeff_1) * start_1.LpValue(lp_values) +
890 ToDouble(coeff_2) * start_2.LpValue(lp_values) <=
891 ToDouble(rhs) - kMinCutViolation) {
892 LinearConstraintBuilder cut(model, rhs, kMaxIntegerValue);
893 cut.AddTerm(start_1, coeff_1);
894 cut.AddTerm(start_2, coeff_2);
895 top_n_cuts.AddCut(cut.Build(), local_cut_name, lp_values);
896 }
897 };
898
899 for (int i = 0; i + 1 < num_events; ++i) {
900 const CachedIntervalData& e1 = events[i];
901 for (int j = i + 1; j < num_events; ++j) {
902 const CachedIntervalData& e2 = events[j];
903 if (e2.start_min >= e1.end_max) break; // Break out of the index2 loop.
904
905 // Encode only the interesting pairs.
906 if (e1.demand_min + e2.demand_min <= capacity_max) continue;
907
908 const bool interval_1_can_precede_2 = e1.end_min <= e2.start_max;
909 const bool interval_2_can_precede_1 = e2.end_min <= e1.start_max;
910
911 if (interval_1_can_precede_2 && !interval_2_can_precede_1 &&
912 e1.end.LpValue(lp_values) >=
913 e2.start.LpValue(lp_values) + kMinCutViolation) {
914 // interval_1.end <= interval_2.start
915 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
916 cut.AddTerm(e1.end, IntegerValue(1));
917 cut.AddTerm(e2.start, IntegerValue(-1));
918 top_n_cuts.AddCut(cut.Build(),
919 absl::StrCat(cut_name, "DetectedPrecedence"),
920 lp_values);
921 } else if (interval_2_can_precede_1 && !interval_1_can_precede_2 &&
922 e2.end.LpValue(lp_values) >=
923 e1.start.LpValue(lp_values) + kMinCutViolation) {
924 // interval_2.end <= interval_1.start
925 LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0));
926 cut.AddTerm(e2.end, IntegerValue(1));
927 cut.AddTerm(e1.start, IntegerValue(-1));
928 top_n_cuts.AddCut(cut.Build(),
929 absl::StrCat(cut_name, "DetectedPrecedence"),
930 lp_values);
931 } else {
932 add_balas_disjunctive_cut(absl::StrCat(cut_name, "DisjunctionOnStart"),
933 e1.start_min, e1.size_min, e1.start,
934 e2.start_min, e2.size_min, e2.start);
935 add_balas_disjunctive_cut(absl::StrCat(cut_name, "DisjunctionOnEnd"),
936 -e1.end_max, e1.size_min, e1.end.Negated(),
937 -e2.end_max, e2.size_min, e2.end.Negated());
938 }
939 }
940 }
941
942 top_n_cuts.TransferToManager(manager);
943}
944
946 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
947 const AffineExpression& capacity, Model* model) {
948 CutGenerator result;
949 result.only_run_at_level_zero = true;
950 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
951 &result.vars);
952 AddIntegerVariableFromIntervals(helper, model, &result.vars);
953 gtl::STLSortAndRemoveDuplicates(&result.vars);
954
955 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
956 result.generate_cuts = [integer_trail, helper, demands_helper, capacity,
957 model](LinearConstraintManager* manager) {
958 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
959
960 std::vector<CachedIntervalData> events;
961 for (int t = 0; t < helper->NumTasks(); ++t) {
962 if (!helper->IsPresent(t)) continue;
963 CachedIntervalData event(t, helper);
964 event.demand_min = demands_helper->DemandMin(t);
965 events.push_back(event);
966 }
967
968 const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
969 GenerateCutsBetweenPairOfNonOverlappingTasks(
970 "Cumulative", manager->LpValues(), std::move(events), capacity_max,
971 model, manager);
972 return true;
973 };
974 return result;
975}
976
977CutGenerator CreateNoOverlapPrecedenceCutGenerator(
978 SchedulingConstraintHelper* helper, Model* model) {
979 CutGenerator result;
980 result.only_run_at_level_zero = true;
981 AddIntegerVariableFromIntervals(helper, model, &result.vars);
982 gtl::STLSortAndRemoveDuplicates(&result.vars);
983
984 result.generate_cuts = [helper, model](LinearConstraintManager* manager) {
985 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
986
987 std::vector<CachedIntervalData> events;
988 for (int t = 0; t < helper->NumTasks(); ++t) {
989 if (!helper->IsPresent(t)) continue;
990 CachedIntervalData event(t, helper);
991 event.demand_min = IntegerValue(1);
992 events.push_back(event);
993 }
994
995 GenerateCutsBetweenPairOfNonOverlappingTasks(
996 "NoOverlap", manager->LpValues(), std::move(events), IntegerValue(1),
997 model, manager);
998 return true;
999 };
1000
1001 return result;
1002}
1003
1004CtEvent::CtEvent(int t, SchedulingConstraintHelper* x_helper)
1005 : BaseEvent(t, x_helper) {}
1006
1007std::string CtEvent::DebugString() const {
1008 return absl::StrCat("CtEvent(x_end = ", x_end.DebugString(),
1009 ", x_start_min = ", x_start_min.value(),
1010 ", x_start_max = ", x_start_max.value(),
1011 ", x_size_min = ", x_size_min.value(),
1012 ", x_lp_end = ", x_lp_end,
1013 ", y_size_min = ", y_size_min.value(),
1014 ", energy_min = ", energy_min.value(),
1015 ", use_energy = ", use_energy, ", lifted = ", lifted);
1016}
1017
1018namespace {
1019
1020// This function packs all events in a cumulative of capacity 'capacity_max'
1021// following the given permutation. It returns the sum of end mins and the sum
1022// of end mins weighted by event.weight.
1023//
1024// It ensures that if event_j is after event_i in the permutation, then event_j
1025// starts exactly at the same time or after event_i.
1026//
1027// It returns false if one event cannot start before event.start_max.
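// Illustrative example (hypothetical data, not part of the original comment):
// with capacity_max = 2 and two events of (size, demand, start_min) equal to
// (3, 1, 0) then (2, 2, 0), the first one is placed at [0, 3]; the second one
// cannot fit next to it (1 + 2 > 2), so it is pushed to [3, 5] and the sum of
// ends is 3 + 5 = 8.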
1028bool ComputeWeightedSumOfEndMinsForOnePermutation(
1029 absl::Span<const PermutableEvent> events, IntegerValue capacity_max,
1030 IntegerValue& sum_of_ends, IntegerValue& sum_of_weighted_ends,
1031 std::vector<std::pair<IntegerValue, IntegerValue>>& profile,
1032 std::vector<std::pair<IntegerValue, IntegerValue>>& new_profile) {
1033 sum_of_ends = 0;
1034 sum_of_weighted_ends = 0;
1035
1036 // The profile (and new profile) is a set of (time, capa_left) pairs, ordered
1037 // by increasing time and capa_left.
1038 profile.clear();
1039 profile.emplace_back(kMinIntegerValue, capacity_max);
1040 profile.emplace_back(kMaxIntegerValue, capacity_max);
1041 IntegerValue start_of_previous_task = kMinIntegerValue;
1042 for (const PermutableEvent& event : events) {
1043 const IntegerValue start_min =
1044 std::max(event.start_min, start_of_previous_task);
1045
1046 // Iterate on the profile to find the step that contains start_min.
1047 // Then push until we find a step with enough capacity.
1048 int current = 0;
1049 while (profile[current + 1].first <= start_min ||
1050 profile[current].second < event.demand) {
1051 ++current;
1052 }
1053
1054 const IntegerValue actual_start =
1055 std::max(start_min, profile[current].first);
1056 start_of_previous_task = actual_start;
1057
1058 // Compatible with the event.start_max ?
1059 if (actual_start > event.start_max) return false;
1060
1061 const IntegerValue actual_end = actual_start + event.size;
1062 sum_of_ends += actual_end;
1063 sum_of_weighted_ends += event.weight * actual_end;
1064
1065 // No need to update the profile on the last loop.
1066 if (&event == &events.back()) break;
1067
1068 // Update the profile.
1069 new_profile.clear();
1070 new_profile.push_back(
1071 {actual_start, profile[current].second - event.demand});
1072 ++current;
1073
1074 while (profile[current].first < actual_end) {
1075 new_profile.push_back(
1076 {profile[current].first, profile[current].second - event.demand});
1077 ++current;
1078 }
1079
1080 if (profile[current].first > actual_end) {
1081 new_profile.push_back(
1082 {actual_end, new_profile.back().second + event.demand});
1083 }
1084 while (current < profile.size()) {
1085 new_profile.push_back(profile[current]);
1086 ++current;
1087 }
1088 profile.swap(new_profile);
1089 }
1090 return true;
1091}
1092
1093} // namespace
1094
1095bool ComputeMinSumOfWeightedEndMins(std::vector<PermutableEvent>& events,
1096 IntegerValue capacity_max,
1097 IntegerValue& min_sum_of_end_mins,
1098 IntegerValue& min_sum_of_weighted_end_mins,
1099 IntegerValue unweighted_threshold,
1100 IntegerValue weighted_threshold) {
1101 int num_explored = 0;
1102 int num_pruned = 0;
1103 min_sum_of_end_mins = kMaxIntegerValue;
1104 min_sum_of_weighted_end_mins = kMaxIntegerValue;
1105
1106 // Reusable storage for ComputeWeightedSumOfEndMinsForOnePermutation().
1107 std::vector<std::pair<IntegerValue, IntegerValue>> profile;
1108 std::vector<std::pair<IntegerValue, IntegerValue>> new_profile;
1109 do {
1110 IntegerValue sum_of_ends(0);
1111 IntegerValue sum_of_weighted_ends(0);
1112 if (ComputeWeightedSumOfEndMinsForOnePermutation(
1113 events, capacity_max, sum_of_ends, sum_of_weighted_ends, profile,
1114 new_profile)) {
1115 min_sum_of_end_mins = std::min(sum_of_ends, min_sum_of_end_mins);
1116 min_sum_of_weighted_end_mins =
1117 std::min(sum_of_weighted_ends, min_sum_of_weighted_end_mins);
1118 num_explored++;
1119 if (min_sum_of_end_mins <= unweighted_threshold &&
1120 min_sum_of_weighted_end_mins <= weighted_threshold) {
1121 break;
1122 }
1123 } else {
1124 num_pruned++;
1125 }
1126 } while (std::next_permutation(events.begin(), events.end()));
1127 VLOG(3) << "DP: size=" << events.size() << ", explored = " << num_explored
1128 << ", pruned = " << num_pruned
1129 << ", min_sum_of_end_mins = " << min_sum_of_end_mins
1130 << ", min_sum_of_weighted_end_mins = "
1131 << min_sum_of_weighted_end_mins;
1132 return num_explored > 0;
1133}
1134
1135// TODO(user): Improve performance
1136// - detect disjoint tasks (no need to crossover to the second part)
1137// - better caching of explored states
1138void GenerateShortCompletionTimeCutsWithExactBound(
1139 const std::string& cut_name, std::vector<CtEvent> events,
1140 IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) {
1141 TopNCuts top_n_cuts(5);
1142 // Sort by start min to bucketize by start_min.
1143 std::sort(events.begin(), events.end(),
1144 [](const CtEvent& e1, const CtEvent& e2) {
1145 return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) <
1146 std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end);
1147 });
1148 std::vector<PermutableEvent> permutable_events;
1149 for (int start = 0; start + 1 < events.size(); ++start) {
1150 // Skip to the next start_min value.
1151 if (start > 0 &&
1152 events[start].x_start_min == events[start - 1].x_start_min) {
1153 continue;
1154 }
1155
1156 const IntegerValue sequence_start_min = events[start].x_start_min;
1157 std::vector<CtEvent> residual_tasks(events.begin() + start, events.end());
1158
1159// We look at events that start before sequence_start_min, but are forced
1160 // to cross this time point. In that case, we replace this event by a
1161 // truncated event starting at sequence_start_min. To do this, we reduce
1162 // the size_min, and align the start_min with the sequence_start_min.
1163 for (int before = 0; before < start; ++before) {
1164 if (events[before].x_start_min + events[before].x_size_min >
1165 sequence_start_min) {
1166 residual_tasks.push_back(events[before]); // Copy.
1167 residual_tasks.back().lifted = true;
1168 }
1169 }
1170
1171 std::sort(residual_tasks.begin(), residual_tasks.end(),
1172 [](const CtEvent& e1, const CtEvent& e2) {
1173 return e1.x_lp_end < e2.x_lp_end;
1174 });
1175
1176 IntegerValue sum_of_durations(0);
1177 IntegerValue sum_of_energies(0);
1178 double sum_of_ends_lp = 0.0;
1179 double sum_of_weighted_ends_lp = 0.0;
1180 IntegerValue sum_of_demands(0);
1181
1182 permutable_events.clear();
1183 for (int i = 0; i < std::min<int>(residual_tasks.size(), 7); ++i) {
1184 const CtEvent& event = residual_tasks[i];
1185 permutable_events.emplace_back(i, event);
1186 sum_of_ends_lp += event.x_lp_end;
1187 sum_of_weighted_ends_lp += event.x_lp_end * ToDouble(event.y_size_min);
1188 sum_of_demands += event.y_size_min;
1189 sum_of_durations += event.x_size_min;
1190 sum_of_energies += event.x_size_min * event.y_size_min;
1191
1192 // Both cases with 1 or 2 tasks are trivial and independent of the order.
1193 // Also, if capacity is not exceeded, pushing all ends left is a valid LP
1194 // assignment.
1195 if (i <= 1 || sum_of_demands <= capacity_max) continue;
1196
1197 IntegerValue min_sum_of_end_mins = kMaxIntegerValue;
1198 IntegerValue min_sum_of_weighted_end_mins = kMaxIntegerValue;
1199 for (int j = 0; j <= i; ++j) {
1200 // We re-index the elements, so we will start enumerating the
1201 // permutation from there. Note that if the previous i caused an abort
1202 // because of the threshold, we might abort right away again!
1203 permutable_events[j].index = j;
1204 }
1205 if (!ComputeMinSumOfWeightedEndMins(
1206 permutable_events, capacity_max, min_sum_of_end_mins,
1207 min_sum_of_weighted_end_mins,
1208 /*unweighted_threshold=*/
1209 std::floor(sum_of_ends_lp + kMinCutViolation),
1210 /*weighted_threshold=*/
1211 std::floor(sum_of_weighted_ends_lp + kMinCutViolation))) {
1212 break;
1213 }
1214
1215 const double unweigthed_violation =
1216 (ToDouble(min_sum_of_end_mins) - sum_of_ends_lp) /
1217 ToDouble(sum_of_durations);
1218 const double weighted_violation =
1219 (ToDouble(min_sum_of_weighted_end_mins) - sum_of_weighted_ends_lp) /
1220 ToDouble(sum_of_energies);
1221
1222 // Unweighted cuts.
1223 if (unweigthed_violation > weighted_violation &&
1224 unweigthed_violation > kMinCutViolation) {
1225 LinearConstraintBuilder cut(model, min_sum_of_end_mins,
1226 kMaxIntegerValue);
1227 bool is_lifted = false;
1228 for (int j = 0; j <= i; ++j) {
1229 const CtEvent& event = residual_tasks[j];
1230 is_lifted |= event.lifted;
1231 cut.AddTerm(event.x_end, IntegerValue(1));
1232 }
1233 std::string full_name = cut_name;
1234 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1235 }
1236
1237 // Weighted cuts.
1238 if (weighted_violation >= unweigthed_violation &&
1239 weighted_violation > kMinCutViolation) {
1240 LinearConstraintBuilder cut(model, min_sum_of_weighted_end_mins,
1241 kMaxIntegerValue);
1242 bool is_lifted = false;
1243 for (int j = 0; j <= i; ++j) {
1244 const CtEvent& event = residual_tasks[j];
1245 is_lifted |= event.lifted;
1246 cut.AddTerm(event.x_end, event.y_size_min);
1247 }
1248 std::string full_name = cut_name + "_weighted";
1249 if (is_lifted) full_name.append("_lifted");
1250 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1251 }
1252 }
1253 }
1254 top_n_cuts.TransferToManager(manager);
1255}
1256
1257// We generate the cut using Smith's rule from:
1258// M. Queyranne, Structure of a simple scheduling polyhedron,
1259// Mathematical Programming 58 (1993), 263–285
1260//
1261// The original cut is:
1262// sum(end_min_i * duration_min_i) >=
1263// (sum(duration_min_i^2) + sum(duration_min_i)^2) / 2
1264// We strengthen this cut by noticing that if all tasks start after S,
1265// then replacing end_min_i by (end_min_i - S) is still valid.
1266//
1267// A second difference is that we look at a set of intervals starting
1268// after a given start_min, sorted by relative (end_lp - start_min).
1269//
1270// TODO(user): merge with Packing cuts.
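// Illustrative example (hypothetical numbers, not part of the original
// comment): two tasks of duration 2 and 3 with S = 0 give
//   2 * end_1 + 3 * end_2 >= ((2^2 + 3^2) + (2 + 3)^2) / 2 = 19,
// which is tight for the schedule where they end at 2 and 5 (2*2 + 3*5 = 19).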
1271void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name,
1272 std::vector<CtEvent> events,
1273 IntegerValue capacity_max,
1274 bool skip_low_sizes, Model* model,
1275 LinearConstraintManager* manager) {
1276 TopNCuts top_n_cuts(5);
1277
1278 // Sort by start min to bucketize by start_min.
1279 std::sort(events.begin(), events.end(),
1280 [](const CtEvent& e1, const CtEvent& e2) {
1281 return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) <
1282 std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end);
1283 });
1284 for (int start = 0; start + 1 < events.size(); ++start) {
1285 // Skip to the next start_min value.
1286 if (start > 0 &&
1287 events[start].x_start_min == events[start - 1].x_start_min) {
1288 continue;
1289 }
1290
1291 const IntegerValue sequence_start_min = events[start].x_start_min;
1292 std::vector<CtEvent> residual_tasks(events.begin() + start, events.end());
1293 const VariablesAssignment& assignment =
1294 model->GetOrCreate<Trail>()->Assignment();
1295
1296// We look at events that start before sequence_start_min, but are forced
1297 // to cross this time point. In that case, we replace this event by a
1298 // truncated event starting at sequence_start_min. To do this, we reduce
1299 // the size_min, align the start_min with the sequence_start_min, and
1300 // scale the energy down accordingly.
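    // Illustrative example (hypothetical values, not part of the original
    // comment): a task with x_start_min = 2 and x_size_min = 5 that must
    // cross sequence_start_min = 4 becomes a lifted copy with x_start_min = 4,
    // x_size_min = 3, and its energy_min recomputed on the window
    // [4, x_end_max].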
1301 for (int before = 0; before < start; ++before) {
1302 if (events[before].x_start_min + events[before].x_size_min >
1303 sequence_start_min) {
1304 // Build the vector of energies as the vector of sizes.
1305 CtEvent event = events[before]; // Copy.
1306 event.lifted = true;
1307 event.energy_min = ComputeEnergyMinInWindow(
1308 event.x_start_min, event.x_start_max, event.x_end_min,
1309 event.x_end_max, event.x_size_min, event.y_size_min,
1310 event.decomposed_energy, sequence_start_min, event.x_end_max);
1311 event.x_size_min =
1312 event.x_size_min + event.x_start_min - sequence_start_min;
1313 event.x_start_min = sequence_start_min;
1314 if (event.energy_min > event.x_size_min * event.y_size_min) {
1315 event.use_energy = true;
1316 }
1317 DCHECK_GE(event.energy_min, event.x_size_min * event.y_size_min);
1318 if (event.energy_min <= 0) continue;
1319 residual_tasks.push_back(event);
1320 }
1321 }
1322
1323 std::sort(residual_tasks.begin(), residual_tasks.end(),
1324 [](const CtEvent& e1, const CtEvent& e2) {
1325 return e1.x_lp_end < e2.x_lp_end;
1326 });
1327
1328 int best_end = -1;
1329 double best_efficacy = 0.01;
1330 IntegerValue best_min_contrib(0);
1331 IntegerValue sum_duration(0);
1332 IntegerValue sum_square_duration(0);
1333 IntegerValue best_capacity(0);
1334 double unscaled_lp_contrib = 0.0;
1335 IntegerValue current_start_min(kMaxIntegerValue);
1336
1337 MaxBoundedSubsetSum dp(capacity_max.value());
1338 std::vector<int64_t> possible_demands;
1339 for (int i = 0; i < residual_tasks.size(); ++i) {
1340 const CtEvent& event = residual_tasks[i];
1341 DCHECK_GE(event.x_start_min, sequence_start_min);
1342 const IntegerValue energy = event.energy_min;
1343 sum_duration += energy;
1344 if (!AddProductTo(energy, energy, &sum_square_duration)) break;
1345
1346 unscaled_lp_contrib += event.x_lp_end * ToDouble(energy);
1347 current_start_min = std::min(current_start_min, event.x_start_min);
1348
1349 if (dp.CurrentMax() != capacity_max) {
1350 if (event.y_size_is_fixed) {
1351 dp.Add(event.y_size_min.value());
1352 } else if (!event.decomposed_energy.empty()) {
1353 possible_demands.clear();
1354 for (const auto& [literal, size, demand] : event.decomposed_energy) {
1355 if (assignment.LiteralIsFalse(literal)) continue;
1356 possible_demands.push_back(demand.value());
1357 }
1358 dp.AddChoices(possible_demands);
1359 } else {
1360 dp.Add(capacity_max.value());
1361 }
1362 }
1363
1364 // This is competing with the brute force approach. Skip cases covered
1365 // by the other code.
1366 if (skip_low_sizes && i < 7) continue;
1367
1368 // We compute the cuts like if it was a disjunctive cut with all the
1369 // duration actually equal to energy / capacity. But to keep the
1370 // computation in the integer domain, we multiply by capacity
1371 // everywhere instead.
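      // Illustrative example (hypothetical numbers, not part of the original
      // comment): two tasks with energies 6 and 4 and reachable_capacity = 2
      // behave like durations 3 and 2. The integer computation below gives
      // min_contrib = ((6 + 4)^2 + 6^2 + 4^2) / 2 = 76, which is the Queyranne
      // bound for durations 3 and 2 (i.e. 19) times reachable_capacity^2, plus
      // the current_start_min * sum_duration * reachable_capacity offset.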
1372 const IntegerValue reachable_capacity = dp.CurrentMax();
1373 IntegerValue min_contrib = 0;
1374 if (!AddProductTo(sum_duration, sum_duration, &min_contrib)) break;
1375 if (!AddTo(sum_square_duration, &min_contrib)) break;
1376 min_contrib = min_contrib / 2; // The above is the double of the area.
1377
1378 const IntegerValue intermediate =
1379 CapProdI(sum_duration, reachable_capacity);
1380 if (AtMinOrMaxInt64I(intermediate)) break;
1381 const IntegerValue offset = CapProdI(current_start_min, intermediate);
1382 if (AtMinOrMaxInt64I(offset)) break;
1383 if (!AddTo(offset, &min_contrib)) break;
1384
1385// We compute the efficacy in the unscaled domain where the l2 norm of
1386// the cut is exactly the sqrt of the sum of squared durations.
1387 const double efficacy =
1388 (ToDouble(min_contrib) / ToDouble(reachable_capacity) -
1389 unscaled_lp_contrib) /
1390 std::sqrt(ToDouble(sum_square_duration));
1391
1392 // TODO(user): Check overflow and ignore if too big.
1393 if (efficacy > best_efficacy) {
1394 best_efficacy = efficacy;
1395 best_end = i;
1396 best_min_contrib = min_contrib;
1397 best_capacity = reachable_capacity;
1398 }
1399 }
1400 if (best_end != -1) {
1401 LinearConstraintBuilder cut(model, best_min_contrib, kMaxIntegerValue);
1402 bool is_lifted = false;
1403 bool add_energy_to_name = false;
1404 for (int i = 0; i <= best_end; ++i) {
1405 const CtEvent& event = residual_tasks[i];
1406 is_lifted |= event.lifted;
1407 add_energy_to_name |= event.use_energy;
1408 cut.AddTerm(event.x_end, event.energy_min * best_capacity);
1409 }
1410 std::string full_name(cut_name);
1411 if (is_lifted) full_name.append("_lifted");
1412 if (add_energy_to_name) full_name.append("_energy");
1413 if (best_capacity < capacity_max) {
1414 full_name.append("_subsetsum");
1415 VLOG(2) << full_name << ": capacity = " << best_capacity << "/"
1416 << capacity_max;
1417 }
1418 top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues());
1419 }
1420 }
1421 top_n_cuts.TransferToManager(manager);
1422}
1423
1424CutGenerator CreateNoOverlapCompletionTimeCutGenerator(
1425 SchedulingConstraintHelper* helper, Model* model) {
1426 CutGenerator result;
1427 result.only_run_at_level_zero = true;
1428 AddIntegerVariableFromIntervals(helper, model, &result.vars);
1429 gtl::STLSortAndRemoveDuplicates(&result.vars);
1430
1431 result.generate_cuts = [helper, model](LinearConstraintManager* manager) {
1432 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1433
1434 auto generate_cuts = [model, manager, helper](bool mirror) {
1435 std::vector<CtEvent> events;
1436 const auto& lp_values = manager->LpValues();
1437 for (int index = 0; index < helper->NumTasks(); ++index) {
1438 if (!helper->IsPresent(index)) continue;
1439 const IntegerValue size_min = helper->SizeMin(index);
1440 if (size_min > 0) {
1441 const AffineExpression end_expr = helper->Ends()[index];
1442 CtEvent event(index, helper);
1443 event.x_end = end_expr;
1444 event.x_lp_end = end_expr.LpValue(lp_values);
1445 event.y_size_min = IntegerValue(1);
1446 event.energy_min = size_min;
1447 events.push_back(event);
1448 }
1449 }
1450
1451 const std::string mirror_str = mirror ? "Mirror" : "";
1452 GenerateShortCompletionTimeCutsWithExactBound(
1453 absl::StrCat("NoOverlapCompletionTimeExhaustive", mirror_str), events,
1454 /*capacity_max=*/IntegerValue(1), model, manager);
1455
1456 GenerateCompletionTimeCutsWithEnergy(
1457 absl::StrCat("NoOverlapCompletionTimeQueyrane", mirror_str),
1458 std::move(events), /*capacity_max=*/IntegerValue(1),
1459 /*skip_low_sizes=*/true, model, manager);
1460 };
1461 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1462 generate_cuts(false);
1463 if (!helper->SynchronizeAndSetTimeDirection(false)) return false;
1464 generate_cuts(true);
1465 return true;
1466 };
1467 return result;
1468}
1469
1470CutGenerator CreateCumulativeCompletionTimeCutGenerator(
1471 SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper,
1472 const AffineExpression& capacity, Model* model) {
1473 CutGenerator result;
1474 result.only_run_at_level_zero = true;
1475 AppendVariablesFromCapacityAndDemands(capacity, demands_helper, model,
1476 &result.vars);
1477 AddIntegerVariableFromIntervals(helper, model, &result.vars);
1478 gtl::STLSortAndRemoveDuplicates(&result.vars);
1479
1480 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
1481 result.generate_cuts = [integer_trail, helper, demands_helper, capacity,
1482 model](LinearConstraintManager* manager) {
1483 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1484 demands_helper->CacheAllEnergyValues();
1485
1486 auto generate_cuts = [integer_trail, model, manager, helper, demands_helper,
1487 capacity](bool mirror) {
1488 std::vector<CtEvent> events;
1489 const auto& lp_values = manager->LpValues();
1490 for (int index = 0; index < helper->NumTasks(); ++index) {
1491 if (!helper->IsPresent(index)) continue;
1492 if (helper->SizeMin(index) > 0 &&
1493 demands_helper->DemandMin(index) > 0) {
1494 CtEvent event(index, helper);
1495 event.x_end = helper->Ends()[index];
1496 event.x_lp_end = event.x_end.LpValue(lp_values);
1497 event.y_size_min = demands_helper->DemandMin(index);
1498 event.energy_min = demands_helper->EnergyMin(index);
1499 event.decomposed_energy = demands_helper->DecomposedEnergies()[index];
1500 event.y_size_is_fixed = demands_helper->DemandIsFixed(index);
1501 events.push_back(event);
1502 }
1503 }
1504
1505 const IntegerValue capacity_max = integer_trail->UpperBound(capacity);
1506 const std::string mirror_str = mirror ? "Mirror" : "";
1507 GenerateShortCompletionTimeCutsWithExactBound(
1508 absl::StrCat("CumulativeCompletionTimeExhaustive", mirror_str),
1509 events, capacity_max, model, manager);
1510
1511 GenerateCompletionTimeCutsWithEnergy(
1512 absl::StrCat("CumulativeCompletionTimeQueyrane", mirror_str),
1513 std::move(events), capacity_max,
1514 /*skip_low_sizes=*/true, model, manager);
1515 };
1516 if (!helper->SynchronizeAndSetTimeDirection(true)) return false;
1517 generate_cuts(false);
1518 if (!helper->SynchronizeAndSetTimeDirection(false)) return false;
1519 generate_cuts(true);
1520 return true;
1521 };
1522 return result;
1523}
1524
1525} // namespace sat
1526} // namespace operations_research