Google OR-Tools v9.15
A fast and portable software suite for combinatorial optimization.
Loading...
Searching...
No Matches
cumulative.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <functional>
18#include <vector>
19
20#include "absl/log/check.h"
21#include "absl/strings/str_join.h"
22#include "absl/types/span.h"
26#include "ortools/sat/integer.h"
31#include "ortools/sat/model.h"
41
42namespace operations_research {
43namespace sat {
44
// Returns a model-loading closure that posts a cumulative scheduling
// constraint: at every point in time, the sum of the demands of the tasks
// overlapping that point must not exceed `capacity`. Besides registering the
// cumulative with the IntervalsRepository, it adds redundant linear
// constraints, optionally extracts a disjunctive core, and attaches the
// timetabling / overload-checking / edge-finding propagators.
//
// NOTE(review): this listing is a Doxygen export; several original source
// lines were dropped by the extraction (visible as jumps in the embedded
// line numbers, e.g. 48 -> 50). In particular the trailing
// `SchedulingConstraintHelper* helper` parameter (original line 49) is used
// below but its declaration is missing here -- confirm against the original
// cumulative.cc before relying on this text.
45std::function<void(Model*)> Cumulative(
46    const std::vector<Literal>& enforcement_literals,
47    const std::vector<IntervalVariable>& vars,
48    absl::Span<const AffineExpression> demands, AffineExpression capacity,
50  return [=, demands = std::vector<AffineExpression>(
51                 demands.begin(), demands.end())](Model* model) mutable {
52    auto* intervals = model->GetOrCreate<IntervalsRepository>();
53    auto* encoder = model->GetOrCreate<IntegerEncoder>();
54    auto* integer_trail = model->GetOrCreate<IntegerTrail>();
55    auto* watcher = model->GetOrCreate<GenericLiteralWatcher>();
56    SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
57
    // Enforce capacity >= 0: directly when the constraint is unconditional,
    // otherwise as a conditional linear constraint under the enforcements.
58    if (enforcement_literals.empty()) {
59      if (!integer_trail->SafeEnqueue(capacity.GreaterOrEqual(0), {})) {
60        sat_solver->NotifyThatModelIsUnsat();
61      }
62    } else {
63      LinearConstraintBuilder builder(model, IntegerValue(0), kMaxIntegerValue);
64      builder.AddTerm(capacity, IntegerValue(1));
65      LoadConditionalLinearConstraint(enforcement_literals, builder.Build(),
66                                      model);
67    }
68    if (demands.empty()) {
69      // If there is no demand, since we already added a constraint that the
70      // capacity is not negative above, we can stop here.
71      return;
72    }
73
74    // Redundant constraints to ensure that the resource capacity is high enough
75    // for each task. Also ensure that no task consumes more resource than what
76    // is available. This is useful because the subsequent propagators do not
77    // filter the capacity variable very well.
78    for (int i = 0; i < demands.size(); ++i) {
79      if (intervals->MaxSize(vars[i]) == 0) continue;
80
      // demand[i] - capacity <= 0, i.e. demand[i] <= capacity.
81      LinearConstraintBuilder builder(model, kMinIntegerValue, IntegerValue(0));
82      builder.AddTerm(demands[i], IntegerValue(1));
83      builder.AddTerm(capacity, IntegerValue(-1));
84      LinearConstraint ct = builder.Build();
85
86      std::vector<Literal> task_enforcement_literals = enforcement_literals;
87      if (intervals->IsOptional(vars[i])) {
88        task_enforcement_literals.push_back(
89            intervals->PresenceLiteral(vars[i]));
90      }
91
92      // If the interval can be of size zero, it currently do not count towards
93      // the capacity. TODO(user): Change that since we have optional interval
94      // for this.
95      if (intervals->MinSize(vars[i]) <= 0) {
96        task_enforcement_literals.push_back(
97            encoder->GetOrCreateAssociatedLiteral(
98                intervals->Size(vars[i]).GreaterOrEqual(IntegerValue(1))));
99      }
100
101      if (task_enforcement_literals.empty()) {
102        LoadLinearConstraint(ct, model);
103      } else {
104        LoadConditionalLinearConstraint(task_enforcement_literals, ct, model);
105      }
106    }
107
108    if (vars.size() == 1) return;
109
110    const SatParameters& parameters = *(model->GetOrCreate<SatParameters>());
111
112    // Detect a subset of intervals that needs to be in disjunction and add a
113    // Disjunctive() constraint over them.
    // NOTE(review): original line 114 (presumably the '{' opening the local
    // scope closed by the '}' at line 171 below) was dropped by the
    // extraction.
115      // TODO(user): We need to exclude intervals that can be of size zero
116      // because the disjunctive do not "ignore" them like the cumulative
117      // does. That is, the interval [2,2) will be assumed to be in
118      // disjunction with [1, 3) for instance. We need to uniformize the
119      // handling of interval with size zero.
120      std::vector<IntervalVariable> in_disjunction;
121      IntegerValue min_of_demands = kMaxIntegerValue;
122      const IntegerValue capa_max = integer_trail->UpperBound(capacity);
      // A task whose minimum demand is strictly more than half the maximum
      // capacity cannot overlap any other such task, so all of them are
      // pairwise in disjunction.
123      for (int i = 0; i < vars.size(); ++i) {
124        const IntegerValue size_min = intervals->MinSize(vars[i]);
125        if (size_min == 0) continue;
126        const IntegerValue demand_min = integer_trail->LowerBound(demands[i]);
127        if (2 * demand_min > capa_max) {
128          in_disjunction.push_back(vars[i]);
129          min_of_demands = std::min(min_of_demands, demand_min);
130        }
131      }
132
133      // Liftable? We might be able to add one more interval!
134      if (!in_disjunction.empty()) {
135        IntervalVariable lift_var;
136        IntegerValue lift_size(0);
137        for (int i = 0; i < vars.size(); ++i) {
138          const IntegerValue size_min = intervals->MinSize(vars[i]);
139          if (size_min == 0) continue;
140          const IntegerValue demand_min = integer_trail->LowerBound(demands[i]);
141          if (2 * demand_min > capa_max) continue;
          // The candidate cannot run in parallel with even the smallest task
          // already selected, so it is in disjunction with all of them; keep
          // the one with the largest minimum size.
142          if (min_of_demands + demand_min > capa_max && size_min > lift_size) {
143            lift_var = vars[i];
144            lift_size = size_min;
145          }
146        }
147        if (lift_size > 0) {
148          in_disjunction.push_back(lift_var);
149        }
150      }
151
152      // Add a disjunctive constraint on the intervals in in_disjunction. Do not
153      // create the cumulative at all when all intervals must be in disjunction.
154      //
155      // TODO(user): Do proper experiments to see how beneficial this is, the
156      // disjunctive will propagate more but is also using slower algorithms.
157      // That said, this is more a question of optimizing the disjunctive
158      // propagation code.
159      //
160      // TODO(user): Another "known" idea is to detect pair of tasks that must
161      // be in disjunction and to create a Boolean to indicate which one is
162      // before the other. It shouldn't change the propagation, but may result
163      // in a faster one with smaller explanations, and the solver can also take
164      // decision on such Boolean.
165      //
166      // TODO(user): A better place for stuff like this could be in the
167      // presolver so that it is easier to disable and play with alternatives.
168      if (in_disjunction.size() > 1)
169        AddDisjunctive(enforcement_literals, in_disjunction, model);
170      if (in_disjunction.size() == vars.size()) return;
171    }
172
    // NOTE(review): `helper` here is the function's last parameter whose
    // declaration (original line 49) was lost in the extraction.
173    if (helper == nullptr) {
174      helper = intervals->GetOrCreateHelper(enforcement_literals, vars);
175    }
176    SchedulingDemandHelper* demands_helper =
177        intervals->GetOrCreateDemandHelper(helper, demands);
178    intervals->RegisterCumulative({capacity, helper, demands_helper});
179
180    // For each variables that is after a subset of task ends (i.e. like a
181    // makespan objective), we detect it and add a special constraint to
182    // propagate it.
183    //
184    // TODO(user): Models that include the makespan as a special interval might
185    // be better, but then not everyone does that. In particular this code
186    // allows to have decent lower bound on the large cumulative minizinc
187    // instances.
188    //
189    // TODO(user): this require the precedence constraints to be already loaded,
190    // and there is no guarantee of that currently. Find a more robust way.
191    //
192    // TODO(user): There is a bit of code duplication with the disjunctive
193    // precedence propagator. Abstract more?
194    if (parameters.use_hard_precedences_in_cumulative()) {
195      // The CumulativeIsAfterSubsetConstraint() always reset the helper to the
196      // forward time direction, so it is important to also precompute the
197      // precedence relation using the same direction! This is needed in case
198      // the helper has already been used and set in the other direction.
199      if (!helper->SynchronizeAndSetTimeDirection(true)) {
200        model->GetOrCreate<SatSolver>()->NotifyThatModelIsUnsat();
201        return;
202      }
203
204      std::vector<IntegerVariable> index_to_end_vars;
205      std::vector<int> index_to_task;
      // NOTE(review): this clear() is redundant -- the vector was just
      // default-constructed above.
206      index_to_end_vars.clear();
207      for (int t = 0; t < helper->NumTasks(); ++t) {
208        const AffineExpression& end_exp = helper->Ends()[t];
209
210        // TODO(user): Handle generic affine relation?
211        if (end_exp.var == kNoIntegerVariable || end_exp.coeff != 1) continue;
212        index_to_end_vars.push_back(end_exp.var);
213        index_to_task.push_back(t);
214      }
215
216      // TODO(user): This can lead to many constraints. By analyzing a bit more
217      // the precedences, we could restrict that. In particular for cases were
218      // the cumulative is always (bunch of tasks B), T, (bunch of tasks A) and
219      // task T always in the middle, we never need to explicit list the
220      // precedence of a task in B with a task in A.
221      //
222      // TODO(user): If more than one variable are after the same set of
223      // intervals, we should regroup them in a single constraint rather than
224      // having two independent constraint doing the same propagation.
225      std::vector<FullIntegerPrecedence> full_precedences;
226      if (parameters.exploit_all_precedences()) {
227        model->GetOrCreate<TransitivePrecedencesEvaluator>()
228            ->ComputeFullPrecedences(index_to_end_vars, &full_precedences);
229      }
230      for (const FullIntegerPrecedence& data : full_precedences) {
231        const int size = data.indices.size();
232        if (size <= 1) continue;
233
234        const IntegerVariable var = data.var;
235        std::vector<int> subtasks;
236        std::vector<IntegerValue> offsets;
237        IntegerValue sum_of_demand_max(0);
238        for (int i = 0; i < size; ++i) {
239          const int t = index_to_task[data.indices[i]];
240          subtasks.push_back(t);
241          sum_of_demand_max += integer_trail->LevelZeroUpperBound(demands[t]);
242
243          // We have var >= end_exp.var + offset, so
244          // var >= (end_exp.var + end_exp.cte) + (offset - end_exp.cte)
245          // var >= task end + new_offset.
246          const AffineExpression& end_exp = helper->Ends()[t];
247          offsets.push_back(data.offsets[i] - end_exp.constant);
248        }
249        if (sum_of_demand_max > integer_trail->LevelZeroLowerBound(capacity)) {
250          VLOG(2) << "Cumulative precedence constraint! var= " << var
251                  << " #task: " << absl::StrJoin(subtasks, ",");
          // NOTE(review): original line 252 was dropped by the extraction; it
          // presumably declared `constraint` and assigned it from the `new`
          // expression below (it is used on lines 256-257).
253              new CumulativeIsAfterSubsetConstraint(var, capacity, subtasks,
254                                                    offsets, helper,
255                                                    demands_helper, model);
256          constraint->RegisterWith(watcher);
257          model->TakeOwnership(constraint);
258        }
259      }
260    }
261
262    // Propagator responsible for applying Timetabling filtering rule. It
263    // increases the minimum of the start variables, decrease the maximum of the
264    // end variables, and increase the minimum of the capacity variable.
265    TimeTablingPerTask* time_tabling =
266        new TimeTablingPerTask(capacity, helper, demands_helper, model);
267    time_tabling->RegisterWith(watcher);
268    model->TakeOwnership(time_tabling);
269
270    // Propagator responsible for applying the Overload Checking filtering rule.
271    // It increases the minimum of the capacity variable.
272    if (parameters.use_overload_checker_in_cumulative()) {
273      AddCumulativeOverloadChecker(capacity, helper, demands_helper, model);
274    }
    // NOTE(review): original line 275 was dropped by the extraction; given the
    // '}' at line 290, it presumably opened an if() (a parameter check)
    // guarding this DFF overload-checker block.
276      // Since we use the potential DFF conflict on demands to apply the
277      // heuristic, only do so if any demand is greater than 1.
278      bool any_demand_greater_than_one = false;
279      for (int i = 0; i < vars.size(); ++i) {
280        const IntegerValue demand_min = integer_trail->LowerBound(demands[i]);
281        if (demand_min > 1) {
282          any_demand_greater_than_one = true;
283          break;
284        }
285      }
286      if (any_demand_greater_than_one) {
287        AddCumulativeOverloadCheckerDff(capacity, helper, demands_helper,
288                                        model);
289      }
290    }
291
292    // Propagator responsible for applying the Timetable Edge finding filtering
293    // rule. It increases the minimum of the start variables and decreases the
294    // maximum of the end variables,
    // NOTE(review): original lines 295 and 297 were dropped by the extraction;
    // together with line 296 they presumably formed the if() condition guarding
    // this block -- likely comparing NumTasks() against
    // max_num_intervals_for_timetable_edge_finding() (see the cross references)
    // -- closed by the '}' at line 302. Confirm against the original source.
296        helper->NumTasks() <=
298      TimeTableEdgeFinding* time_table_edge_finding =
299          new TimeTableEdgeFinding(capacity, helper, demands_helper, model);
300      time_table_edge_finding->RegisterWith(watcher);
301      model->TakeOwnership(time_table_edge_finding);
302    }
303  };
304}
305
306std::function<void(Model*)> CumulativeTimeDecomposition(
307 absl::Span<const Literal> enforcement_literals,
308 absl::Span<const IntervalVariable> vars,
309 absl::Span<const AffineExpression> demands, AffineExpression capacity,
310 SchedulingConstraintHelper* /*helper*/) {
311 CHECK(enforcement_literals.empty());
312 return [=, vars = std::vector<IntervalVariable>(vars.begin(), vars.end()),
313 demands = std::vector<AffineExpression>(
314 demands.begin(), demands.end())](Model* model) {
315 if (vars.empty()) return;
316
317 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
318 CHECK(integer_trail->IsFixed(capacity));
319 const Coefficient fixed_capacity(
320 integer_trail->UpperBound(capacity).value());
321
322 const int num_tasks = vars.size();
323 SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
324 IntegerEncoder* encoder = model->GetOrCreate<IntegerEncoder>();
325 IntervalsRepository* repository = model->GetOrCreate<IntervalsRepository>();
326
327 std::vector<AffineExpression> start_exprs;
328 std::vector<AffineExpression> end_exprs;
329 std::vector<IntegerValue> fixed_demands;
330
331 for (int t = 0; t < num_tasks; ++t) {
332 start_exprs.push_back(repository->Start(vars[t]));
333 end_exprs.push_back(repository->End(vars[t]));
334 CHECK(integer_trail->IsFixed(demands[t]));
335 fixed_demands.push_back(integer_trail->LowerBound(demands[t]));
336 }
337
338 // Compute time range.
339 IntegerValue min_start = kMaxIntegerValue;
340 IntegerValue max_end = kMinIntegerValue;
341 for (int t = 0; t < num_tasks; ++t) {
342 min_start =
343 std::min(min_start, integer_trail->LowerBound(start_exprs[t]));
344 max_end = std::max(max_end, integer_trail->UpperBound(end_exprs[t]));
345 }
346
347 for (IntegerValue time = min_start; time < max_end; ++time) {
348 std::vector<LiteralWithCoeff> literals_with_coeff;
349 for (int t = 0; t < num_tasks; ++t) {
350 if (!sat_solver->Propagate()) return;
351 const IntegerValue start_min =
352 integer_trail->LowerBound(start_exprs[t]);
353 const IntegerValue end_max = integer_trail->UpperBound(end_exprs[t]);
354 if (end_max <= time || time < start_min || fixed_demands[t] == 0) {
355 continue;
356 }
357
358 // Task t consumes the resource at time if consume_condition is true.
359 std::vector<Literal> consume_condition;
360 const Literal consume = Literal(model->Add(NewBooleanVariable()), true);
361
362 // Task t consumes the resource at time if it is present.
363 if (repository->IsOptional(vars[t])) {
364 consume_condition.push_back(repository->PresenceLiteral(vars[t]));
365 }
366
367 // Task t overlaps time.
368 consume_condition.push_back(encoder->GetOrCreateAssociatedLiteral(
369 start_exprs[t].LowerOrEqual(IntegerValue(time))));
370 consume_condition.push_back(encoder->GetOrCreateAssociatedLiteral(
371 end_exprs[t].GreaterOrEqual(IntegerValue(time + 1))));
372
373 model->Add(ReifiedBoolAnd(consume_condition, consume));
374
375 // this is needed because we currently can't create a boolean variable
376 // if the model is unsat.
377 if (sat_solver->ModelIsUnsat()) return;
378
379 literals_with_coeff.push_back(
380 LiteralWithCoeff(consume, Coefficient(fixed_demands[t].value())));
381 }
382 // The profile cannot exceed the capacity at time.
383 sat_solver->AddLinearConstraint(false, Coefficient(0), true,
384 fixed_capacity, &literals_with_coeff);
385
386 // Abort if UNSAT.
387 if (sat_solver->ModelIsUnsat()) return;
388 }
389 };
390}
391
392std::function<void(Model*)> CumulativeUsingReservoir(
393 absl::Span<const Literal> enforcement_literals,
394 absl::Span<const IntervalVariable> vars,
395 absl::Span<const AffineExpression> demands, AffineExpression capacity,
396 SchedulingConstraintHelper* /*helper*/) {
397 return [=, vars = std::vector<IntervalVariable>(vars.begin(), vars.end()),
398 demands = std::vector<AffineExpression>(
399 demands.begin(), demands.end())](Model* model) {
400 if (vars.empty()) return;
401
402 auto* integer_trail = model->GetOrCreate<IntegerTrail>();
403 auto* encoder = model->GetOrCreate<IntegerEncoder>();
404 auto* repository = model->GetOrCreate<IntervalsRepository>();
405
406 CHECK(integer_trail->IsFixed(capacity));
407 const IntegerValue fixed_capacity(
408 integer_trail->UpperBound(capacity).value());
409
410 std::vector<AffineExpression> times;
411 std::vector<AffineExpression> deltas;
412 std::vector<Literal> presences;
413
414 const int num_tasks = vars.size();
415 for (int t = 0; t < num_tasks; ++t) {
416 CHECK(integer_trail->IsFixed(demands[t]));
417 times.push_back(repository->Start(vars[t]));
418 deltas.push_back(demands[t]);
419 times.push_back(repository->End(vars[t]));
420 deltas.push_back(demands[t].Negated());
421 if (repository->IsOptional(vars[t])) {
422 presences.push_back(repository->PresenceLiteral(vars[t]));
423 presences.push_back(repository->PresenceLiteral(vars[t]));
424 } else {
425 presences.push_back(encoder->GetTrueLiteral());
426 presences.push_back(encoder->GetTrueLiteral());
427 }
428 }
429 AddReservoirConstraint(enforcement_literals, times, deltas, presences, 0,
430 fixed_capacity.value(), model);
431 };
432}
433
434} // namespace sat
435} // namespace operations_research
IntegerValue LowerBound(IntegerVariable i) const
Definition integer.h:1537
IntegerValue UpperBound(IntegerVariable i) const
Definition integer.h:1541
bool IsFixed(IntegerVariable i) const
Definition integer.h:1545
void AddTerm(IntegerVariable var, IntegerValue coeff)
::int32_t max_num_intervals_for_timetable_edge_finding() const
ABSL_MUST_USE_RESULT bool Propagate()
bool AddLinearConstraint(bool use_lower_bound, Coefficient lower_bound, bool use_upper_bound, Coefficient upper_bound, std::vector< Literal > *enforcement_literals, std::vector< LiteralWithCoeff > *cst)
absl::Span< const AffineExpression > Ends() const
ABSL_MUST_USE_RESULT bool SynchronizeAndSetTimeDirection(bool is_forward)
void RegisterWith(GenericLiteralWatcher *watcher)
void RegisterWith(GenericLiteralWatcher *watcher)
Definition timetable.cc:378
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
std::function< void(Model *)> CumulativeUsingReservoir(absl::Span< const Literal > enforcement_literals, absl::Span< const IntervalVariable > vars, absl::Span< const AffineExpression > demands, AffineExpression capacity, SchedulingConstraintHelper *)
std::function< BooleanVariable(Model *)> NewBooleanVariable()
Definition integer.h:1757
void LoadConditionalLinearConstraint(const absl::Span< const Literal > enforcement_literals, const LinearConstraint &cst, Model *model)
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue.value())
std::function< void(Model *)> ReifiedBoolAnd(absl::Span< const Literal > literals, Literal r)
const IntegerVariable kNoIntegerVariable(-1)
void AddCumulativeOverloadCheckerDff(AffineExpression capacity, SchedulingConstraintHelper *helper, SchedulingDemandHelper *demands, Model *model)
void AddReservoirConstraint(absl::Span< const Literal > enforcement_literals, absl::Span< const AffineExpression > times, absl::Span< const AffineExpression > deltas, absl::Span< const Literal > presences, int64_t min_level, int64_t max_level, Model *model)
Definition timetable.cc:33
std::function< void(Model *)> Cumulative(const std::vector< Literal > &enforcement_literals, const std::vector< IntervalVariable > &vars, absl::Span< const AffineExpression > demands, AffineExpression capacity, SchedulingConstraintHelper *helper)
Definition cumulative.cc:45
void AddDisjunctive(const std::vector< Literal > &enforcement_literals, const std::vector< IntervalVariable > &intervals, Model *model)
void LoadLinearConstraint(const ConstraintProto &ct, Model *m)
std::function< void(Model *)> CumulativeTimeDecomposition(absl::Span< const Literal > enforcement_literals, absl::Span< const IntervalVariable > vars, absl::Span< const AffineExpression > demands, AffineExpression capacity, SchedulingConstraintHelper *)
void AddCumulativeOverloadChecker(AffineExpression capacity, SchedulingConstraintHelper *helper, SchedulingDemandHelper *demands, Model *model)
OR-Tools root namespace.
IntegerLiteral GreaterOrEqual(IntegerValue bound) const