Google OR-Tools v9.15
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
cuts.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/cuts.h"
15
16#include <algorithm>
17#include <array>
18#include <cmath>
19#include <cstdint>
20#include <functional>
21#include <limits>
22#include <string>
23#include <tuple>
24#include <utility>
25#include <vector>
26
27#include "absl/base/attributes.h"
28#include "absl/container/btree_map.h"
29#include "absl/container/btree_set.h"
30#include "absl/container/flat_hash_map.h"
31#include "absl/container/flat_hash_set.h"
32#include "absl/log/check.h"
33#include "absl/log/log.h"
34#include "absl/log/vlog_is_on.h"
35#include "absl/meta/type_traits.h"
36#include "absl/numeric/int128.h"
37#include "absl/strings/str_cat.h"
38#include "absl/strings/string_view.h"
39#include "absl/types/span.h"
44#include "ortools/sat/clause.h"
46#include "ortools/sat/integer.h"
50#include "ortools/sat/model.h"
56
57namespace operations_research {
58namespace sat {
59
60namespace {
61
62// TODO(user): the function ToDouble() does some testing for min/max integer
63// value and we don't need that here.
64double AsDouble(IntegerValue v) { return static_cast<double>(v.value()); }
65
66} // namespace
67
68std::string CutTerm::DebugString() const {
69 return absl::StrCat("coeff=", coeff.value(), " lp=", lp_value,
70 " range=", bound_diff.value(), " expr=[",
71 expr_coeffs[0].value(), ",", expr_coeffs[1].value(), "]",
72 " * [V", expr_vars[0].value(), ",V", expr_vars[1].value(),
73 "]", " + ", expr_offset.value());
74}
75
76std::string CutData::DebugString() const {
77 std::string result = absl::StrCat("CutData rhs=", rhs, "\n");
78 for (const CutTerm& term : terms) {
79 absl::StrAppend(&result, term.DebugString(), "\n");
80 }
81 return result;
82}
83
// Rewrites this term as its "complement" (bound_diff - X), moving the
// constant part of the substitution into *rhs.
void CutTerm::Complement(absl::int128* rhs) {
  // We replace coeff * X by coeff * (X - bound_diff + bound_diff)
  // which gives -coeff * complement(X) + coeff * bound_diff;
  *rhs -= absl::int128(coeff.value()) * absl::int128(bound_diff.value());

  // We keep the same expression variable.
  // NOTE(review): the loop body is elided in this listing — presumably it
  // negates expr_coeffs[i] (and adjusts expr_offset accordingly); verify
  // against the full source.
  for (int i = 0; i < 2; ++i) {
  }

  // Note that this is not involutive because of floating point error. Fix?
  lp_value = static_cast<double>(bound_diff.value()) - lp_value;
  coeff = -coeff;

  // Swap the implied bound info.
  // NOTE(review): the statement implementing this swap is elided in this
  // listing; verify against the full source.
}
102
103void CutTerm::ReplaceExpressionByLiteral(IntegerVariable var) {
104 CHECK_EQ(bound_diff, 1);
105 expr_coeffs[1] = 0;
106 if (VariableIsPositive(var)) {
107 expr_vars[0] = var;
108 expr_coeffs[0] = 1;
109 expr_offset = 0;
110 } else {
111 expr_vars[0] = PositiveVariable(var);
112 expr_coeffs[0] = -1;
113 expr_offset = 1;
114 }
115}
116
117IntegerVariable CutTerm::GetUnderlyingLiteralOrNone() const {
118 if (expr_coeffs[1] != 0) return kNoIntegerVariable;
119 if (bound_diff != 1) return kNoIntegerVariable;
120
121 if (expr_coeffs[0] > 0) {
122 if (expr_coeffs[0] != 1) return kNoIntegerVariable;
123 if (expr_offset != 0) return kNoIntegerVariable;
124 CHECK(VariableIsPositive(expr_vars[0]));
125 return expr_vars[0];
126 }
127
128 if (expr_coeffs[0] != -1) return kNoIntegerVariable;
129 if (expr_offset != 1) return kNoIntegerVariable;
130 CHECK(VariableIsPositive(expr_vars[0]));
131 return NegationOf(expr_vars[0]);
132}
133
134// To try to minimize the risk of overflow, we switch to the bound closer
135// to the lp_value. Since most of our base constraint for cut are tight,
136// hopefully this is not too bad.
137bool CutData::AppendOneTerm(IntegerVariable var, IntegerValue coeff,
138 double lp_value, IntegerValue lb, IntegerValue ub) {
139 if (coeff == 0) return true;
140 const IntegerValue bound_diff = ub - lb;
141
142 // Complement the variable so that it is has a positive coefficient.
143 const bool complement = coeff < 0;
144
145 // See formula below, the constant term is either coeff * lb or coeff * ub.
146 rhs -= absl::int128(coeff.value()) *
147 absl::int128(complement ? ub.value() : lb.value());
148
149 // Deal with fixed variable, no need to shift back in this case, we can
150 // just remove the term.
151 if (bound_diff == 0) return true;
152
153 CutTerm entry;
154 entry.expr_vars[0] = var;
155 entry.expr_coeffs[1] = 0;
156 entry.bound_diff = bound_diff;
157 if (complement) {
158 // X = -(UB - X) + UB
159 entry.expr_coeffs[0] = -IntegerValue(1);
160 entry.expr_offset = ub;
161 entry.coeff = -coeff;
162 entry.lp_value = static_cast<double>(ub.value()) - lp_value;
163 } else {
164 // C = (X - LB) + LB
165 entry.expr_coeffs[0] = IntegerValue(1);
166 entry.expr_offset = -lb;
167 entry.coeff = coeff;
168 entry.lp_value = lp_value - static_cast<double>(lb.value());
169 }
170 terms.push_back(entry);
171 return true;
172}
173
    // NOTE(review): the first signature line (return type, class scope, method
    // name and possibly a first parameter such as the lp_values map) is elided
    // in this listing — presumably CutData::FillFromLinearConstraint(); verify
    // against the full source.
    const LinearConstraint& base_ct,
    IntegerTrail* integer_trail) {
  // Start from "linear <= ub" and append the terms one by one, shifting each
  // variable to its level-zero bounds.
  rhs = absl::int128(base_ct.ub.value());
  terms.clear();
  const int num_terms = base_ct.num_terms;
  for (int i = 0; i < num_terms; ++i) {
    const IntegerVariable var = base_ct.vars[i];
    // Abort as soon as a term cannot be represented.
    if (!AppendOneTerm(var, base_ct.coeffs[i], lp_values[base_ct.vars[i]],
                       integer_trail->LevelZeroLowerBound(var),
                       integer_trail->LevelZeroUpperBound(var))) {
      return false;
    }
  }
  return true;
}
191
    // NOTE(review): the first signature line (return type, class scope and
    // method name) is elided in this listing — presumably
    // CutData::FillFromParallelVectors(); verify against the full source.
    IntegerValue ub, absl::Span<const IntegerVariable> vars,
    absl::Span<const IntegerValue> coeffs, absl::Span<const double> lp_values,
    absl::Span<const IntegerValue> lower_bounds,
    absl::Span<const IntegerValue> upper_bounds) {
  rhs = absl::int128(ub.value());
  terms.clear();

  const int size = lp_values.size();
  if (size == 0) return true;

  // All parallel spans must describe the same number of terms.
  CHECK_EQ(vars.size(), size);
  CHECK_EQ(coeffs.size(), size);
  CHECK_EQ(lower_bounds.size(), size);
  CHECK_EQ(upper_bounds.size(), size);

  for (int i = 0; i < size; ++i) {
    // Abort as soon as a term cannot be represented.
    if (!AppendOneTerm(vars[i], coeffs[i], lp_values[i], lower_bounds[i],
                       upper_bounds[i])) {
      return false;
    }
  }

  return true;
}
217
  // NOTE(review): the signature line is elided in this listing — presumably
  // void CutData::ComplementForPositiveCoefficients(); verify against the
  // full source.
  //
  // Complements every term with a strictly negative coefficient so that all
  // coefficients end up non-negative.
  for (CutTerm& term : terms) {
    if (term.coeff >= 0) continue;
    term.Complement(&rhs);
  }
}
224
  // NOTE(review): the signature line is elided in this listing — presumably
  // void CutData::ComplementForSmallerLpValues(); verify against the full
  // source.
  //
  // Complements a term when its lp_value is closer to its upper bound than to
  // zero, so the (complemented) lp_value becomes the smaller of the two.
  for (CutTerm& term : terms) {
    if (term.lp_value <= term.LpDistToMaxValue()) continue;
    term.Complement(&rhs);
  }
}
231
  // NOTE(review): the signature line is elided in this listing — presumably
  // bool CutData::AllCoefficientsArePositive() const; verify against the full
  // source.
  //
  // Returns false as soon as one coefficient is strictly negative.
  for (const CutTerm& term : terms) {
    if (term.coeff < 0) return false;
  }
  return true;
}
238
  // NOTE(review): the signature line and the initialization/increment of
  // num_relevant_entries are elided in this listing — presumably
  // void CutData::SortRelevantEntries(); verify against the full source.
  //
  // Computes max_magnitude over all terms while partitioning the entries with
  // a relevant lp value to the front of `terms`.
  max_magnitude = 0;
  for (CutTerm& entry : terms) {
    max_magnitude = std::max(max_magnitude, IntTypeAbs(entry.coeff));
    if (entry.HasRelevantLpValue()) {
      std::swap(terms[num_relevant_entries], entry);
    }
  }

  // Sort by larger lp_value first.
  std::sort(terms.begin(), terms.begin() + num_relevant_entries,
            [](const CutTerm& a, const CutTerm& b) {
              return a.lp_value > b.lp_value;
            });
}
256
  // NOTE(review): the signature line is elided in this listing — presumably
  // double CutData::ComputeViolation() const; verify against the full source.
  //
  // Violation = activity at the lp solution minus the rhs; positive means the
  // cut is violated by the current lp point.
  double violation = -static_cast<double>(rhs);
  for (const CutTerm& term : terms) {
    violation += term.lp_value * static_cast<double>(term.coeff.value());
  }
  return violation;
}
264
  // NOTE(review): the signature line is elided in this listing — presumably
  // double CutData::ComputeEfficacy() const; verify against the full source.
  //
  // Efficacy = violation divided by the L2 norm of the coefficient vector,
  // i.e. the euclidean distance from the lp point to the cut hyperplane.
  double violation = -static_cast<double>(rhs);
  double norm = 0.0;
  for (const CutTerm& term : terms) {
    const double coeff = static_cast<double>(term.coeff.value());
    violation += term.lp_value * coeff;
    norm += coeff * coeff;
  }
  return violation / std::sqrt(norm);
}
275
276// We can only merge the term if term.coeff + old_coeff do not overflow and
277// if t * new_coeff do not overflow.
278//
279// If we cannot merge the term, we will keep them separate. The produced cut
280// will be less strong, but can still be used.
281bool CutDataBuilder::MergeIfPossible(IntegerValue t, CutTerm& to_add,
282 CutTerm& target) {
283 DCHECK_EQ(to_add.expr_vars[0], target.expr_vars[0]);
284 DCHECK_EQ(to_add.expr_coeffs[0], target.expr_coeffs[0]);
285
286 const IntegerValue new_coeff = CapAddI(to_add.coeff, target.coeff);
287 if (AtMinOrMaxInt64I(new_coeff) || ProdOverflow(t, new_coeff)) {
288 return false;
289 }
290
291 to_add.coeff = 0; // Clear since we merge it.
292 target.coeff = new_coeff;
293 return true;
294}
295
// We only deal with coeff * Bool or coeff * (1 - Bool).
//
// Adds the given Boolean terms to the cut, merging a new term into an
// existing one (same variable, same sign of expr_coeffs[0]) whenever the
// merged coefficient cannot overflow. Returns the number of merges performed.
//
// TODO(user): Because of merges, we might have entries with a coefficient of
// zero that are not useful. Remove them?
int CutDataBuilder::AddOrMergeBooleanTerms(absl::Span<CutTerm> new_terms,
                                           IntegerValue t, CutData* cut) {
  if (new_terms.empty()) return 0;

  bool_index_.clear();
  secondary_bool_index_.clear();
  int num_merges = 0;

  // Fill the maps, merging duplicates inside new_terms on the fly.
  // bool_index_ indexes the "direct" form (positive expr coefficient) and
  // secondary_bool_index_ the complemented form.
  int i = 0;
  for (CutTerm& term : new_terms) {
    const IntegerVariable var = term.expr_vars[0];
    auto& map = term.expr_coeffs[0] > 0 ? bool_index_ : secondary_bool_index_;
    const auto [it, inserted] = map.insert({var, i});
    if (!inserted) {
      // Same variable and form already seen in new_terms: merge into it.
      if (MergeIfPossible(t, term, new_terms[it->second])) {
        ++num_merges;
      }
    }
    ++i;
  }

  // Loop over the cut now and merge matching new terms into existing entries.
  // NOTE(review): the comment about "looping with indices" in the original
  // looks stale — new terms are only appended after this loop.
  for (CutTerm& term : cut->terms) {
    if (term.bound_diff != 1) continue;
    if (!term.IsSimple()) continue;

    const IntegerVariable var = term.expr_vars[0];
    auto& map = term.expr_coeffs[0] > 0 ? bool_index_ : secondary_bool_index_;
    auto it = map.find(var);
    if (it == map.end()) continue;

    // We found a match, try to merge the map entry into the cut.
    // Note that we don't waste time erasing this entry from the map since
    // we should have no duplicates in the original cut.
    if (MergeIfPossible(t, new_terms[it->second], term)) {
      ++num_merges;
    }
  }

  // Finally add the terms we couldn't merge (non-zero coefficient left).
  for (const CutTerm& term : new_terms) {
    if (term.coeff == 0) continue;
    cut->terms.push_back(term);
  }

  return num_merges;
}
349
// Converts the CutData back into a LinearConstraint, expanding each term's
// affine expression and returning false on any int64 overflow.
//
// TODO(user): Divide by gcd first to avoid possible overflow in the
// conversion? it is however unlikely given that our coeffs should be small.
ABSL_DEPRECATED("Only used in tests, this will be removed.")
// NOTE(review): the signature line is elided in this listing — presumably
// bool CutDataBuilder::ConvertToLinearConstraint(const CutData& cut,
// verify against the full source.
    LinearConstraint* output) {
  tmp_map_.clear();
  // The int128 rhs must fit in an int64 for the output constraint.
  if (cut.rhs > absl::int128(std::numeric_limits<int64_t>::max()) ||
      cut.rhs < absl::int128(std::numeric_limits<int64_t>::min())) {
    return false;
  }
  IntegerValue new_rhs = static_cast<int64_t>(cut.rhs);
  for (const CutTerm& term : cut.terms) {
    // Expand coeff * (expr_coeffs . expr_vars + expr_offset), accumulating
    // per-variable coefficients and moving the offset into the rhs.
    for (int i = 0; i < 2; ++i) {
      if (term.expr_coeffs[i] == 0) continue;
      if (!AddProductTo(term.coeff, term.expr_coeffs[i],
                        &tmp_map_[term.expr_vars[i]])) {
        return false;
      }
    }
    if (!AddProductTo(-term.coeff, term.expr_offset, &new_rhs)) {
      return false;
    }
  }

  output->lb = kMinIntegerValue;
  output->ub = new_rhs;
  output->resize(tmp_map_.size());
  int new_size = 0;
  // Skip variables whose accumulated coefficient cancelled to zero.
  for (const auto [var, coeff] : tmp_map_) {
    if (coeff == 0) continue;
    output->vars[new_size] = var;
    output->coeffs[new_size] = coeff;
    ++new_size;
  }
  output->resize(new_size);
  DivideByGCD(output);
  return true;
}
388
389namespace {
390
391// Minimum amount of violation of the cut constraint by the solution. This
392// is needed to avoid numerical issues and adding cuts with minor effect.
393const double kMinCutViolation = 1e-4;
394
395IntegerValue PositiveRemainder(absl::int128 dividend,
396 IntegerValue positive_divisor) {
397 DCHECK_GT(positive_divisor, 0);
398 const IntegerValue m =
399 static_cast<int64_t>(dividend % absl::int128(positive_divisor.value()));
400 return m < 0 ? m + positive_divisor : m;
401}
402
403// We use the fact that f(k * divisor + rest) = k * f(divisor) + f(rest)
404absl::int128 ApplyToInt128(const std::function<IntegerValue(IntegerValue)>& f,
405 IntegerValue divisor, absl::int128 value) {
406 const IntegerValue rest = PositiveRemainder(value, divisor);
407 const absl::int128 k =
408 (value - absl::int128(rest.value())) / absl::int128(divisor.value());
409 const absl::int128 result =
410 k * absl::int128(f(divisor).value()) + absl::int128(f(rest).value());
411 return result;
412}
413
// Apply f() to the cut with a potential improvement for one Boolean:
//
// If we have a Boolean X, and a cut: terms + a * X <= b;
// By setting X to true or false, we have two inequalities:
// terms <= b if X == 0
// terms <= b - a if X == 1
// We can apply f to both inequalities and recombine:
// f(terms) <= f(b) * (1 - X) + f(b - a) * X
// Which change the final coeff of X from f(a) to [f(b) - f(b - a)].
// This can only improve the cut since f(b) >= f(b - a) + f(a)
//
// Returns the index of the bumped term, or -1 if no term was bumped.
int ApplyWithPotentialBump(const std::function<IntegerValue(IntegerValue)>& f,
                           const IntegerValue divisor, CutData* cut) {
  int bump_index = -1;
  double bump_score = -1.0;
  IntegerValue bump_coeff;
  const IntegerValue remainder = PositiveRemainder(cut->rhs, divisor);
  const IntegerValue f_remainder = f(remainder);
  cut->rhs = ApplyToInt128(f, divisor, cut->rhs);
  for (int i = 0; i < cut->terms.size(); ++i) {
    CutTerm& entry = cut->terms[i];
    const IntegerValue f_coeff = f(entry.coeff);
    // Only 0/1 terms are candidates for the bump above.
    if (entry.bound_diff == 1) {
      // TODO(user): we probably don't need int128 here, but we need
      // t * (remainder - entry.coeff) not to overflow, and we can't really be
      // sure.
      const IntegerValue alternative =
          entry.coeff > 0
              ? f_remainder - f(remainder - entry.coeff)
              : f_remainder - IntegerValue(static_cast<int64_t>(ApplyToInt128(
                                  f, divisor,
                                  absl::int128(remainder.value()) -
                                      absl::int128(entry.coeff.value()))));
      DCHECK_GE(alternative, f_coeff);
      if (alternative > f_coeff) {
        // Keep the single bump with the best lp-weighted improvement.
        const double score = ToDouble(alternative - f_coeff) * entry.lp_value;
        if (score > bump_score) {
          bump_index = i;
          bump_score = score;
          bump_coeff = alternative;
        }
      }
    }
    entry.coeff = f_coeff;
  }
  if (bump_index >= 0) {
    cut->terms[bump_index].coeff = bump_coeff;
  }
  return bump_index;
}
463
464} // namespace
465
  // NOTE(review): the enclosing definition's first line is elided in this
  // listing — presumably the destructor of the "gub" lifting helper, flushing
  // its statistics; verify against the full source.
  //
  // Stats are only exported when verbose logging is enabled and a shared
  // stats collector exists.
  if (!VLOG_IS_ON(1)) return;
  if (shared_stats_ == nullptr) return;
  std::vector<std::pair<std::string, int64_t>> stats;
  stats.push_back({"gub/num_improvements", num_improvements_});
  stats.push_back(
      {"gub/num_todo_both_complements", num_todo_both_complements_});
  shared_stats_->AddStats(stats);
}
475
    // NOTE(review): the first signature line (return type, class scope and
    // method name) is elided in this listing — presumably the "gub" helper's
    // main entry point applying f() with lifting via at-most-one constraints;
    // verify against the full source.
    const std::function<IntegerValue(IntegerValue)>& f,
    const IntegerValue divisor, CutData* cut) {
  // Reset per-call statistics.
  last_num_bumps_ = 0;
  last_num_lifts_ = 0;
  last_num_gubs_ = 0;

  // Keep initial coeffs (needed below to detect lifts and for the gub math).
  const absl::int128 original_rhs = cut->rhs;
  const int num_terms = cut->terms.size();
  initial_coeffs_.resize(num_terms);
  for (int i = 0; i < num_terms; ++i) {
    const CutTerm& entry = cut->terms[i];
    initial_coeffs_[i] = entry.coeff;
  }

  // Apply f(), possibly bumping one Boolean term.
  const int bump_index = ApplyWithPotentialBump(f, divisor, cut);
  last_num_bumps_ += (bump_index >= 0);

  // We consider a positive coeff staying positive a lift.
  // TODO(user): adjust as needed.
  for (int i = 0; i < num_terms; ++i) {
    if (initial_coeffs_[i] > 0 && cut->terms[i].coeff > 0) ++last_num_lifts_;
  }

  // Do not spend extra effort if we don't have a violated cut at this stage.
  // TODO(user): also skip if there are no at most ones.
  if (cut->ComputeViolation() <= 1e-6) return;

  // Clear tmp data.
  literals_.clear();
  already_seen_.clear();
  amo_count_.clear();

  // Recover the LiteralIndex corresponding to the simple Boolean terms.
  const int limit = integer_encoder_->NumVariables();
  for (int i = 0; i < num_terms; ++i) {
    const CutTerm& entry = cut->terms[i];
    if (entry.bound_diff != 1) continue;
    if (!entry.IsSimple()) continue;
    if (!VariableIsPositive(entry.expr_vars[0])) continue;
    if (entry.expr_vars[0] >= limit) continue;
    if (integer_trail_->LevelZeroLowerBound(entry.expr_vars[0]) != 0) continue;
    if (integer_trail_->LevelZeroUpperBound(entry.expr_vars[0]) != 1) continue;

    // Decode whether the term encodes X or (1 - X).
    bool complemented;
    if (entry.expr_offset == 0 && entry.expr_coeffs[0] == 1) {
      complemented = false;
    } else if (entry.expr_offset == 1 && entry.expr_coeffs[0] == -1) {
      complemented = true;
    } else {
      continue;
    }

    // TODO(user): Make this faster by keeping a vector in
    // integer_encoder_.
    const LiteralIndex literal_index = integer_encoder_->GetAssociatedLiteral(
        // NOTE(review): the argument on this line is elided in this listing;
        // verify against the full source.
    if (literal_index != -1) {
      absl::Span<const int> indices =
          implications_->AtMostOneIndices(Literal(literal_index));
      if (!indices.empty()) {
        if (!complemented) {
          // Because currently we only "lift" not complemented term in an amo
          // with one complemented term, as an heuristic we only count the
          // non complemented entries in each amo.
          for (const int index : indices) amo_count_[index]++;
        }
        literals_.push_back(
            {i, Literal(literal_index), initial_coeffs_[i], complemented});
      }
    }
  }

  // Sort: complemented terms first, then by decreasing original coefficient.
  // TODO(user): This is an heuristic for cover cut. Generalize?
  std::sort(literals_.begin(), literals_.end(),
            [](const LiteralInCut a, LiteralInCut b) {
              if (a.complemented != b.complemented) return a.complemented;
              return a.original_coeff > b.original_coeff;
            });

  // Partition the literals into AMO.
  // The index is the one of the "representative", i.e. the first literal.
  //
  // The idea is that if X + Y <=1, then in the cut, we can complement both
  // into 1 - (C = 1 - X + Y) and apply the cut on this formula since C >= 0.
  // This is better than complementing both individually, or complementing
  // just one. Note that if the coeff of X and Y are different, we can lower
  // the higher one to try to apply this too.
  //
  // TODO(user): If a literal can go into more than one amo, choose in a wiser
  // way :)
  bool rhs_changed_once = false;
  const int num_literals = literals_.size();
  const IntegerValue f_divisor = f(divisor);
  for (int i = 0; i < num_literals; ++i) {
    int best = -1;
    int64_t best_sum = 0;
    bool was_lifted = false;
    const LiteralInCut& curr = literals_[i];

    // Corner case where sometimes we have a term and its complement.
    // Ideally they should be merged as it should be always beneficial to do
    // so ? Add a step for this.
    if (already_seen_.contains(curr.literal)) continue;
    already_seen_.insert(curr.literal);

    for (const int amo_index : implications_->AtMostOneIndices(curr.literal)) {
      const int64_t count_or_index = amo_count_[amo_index];

      // Tricky/Optim: we use negative amo_count_ to indicate if this amo was
      // already used and encode the index as -1 -count_or_index.
      if (count_or_index < 0) {
        // We have a potential simplification !
        const int rep_index_in_literals = -count_or_index - 1;
        CHECK_LT(rep_index_in_literals, i);
        const LiteralInCut& rep = literals_[rep_index_in_literals];
        if (rep.complemented && !curr.complemented) {
          const IntegerValue f_rep = cut->terms[rep.index].coeff;

          // We can split the original term in "two", one go with the at most
          // one in the term that is complemented, the rest get through f()
          // normally.
          //
          // So this replace f(b) with -f(a) + f(a + b) which is always
          // beneficial. It is unclear if there is more than one choice of a,
          // which one is the best though.
          //
          // To avoid overflow when computing
          // f(curr.original_coeff + rep.original_coeff), we use the fact that
          // f(k * divisor + rest) = k * f(divisor) + f(rest)
          const IntegerValue x_d = curr.original_coeff / divisor;
          const IntegerValue x_r = curr.original_coeff % divisor;
          IntegerValue y_d = rep.original_coeff / divisor;
          IntegerValue y_r = rep.original_coeff % divisor;
          // We never want to apply f() outside [-divisor, divisor].
          if (x_r > 0 && y_r > 0) {
            y_r -= divisor;
            y_d += 1;
          } else if (x_r < 0 && y_r < 0) {
            y_r += divisor;
            y_d -= 1;
          }

          const IntegerValue new_coeff =
              -f_rep + (x_d + y_d) * f_divisor + f(x_r + y_r);

          // Note that it is okay if we already lifted this term, we can
          // lift it further by selecting a different at most one.
          if (new_coeff > cut->terms[curr.index].coeff) {
            was_lifted = true;
            ++last_num_gubs_;
            ++num_improvements_;
            cut->terms[curr.index].coeff = new_coeff;  // Lifting
          }
        }

        // Note that in practice this rarely triggers.
        if (rep.original_coeff < 0 && rep.complemented &&
            curr.original_coeff < 0 && curr.complemented) {
          ++num_todo_both_complements_;

          // TODO(user): Finish the demonstration when both coeff are different.
          // Also it might be better to not merge such term and use the second
          // one for lifting other terms? We seems to have some wrong cut too,
          // Fix. Actually we cannot bump and then change the rhs since the
          // bump value do depend on the rhs...
          if (/* DISABLES CODE*/ (true)) continue;

          // Lets start with both coeff equal to a > 0:
          // The initial term aX + aY was rewritten as
          // -a(1-X)-a(1-Y) + a + a + ... <= ...
          // -a(1-X)-a(1-Y) + ... <= rhs
          // f(-a)(1-X) + f(-a)(1-Y) + ... <= rhs
          // Alternatively, we could write:
          // -a(1 -(X+Y)) + ... <= rhs + a
          // And because of the AMO, we can apply f() to this.
          // f(-a).(1-X) -f(-a).Y <= f(rhs + a)
          // f(-a).(1-X) + f(-a) -f(-a).Y <= f(rhs + a) + f(-a)
          // same here <= f(rhs) here !
          //
          // TODO(user): We can do more than two changes, but then the original
          // rhs is not the same! and we need to track the correction. fix.
          if (!rhs_changed_once && rep.original_coeff == curr.original_coeff) {
            const int64_t a = -rep.original_coeff.value();
            const absl::int128 new_rhs =
                ApplyToInt128(f, divisor, original_rhs + a) + f(-a).value();
            CHECK_LE(new_rhs, cut->rhs);
            if (new_rhs < cut->rhs) {
              was_lifted = true;
              cut->rhs = new_rhs;
              rhs_changed_once = true;
            }
          }
        }
      } else {
        // This amo is still "free": remember the one with the highest count.
        if (count_or_index > best_sum) {
          best_sum = count_or_index;
          best = amo_index;
        }
      }
    }
    if (was_lifted) continue;

    // Select a new AMO for this term.
    // All future term will try "lifting" with this entry on that AMO.
    if (curr.complemented && best != -1) {
      amo_count_[best] = -i - 1;
    }
  }
}
690
691// Compute the larger t <= max_t such that t * rhs_remainder >= divisor / 2.
692//
693// This is just a separate function as it is slightly faster to compute the
694// result only once.
695IntegerValue GetFactorT(IntegerValue rhs_remainder, IntegerValue divisor,
696 IntegerValue max_magnitude) {
697 // Make sure that when we multiply the rhs or the coefficient by a factor t,
698 // we do not have an integer overflow. Note that the rhs should be counted
699 // in max_magnitude since we will apply f() on it.
700 IntegerValue max_t(std::numeric_limits<int64_t>::max());
701 if (max_magnitude != 0) {
702 max_t = max_t / max_magnitude;
703 }
704 return rhs_remainder == 0
705 ? max_t
706 : std::min(max_t, CeilRatio(divisor / 2, rhs_remainder));
707}
708
// Returns the super-additive rounding function f used to round each cut
// coefficient; the exact shape depends on the relation between the cut's
// rhs remainder, the divisor and max_scaling (see each branch below).
std::function<IntegerValue(IntegerValue)> GetSuperAdditiveRoundingFunction(
    IntegerValue rhs_remainder, IntegerValue divisor, IntegerValue t,
    IntegerValue max_scaling) {
  DCHECK_GE(max_scaling, 1);
  DCHECK_GE(t, 1);

  // Adjust after the multiplication by t.
  rhs_remainder *= t;
  DCHECK_LT(rhs_remainder, divisor);

  // Make sure we don't have an integer overflow below. Note that we assume that
  // divisor and the maximum coeff magnitude are not too different (maybe a
  // factor 1000 at most) so that the final result will never overflow.
  max_scaling =
      std::min(max_scaling, std::numeric_limits<int64_t>::max() / divisor);

  const IntegerValue size = divisor - rhs_remainder;
  if (max_scaling == 1 || size == 1) {
    // Plain Chvatal-Gomory style rounding: floor(t * coeff / divisor).
    // TODO(user): Use everywhere a two step computation to avoid overflow?
    // First divide by divisor, then multiply by t. For now, we limit t so that
    // we never have an overflow instead.
    return [t, divisor](IntegerValue coeff) {
      return FloorRatio(t * coeff, divisor);
    };
  } else if (size <= max_scaling) {
    // MIR-style function: scale by `size` and add the positive part of
    // (remainder - rhs_remainder).
    return [size, rhs_remainder, t, divisor](IntegerValue coeff) {
      const IntegerValue t_coeff = t * coeff;
      const IntegerValue ratio = FloorRatio(t_coeff, divisor);
      const IntegerValue remainder = PositiveRemainder(t_coeff, divisor);
      const IntegerValue diff = remainder - rhs_remainder;
      return size * ratio + std::max(IntegerValue(0), diff);
    };
  } else if (max_scaling.value() * rhs_remainder.value() < divisor) {
    // Because of our max_t limitation, the rhs_remainder might stay small.
    //
    // If it is "too small" we cannot use the code below because it will not be
    // valid. So we just divide divisor into max_scaling bucket. The
    // rhs_remainder will be in the bucket 0.
    //
    // Note(user): This seems the same as just increasing t, modulo integer
    // overflows. Maybe we should just always do the computation like this so
    // that we can use larger t even if coeff is close to kint64max.
    return [t, divisor, max_scaling](IntegerValue coeff) {
      const IntegerValue t_coeff = t * coeff;
      const IntegerValue ratio = FloorRatio(t_coeff, divisor);
      const IntegerValue remainder = PositiveRemainder(t_coeff, divisor);
      const IntegerValue bucket = FloorRatio(remainder * max_scaling, divisor);
      return max_scaling * ratio + bucket;
    };
  } else {
    // We divide (size = divisor - rhs_remainder) into (max_scaling - 1) buckets
    // and increase the function by 1 / max_scaling for each of them.
    //
    // Note that for different values of max_scaling, we get a family of
    // functions that do not dominate each others. So potentially, a max scaling
    // as low as 2 could lead to the better cut (this is exactly the Letchford &
    // Lodi function).
    //
    // Another interesting fact, is that if we want to compute the maximum alpha
    // for a constraint with 2 terms like:
    // divisor * Y + (ratio * divisor + remainder) * X
    // <= rhs_ratio * divisor + rhs_remainder
    // so that we have the cut:
    // Y + (ratio + alpha) * X <= rhs_ratio
    // This is the same as computing the maximum alpha such that for all integer
    // X > 0 we have CeilRatio(alpha * divisor * X, divisor)
    // <= CeilRatio(remainder * X - rhs_remainder, divisor).
    // We can prove that this alpha is of the form (n - 1) / n, and it will
    // be reached by such function for a max_scaling of n.
    //
    // TODO(user): This function is not always maximal when
    // size % (max_scaling - 1) == 0. Improve?
    return [size, rhs_remainder, t, divisor, max_scaling](IntegerValue coeff) {
      const IntegerValue t_coeff = t * coeff;
      const IntegerValue ratio = FloorRatio(t_coeff, divisor);
      const IntegerValue remainder = PositiveRemainder(t_coeff, divisor);
      const IntegerValue diff = remainder - rhs_remainder;
      const IntegerValue bucket =
          diff > 0 ? CeilRatio(diff * (max_scaling - 1), size)
                   : IntegerValue(0);
      return max_scaling * ratio + bucket;
    };
  }
}
793
794std::function<IntegerValue(IntegerValue)> GetSuperAdditiveStrengtheningFunction(
795 IntegerValue positive_rhs, IntegerValue min_magnitude) {
796 CHECK_GT(positive_rhs, 0);
797 CHECK_GT(min_magnitude, 0);
798
799 if (min_magnitude >= CeilRatio(positive_rhs, 2)) {
800 return [positive_rhs](IntegerValue v) {
801 if (v >= 0) return IntegerValue(0);
802 if (v > -positive_rhs) return IntegerValue(-1);
803 return IntegerValue(-2);
804 };
805 }
806
807 // The transformation only work if 2 * second_threshold >= positive_rhs.
808 //
809 // TODO(user): Limit the number of value used with scaling like above.
810 min_magnitude = std::min(min_magnitude, FloorRatio(positive_rhs, 2));
811 const IntegerValue second_threshold = positive_rhs - min_magnitude;
812 return [positive_rhs, min_magnitude, second_threshold](IntegerValue v) {
813 if (v >= 0) return IntegerValue(0);
814 if (v <= -positive_rhs) return -positive_rhs;
815 if (v <= -second_threshold) return -second_threshold;
816
817 // This should actually never happen by the definition of min_magnitude.
818 // But with it, the function is supper-additive even if min_magnitude is not
819 // correct.
820 if (v >= -min_magnitude) return -min_magnitude;
821
822 // TODO(user): we might want to intoduce some step to reduce the final
823 // magnitude of the cut.
824 return v;
825 };
826}
827
std::function<IntegerValue(IntegerValue)>
// NOTE(review): the line naming this function and its first parameter is
// elided in this listing — presumably
// GetSuperAdditiveStrengtheningMirFunction(IntegerValue positive_rhs, ...);
// verify against the full source.
    IntegerValue scaling) {
  if (scaling >= positive_rhs) {
    // Simple case, no scaling required: identity clamped to
    // [-positive_rhs, 0].
    return [positive_rhs](IntegerValue v) {
      if (v >= 0) return IntegerValue(0);
      if (v <= -positive_rhs) return -positive_rhs;
      return v;
    };
  }

  // We need to scale. Cap scaling so that scaling * positive_rhs cannot
  // overflow an int64.
  scaling =
      std::min(scaling, IntegerValue(std::numeric_limits<int64_t>::max()) /
                            positive_rhs);
  if (scaling == 1) {
    // Degenerate scaling: a two-value step function {0, -1}.
    return [](IntegerValue v) {
      if (v >= 0) return IntegerValue(0);
      return IntegerValue(-1);
    };
  }

  // We divide [-positive_rhs + 1, 0] into (scaling - 1) bucket.
  return [positive_rhs, scaling](IntegerValue v) {
    if (v >= 0) return IntegerValue(0);
    if (v <= -positive_rhs) return -scaling;
    return FloorRatio(v * (scaling - 1), (positive_rhs - 1));
  };
}
858
  // NOTE(review): the enclosing definition's first line is elided in this
  // listing — presumably the destructor of IntegerRoundingCutHelper, flushing
  // its statistics; verify against the full source.
  //
  // Stats are only exported when verbose logging is enabled and a shared
  // stats collector exists.
  if (!VLOG_IS_ON(1)) return;
  if (shared_stats_ == nullptr) return;
  std::vector<std::pair<std::string, int64_t>> stats;
  stats.push_back({"rounding_cut/num_initial_ibs_", total_num_initial_ibs_});
  stats.push_back(
      {"rounding_cut/num_initial_merges_", total_num_initial_merges_});
  stats.push_back({"rounding_cut/num_pos_lifts", total_num_pos_lifts_});
  stats.push_back({"rounding_cut/num_neg_lifts", total_num_neg_lifts_});
  stats.push_back(
      {"rounding_cut/num_post_complements", total_num_post_complements_});
  stats.push_back({"rounding_cut/num_overflows", total_num_overflow_abort_});
  stats.push_back({"rounding_cut/num_adjusts", total_num_coeff_adjust_});
  stats.push_back({"rounding_cut/num_merges", total_num_merges_});
  stats.push_back({"rounding_cut/num_bumps", total_num_bumps_});
  stats.push_back(
      {"rounding_cut/num_final_complements", total_num_final_complements_});
  stats.push_back({"rounding_cut/num_dominating_f", total_num_dominating_f_});
  shared_stats_->AddStats(stats);
}
879
// Evaluates one candidate divisor: applies the super-additive rounding
// function f() derived from `divisor` to the cut and returns the resulting
// violation scaled by the L2 norm of the rounded coefficients over the
// relevant entries. Returns 0.0 as soon as an upper bound on the achievable
// violation drops below 1e-3 (early abort). Side effect: fills
// adjusted_coeffs_ with the (index, new_coeff) adjustments assumed while
// evaluating this divisor, so the caller can replay them if this divisor wins.
double IntegerRoundingCutHelper::GetScaledViolation(
    IntegerValue divisor, IntegerValue max_scaling,
    IntegerValue remainder_threshold, const CutData& cut) {
  absl::int128 rhs = cut.rhs;
  IntegerValue max_magnitude = cut.max_magnitude;
  const IntegerValue initial_rhs_remainder = PositiveRemainder(rhs, divisor);
  // A tiny rhs remainder bounds the violation of the rounded cut; not worth
  // pursuing this divisor.
  if (initial_rhs_remainder < remainder_threshold) return 0.0;

  // We will adjust coefficient that are just under an exact multiple of
  // divisor to an exact multiple. This is meant to get rid of small errors
  // that appears due to rounding error in our exact computation of the
  // initial constraint given to this class.
  //
  // Each adjustement will cause the initial_rhs_remainder to increase, and we
  // do not want to increase it above divisor. Our threshold below guarantees
  // this. Note that the higher the rhs_remainder becomes, the more the
  // function f() has a chance to reduce the violation, so it is not always a
  // good idea to use all the slack we have between initial_rhs_remainder and
  // divisor.
  //
  // TODO(user): We could see if for a fixed function f, the increase is
  // interesting?
  // before: f(rhs) - f(coeff) * lp_value
  // after: f(rhs + increase * bound_diff) - f(coeff + increase) * lp_value.
  adjusted_coeffs_.clear();
  const IntegerValue adjust_threshold =
      (divisor - initial_rhs_remainder - 1) /
      IntegerValue(std::max(1000, cut.num_relevant_entries));
  if (adjust_threshold > 0) {
    // Even before we finish the adjust, we can have a lower bound on the
    // activily loss using this divisor, and so we can abort early. This is
    // similar to what is done below.
    double max_violation = static_cast<double>(initial_rhs_remainder.value());
    for (int i = 0; i < cut.num_relevant_entries; ++i) {
      const CutTerm& entry = cut.terms[i];
      const IntegerValue remainder = PositiveRemainder(entry.coeff, divisor);
      if (remainder == 0) continue;
      if (remainder <= initial_rhs_remainder) {
        // We do not know exactly f() yet, but it will always round to the
        // floor of the division by divisor in this case.
        max_violation -=
            static_cast<double>(remainder.value()) * entry.lp_value;
        if (max_violation <= 1e-3) return 0.0;
        continue;
      }

      // Adjust coeff of the form k * divisor - epsilon.
      const IntegerValue adjust = divisor - remainder;
      const IntegerValue prod = CapProdI(adjust, entry.bound_diff);
      if (prod <= adjust_threshold) {
        // TODO(user): This overflow seems pretty rare and might be avoidable
        // more efficiently.
        const IntegerValue new_coeff = CapAddI(entry.coeff, adjust);
        if (!AtMinOrMaxInt64I(new_coeff)) {
          // Bumping the coefficient up also relaxes the rhs by the bump times
          // the variable range, keeping the constraint valid.
          rhs += absl::int128(prod.value());
          adjusted_coeffs_.push_back({i, new_coeff});
          max_magnitude = std::max(max_magnitude, IntTypeAbs(new_coeff));
        }
      }
    }
  }

  const IntegerValue rhs_remainder = PositiveRemainder(rhs, divisor);
  const IntegerValue t = GetFactorT(rhs_remainder, divisor, max_magnitude);
  const auto f =
      GetSuperAdditiveRoundingFunction(rhs_remainder, divisor, t, max_scaling);

  // As we round coefficients, we will compute the loss compared to the
  // current scaled constraint activity. As soon as this loss crosses the
  // slack, then we known that there is no violation and we can abort early.
  //
  // TODO(user): modulo the scaling, we could compute the exact threshold
  // using our current best cut. Note that we also have to account the change
  // in slack due to the adjust code above.
  const double scaling = ToDouble(f(divisor)) / ToDouble(divisor);
  double max_violation = scaling * ToDouble(rhs_remainder);

  // Apply f() to the cut and compute the cut violation. Note that it is
  // okay to just look at the relevant indices since the other have a lp
  // value which is almost zero. Doing it like this is faster, and even if
  // the max_magnitude might be off it should still be relevant enough.
  double violation = -static_cast<double>(ApplyToInt128(f, divisor, rhs));
  double l2_norm = 0.0;
  int adjusted_coeffs_index = 0;
  for (int i = 0; i < cut.num_relevant_entries; ++i) {
    const CutTerm& entry = cut.terms[i];

    // Adjust coeff according to our previous computation if needed.
    // adjusted_coeffs_ was filled in increasing index order above, so a
    // single forward cursor is enough to match entries.
    IntegerValue coeff = entry.coeff;
    if (adjusted_coeffs_index < adjusted_coeffs_.size() &&
        adjusted_coeffs_[adjusted_coeffs_index].first == i) {
      coeff = adjusted_coeffs_[adjusted_coeffs_index].second;
      adjusted_coeffs_index++;
    }

    if (coeff == 0) continue;
    const IntegerValue new_coeff = f(coeff);
    const double new_coeff_double = ToDouble(new_coeff);
    const double lp_value = entry.lp_value;

    // TODO(user): Shall we compute the norm after slack are substituted back?
    // it might be widely different. Another reason why this might not be
    // the best measure.
    l2_norm += new_coeff_double * new_coeff_double;
    violation += new_coeff_double * lp_value;
    max_violation -= (scaling * ToDouble(coeff) - new_coeff_double) * lp_value;
    if (max_violation <= 1e-3) return 0.0;
  }
  if (l2_norm == 0.0) return 0.0;

  // Here we scale by the L2 norm over the "relevant" positions. This seems
  // to work slighly better in practice.
  //
  // Note(user): The non-relevant position have an LP value of zero. If their
  // coefficient is positive, it seems good not to take it into account in the
  // norm since the larger this coeff is, the stronger the cut. If the coeff
  // is negative though, a large coeff means a small increase from zero of the
  // lp value will make the cut satisfied, so we might want to look at them.
  return violation / sqrt(l2_norm);
}
1000
// TODO(user): This is slow, 50% of run time on a2c1s1.pb.gz. Optimize!
//
// Main entry point of the rounding-cut (MIR/Chvatal-Gomory style) generation:
// picks a divisor among the coefficient magnitudes, optionally expands terms
// via implied bounds, searches for a dominating super-additive function f()
// and applies it to produce the final cut in cut_. Returns false when no cut
// with a good enough scaled violation could be built.
bool IntegerRoundingCutHelper::ComputeCut(
    RoundingOptions options, const CutData& base_ct,
    ImpliedBoundsProcessor* ib_processor) {
  // Try IB before heuristic?
  // This should be better except it can mess up the norm and the divisors.
  cut_ = base_ct;
  if (options.use_ib_before_heuristic && ib_processor != nullptr) {
    std::vector<CutTerm>* new_bool_terms =
        ib_processor->ClearedMutableTempTerms();
    for (CutTerm& term : cut_.terms) {
      // Only non-Boolean terms with a relevant lp value are candidates for
      // implied-bound expansion.
      if (term.bound_diff <= 1) continue;
      if (!term.HasRelevantLpValue()) continue;

      if (options.prefer_positive_ib && term.coeff < 0) {
        // We complement the term before trying the implied bound.
        term.Complement(&cut_.rhs);
        if (ib_processor->TryToExpandWithLowerImpliedbound(
                IntegerValue(1),
                /*complement=*/true, &term, &cut_.rhs, new_bool_terms)) {
          ++total_num_initial_ibs_;
          continue;
        }
        // Expansion failed: undo the temporary complementation.
        term.Complement(&cut_.rhs);
      }

      if (ib_processor->TryToExpandWithLowerImpliedbound(
              IntegerValue(1),
              /*complement=*/true, &term, &cut_.rhs, new_bool_terms)) {
        ++total_num_initial_ibs_;
      }
    }

    // TODO(user): We assume that this is called with and without the option
    // use_ib_before_heuristic, so that we can abort if no IB has been applied
    // since then we will redo the computation. This is not really clean.
    if (new_bool_terms->empty()) return false;
    total_num_initial_merges_ +=
        ib_processor->MutableCutBuilder()->AddOrMergeBooleanTerms(
            absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_);
  }

  // Our heuristic will try to generate a few different cuts, and we will keep
  // the most violated one scaled by the l2 norm of the relevant position.
  //
  // TODO(user): Experiment for the best value of this initial violation
  // threshold. Note also that we use the l2 norm on the restricted position
  // here. Maybe we should change that? On that note, the L2 norm usage seems
  // a bit weird to me since it grows with the number of term in the cut. And
  // often, we already have a good cut, and we make it stronger by adding
  // extra terms that do not change its activity.
  //
  // The discussion above only concern the best_scaled_violation initial
  // value. The remainder_threshold allows to not consider cuts for which the
  // final efficacity is clearly lower than 1e-3 (it is a bound, so we could
  // generate cuts with a lower efficacity than this).
  //
  // TODO(user): If the rhs is small and close to zero, we might want to
  // consider different way of complementing the variables.
  cut_.SortRelevantEntries();
  const IntegerValue remainder_threshold(
      std::max(IntegerValue(1), cut_.max_magnitude / 1000));
  if (cut_.rhs >= 0 && cut_.rhs < remainder_threshold.value()) {
    return false;
  }

  // There is no point trying twice the same divisor or a divisor that is too
  // small. Note that we use a higher threshold than the remainder_threshold
  // because we can boost the remainder thanks to our adjusting heuristic
  // below and also because this allows to have cuts with a small range of
  // coefficients.
  divisors_.clear();
  for (const CutTerm& entry : cut_.terms) {
    // Note that because of the slacks, initial coeff are here too.
    const IntegerValue magnitude = IntTypeAbs(entry.coeff);
    if (magnitude <= remainder_threshold) continue;
    divisors_.push_back(magnitude);

    // If we have too many divisor to try, restrict to the first ones which
    // should correspond to the highest lp values.
    if (divisors_.size() > 50) break;
  }
  if (divisors_.empty()) return false;
  gtl::STLSortAndRemoveDuplicates(&divisors_, std::greater<IntegerValue>());

  // Note that most of the time is spend here since we call this function on
  // many linear equation, and just a few of them have a good enough scaled
  // violation. We can spend more time afterwards to tune the cut.
  //
  // TODO(user): Avoid quadratic algorithm? Note that we are quadratic in
  // relevant positions not the full cut size, but this is still too much on
  // some problems.
  IntegerValue best_divisor(0);
  double best_scaled_violation = 1e-3;
  for (const IntegerValue divisor : divisors_) {
    // Note that the function will abort right away if PositiveRemainder() is
    // not good enough, so it is quick for bad divisor.
    const double violation = GetScaledViolation(divisor, options.max_scaling,
                                                remainder_threshold, cut_);
    if (violation > best_scaled_violation) {
      best_scaled_violation = violation;
      best_adjusted_coeffs_ = adjusted_coeffs_;
      best_divisor = divisor;
    }
  }
  if (best_divisor == 0) return false;

  // Try best_divisor divided by small number.
  for (int div = 2; div < 9; ++div) {
    const IntegerValue divisor = best_divisor / IntegerValue(div);
    if (divisor <= 1) continue;
    const double violation = GetScaledViolation(divisor, options.max_scaling,
                                                remainder_threshold, cut_);
    if (violation > best_scaled_violation) {
      best_scaled_violation = violation;
      best_adjusted_coeffs_ = adjusted_coeffs_;
      best_divisor = divisor;
    }
  }

  // Re try complementation on the transformed cut.
  // TODO(user): This can be quadratic! we don't want to try too much of them.
  // Or optimize the algo, we should be able to be more incremental here.
  // see on g200x740.pb.gz for instance.
  for (CutTerm& entry : cut_.terms) {
    if (!entry.HasRelevantLpValue()) break;
    if (entry.coeff % best_divisor == 0) continue;

    // Temporary complement this variable.
    entry.Complement(&cut_.rhs);

    const double violation = GetScaledViolation(
        best_divisor, options.max_scaling, remainder_threshold, cut_);
    if (violation > best_scaled_violation) {
      // keep the change.
      ++total_num_post_complements_;
      best_scaled_violation = violation;
      best_adjusted_coeffs_ = adjusted_coeffs_;
    } else {
      // Restore.
      entry.Complement(&cut_.rhs);
    }
  }

  // Adjust coefficients as computed by the best GetScaledViolation().
  // Each adjustment bumps a coefficient up to an exact multiple of the
  // divisor and relaxes the rhs accordingly (see GetScaledViolation).
  for (const auto [index, new_coeff] : best_adjusted_coeffs_) {
    ++total_num_coeff_adjust_;
    CutTerm& entry = cut_.terms[index];
    const IntegerValue remainder = new_coeff - entry.coeff;
    CHECK_GT(remainder, 0);
    entry.coeff = new_coeff;
    cut_.rhs += absl::int128(remainder.value()) *
                absl::int128(entry.bound_diff.value());
    cut_.max_magnitude = std::max(cut_.max_magnitude, IntTypeAbs(new_coeff));
  }

  // Create the base super-additive function f().
  const IntegerValue rhs_remainder = PositiveRemainder(cut_.rhs, best_divisor);
  IntegerValue factor_t =
      GetFactorT(rhs_remainder, best_divisor, cut_.max_magnitude);
  auto f = GetSuperAdditiveRoundingFunction(rhs_remainder, best_divisor,
                                            factor_t, options.max_scaling);

  // Look amongst all our possible function f() for one that dominate greedily
  // our current best one. Note that we prefer lower scaling factor since that
  // result in a cut with lower coefficients.
  //
  // We only look at relevant position and ignore the other. Not sure this is
  // the best approach.
  remainders_.clear();
  for (const CutTerm& entry : cut_.terms) {
    if (!entry.HasRelevantLpValue()) break;
    const IntegerValue coeff = entry.coeff;
    const IntegerValue r = PositiveRemainder(coeff, best_divisor);
    if (r > rhs_remainder) remainders_.push_back(r);
  }
  gtl::STLSortAndRemoveDuplicates(&remainders_);
  if (remainders_.size() <= 100) {
    best_rs_.clear();
    for (const IntegerValue r : remainders_) {
      best_rs_.push_back(f(r));
    }
    IntegerValue best_d = f(best_divisor);

    // Note that the complexity seems high 100 * 2 * options.max_scaling, but
    // this only run on cuts that are already efficient and the inner loop tend
    // to abort quickly. I didn't see this code in the cpu profile so far.
    for (const IntegerValue t :
         {IntegerValue(1),
          GetFactorT(rhs_remainder, best_divisor, cut_.max_magnitude)}) {
      for (IntegerValue s(2); s <= options.max_scaling; ++s) {
        const auto g =
            GetSuperAdditiveRoundingFunction(rhs_remainder, best_divisor, t, s);
        int num_strictly_better = 0;
        rs_.clear();
        const IntegerValue d = g(best_divisor);
        // g dominates f iff g(r)/g(divisor) >= f(r)/f(divisor) for all
        // recorded remainders; compare via cross-multiplication.
        for (int i = 0; i < best_rs_.size(); ++i) {
          const IntegerValue temp = g(remainders_[i]);
          if (temp * best_d < best_rs_[i] * d) break;
          if (temp * best_d > best_rs_[i] * d) num_strictly_better++;
          rs_.push_back(temp);
        }
        if (rs_.size() == best_rs_.size() && num_strictly_better > 0) {
          ++total_num_dominating_f_;
          f = g;
          factor_t = t;
          best_rs_ = rs_;
          best_d = d;
        }
      }
    }
  }

  // Use implied bounds to "lift" Booleans into the cut.
  // This should lead to stronger cuts even if the norms might be worse.
  num_ib_used_ = 0;
  if (ib_processor != nullptr) {
    const auto [num_lb, num_ub, num_merges] =
        ib_processor->PostprocessWithImpliedBound(f, factor_t, &cut_);
    total_num_pos_lifts_ += num_lb;
    total_num_neg_lifts_ += num_ub;
    total_num_merges_ += num_merges;
    num_ib_used_ = num_lb + num_ub;
  }

  // More complementation, but for the same f.
  // If we can do that, it probably means our heuristics above are not great.
  // We loop at most 3 times and stop as soon as a pass makes no change.
  for (int i = 0; i < 3; ++i) {
    const int64_t saved = total_num_final_complements_;
    for (CutTerm& entry : cut_.terms) {
      // Complementing an entry gives:
      // [a * X <= b] -> [-a * (diff - X) <= b - a * diff]
      //
      // We will compare what happen when we apply f:
      // [f(b) - f(a) * lp(X)] -> [f(b - a * diff) - f(-a) * (diff - lp(X))].
      //
      // If lp(X) is zero, then the transformation is always worse.
      // Because f(b - a * diff) >= f(b) + f(-a) * diff by super-additivity.
      //
      // However the larger is X, the better it gets since at diff, we have
      // f(b) >= f(b - a * diff) + f(a * diff) >= f(b - a * diff) + f(a) * diff.
      //
      // TODO(user): It is still unclear if we have a * X + b * (1 - X) <= rhs
      // for a Boolean X, what is the best way to apply f and if we should merge
      // the terms. If there is no other terms, best is probably
      // f(rhs - a) * X + f(rhs - b) * (1 - X).
      if (entry.coeff % best_divisor == 0) continue;
      if (!entry.HasRelevantLpValue()) continue;

      // Avoid potential overflow here.
      const IntegerValue prod(CapProdI(entry.bound_diff, entry.coeff));
      const IntegerValue remainder = PositiveRemainder(cut_.rhs, best_divisor);
      if (ProdOverflow(factor_t, prod)) continue;
      if (ProdOverflow(factor_t, CapSubI(remainder, prod))) continue;

      const double lp1 =
          ToDouble(f(remainder)) - ToDouble(f(entry.coeff)) * entry.lp_value;
      const double lp2 = ToDouble(f(remainder - prod)) -
                         ToDouble(f(-entry.coeff)) *
                             (ToDouble(entry.bound_diff) - entry.lp_value);
      if (lp2 + 1e-2 < lp1) {
        entry.Complement(&cut_.rhs);
        ++total_num_final_complements_;
      }
    }
    if (total_num_final_complements_ == saved) break;
  }

  gub_helper_.ApplyWithPotentialBumpAndGUB(f, best_divisor, &cut_);
  total_num_bumps_ += gub_helper_.last_num_bumps();
  return true;
}
1273
CoverCutHelper::~CoverCutHelper() {
  // Export the per-family cut counters to the shared stats table, but only
  // pay that cost when someone will actually look at them.
  if (!VLOG_IS_ON(1)) return;
  if (shared_stats_ == nullptr) return;

  std::vector<std::pair<std::string, int64_t>> stats;
  // Appends one entry per CutStats counter, prefixed with the cut family
  // name (e.g. "cover_cut/").
  const auto add_stats = [&stats](absl::string_view name, const CutStats& s) {
    stats.push_back(
        {absl::StrCat(name, "num_overflows"), s.num_overflow_aborts});
    stats.push_back({absl::StrCat(name, "num_lifting"), s.num_lifting});
    stats.push_back({absl::StrCat(name, "num_initial_ib"), s.num_initial_ibs});
    stats.push_back({absl::StrCat(name, "num_implied_lb"), s.num_lb_ibs});
    stats.push_back({absl::StrCat(name, "num_implied_ub"), s.num_ub_ibs});
    stats.push_back({absl::StrCat(name, "num_bumps"), s.num_bumps});
    stats.push_back({absl::StrCat(name, "num_gubs"), s.num_gubs});
    stats.push_back({absl::StrCat(name, "num_cuts"), s.num_cuts});
    stats.push_back({absl::StrCat(name, "num_merges"), s.num_merges});
  };
  add_stats("cover_cut/", cover_stats_);
  add_stats("flow_cut/", flow_stats_);
  add_stats("ls_cut/", ls_stats_);
  shared_stats_->AddStats(stats);
}
1296
1297namespace {
1298
1299struct LargeCoeffFirst {
1300 bool operator()(const CutTerm& a, const CutTerm& b) const {
1301 if (a.coeff == b.coeff) {
1302 return a.LpDistToMaxValue() > b.LpDistToMaxValue();
1303 }
1304 return a.coeff > b.coeff;
1305 }
1306};
1307
1308struct SmallContribFirst {
1309 bool operator()(const CutTerm& a, const CutTerm& b) const {
1310 const double contrib_a = a.lp_value * static_cast<double>(a.coeff.value());
1311 const double contrib_b = b.lp_value * static_cast<double>(b.coeff.value());
1312 return contrib_a < contrib_b;
1313 }
1314};
1315
1316struct LargeContribFirst {
1317 bool operator()(const CutTerm& a, const CutTerm& b) const {
1318 const double contrib_a = a.lp_value * static_cast<double>(a.coeff.value());
1319 const double contrib_b = b.lp_value * static_cast<double>(b.coeff.value());
1320 return contrib_a > contrib_b;
1321 }
1322};
1323
1324struct LargeLpValueFirst {
1325 bool operator()(const CutTerm& a, const CutTerm& b) const {
1326 if (a.lp_value == b.lp_value) {
1327 // Prefer high coefficients if the distance is the same.
1328 // We have more chance to get a cover this way.
1329 return a.coeff > b.coeff;
1330 }
1331 return a.lp_value > b.lp_value;
1332 }
1333};
1334
1335// When minimizing a cover we want to remove bad score (large dist) divided by
1336// item size. Note that here we assume item are "boolean" fully taken or not.
1337// for general int we use (lp_dist / bound_diff) / (coeff * bound_diff) which
1338// lead to the same formula as for Booleans.
1339struct KnapsackAdd {
1340 bool operator()(const CutTerm& a, const CutTerm& b) const {
1341 const double contrib_a =
1342 a.LpDistToMaxValue() / static_cast<double>(a.coeff.value());
1343 const double contrib_b =
1344 b.LpDistToMaxValue() / static_cast<double>(b.coeff.value());
1345 return contrib_a < contrib_b;
1346 }
1347};
1348struct KnapsackRemove {
1349 bool operator()(const CutTerm& a, const CutTerm& b) const {
1350 const double contrib_a =
1351 a.LpDistToMaxValue() / static_cast<double>(a.coeff.value());
1352 const double contrib_b =
1353 b.LpDistToMaxValue() / static_cast<double>(b.coeff.value());
1354 return contrib_a > contrib_b;
1355 }
1356};
1357
1358} // namespace.
1359
1360// Transform to a minimal cover. We want to greedily remove the largest coeff
1361// first, so we have more chance for the "lifting" below which can increase
1362// the cut violation. If the coeff are the same, we prefer to remove high
1363// distance from upper bound first.
1364template <class Compare>
1365int CoverCutHelper::MinimizeCover(int cover_size, absl::int128 slack) {
1366 CHECK_GT(slack, 0);
1367 absl::Span<CutTerm> terms = absl::MakeSpan(cut_.terms);
1368 std::sort(terms.begin(), terms.begin() + cover_size, Compare());
1369 for (int i = 0; i < cover_size;) {
1370 const CutTerm& t = terms[i];
1371 const absl::int128 contrib =
1372 absl::int128(t.bound_diff.value()) * absl::int128(t.coeff.value());
1373 if (contrib < slack) {
1374 slack -= contrib;
1375 std::swap(terms[i], terms[--cover_size]);
1376 } else {
1377 ++i;
1378 }
1379 }
1380 DCHECK_GT(cover_size, 0);
1381 return cover_size;
1382}
1383
// Heuristically builds an initial cover among the first relevant_size terms
// of cut_.terms: variables at their upper bound are taken unconditionally,
// the remaining relevant terms are added in CompareAdd order until the
// activity upper bound exceeds the rhs, and the result is then minimized
// with CompareRemove. Returns the cover size (a prefix of cut_.terms), or 0
// when no violated cover can be found.
template <class CompareAdd, class CompareRemove>
int CoverCutHelper::GetCoverSize(int relevant_size) {
  if (relevant_size == 0) return 0;
  absl::Span<CutTerm> terms = absl::MakeSpan(cut_.terms);

  // Take first all at variable at upper bound, and ignore the one at lower
  // bound.
  int part1 = 0;
  for (int i = 0; i < relevant_size;) {
    CutTerm& term = terms[i];
    const double dist = term.LpDistToMaxValue();
    if (dist < 1e-6) {
      // Move to part 1.
      std::swap(term, terms[part1]);
      ++i;
      ++part1;
    } else if (term.lp_value > 1e-6) {
      // Keep in part 2.
      ++i;
    } else {
      // Exclude entirely (part 3).
      --relevant_size;
      std::swap(term, terms[relevant_size]);
    }
  }
  std::sort(terms.begin() + part1, terms.begin() + relevant_size, CompareAdd());

  // We substract the initial rhs to avoid overflow.
  DCHECK_GE(cut_.rhs, 0);
  absl::int128 max_shifted_activity = -cut_.rhs;
  absl::int128 shifted_round_up = -cut_.rhs;
  int cover_size = 0;
  // Grow the cover until its maximum activity (all cover variables at their
  // bound) exceeds the rhs. shifted_round_up tracks the same sum but with lp
  // values rounded up, as a proof that the selection can be violated.
  for (; cover_size < relevant_size; ++cover_size) {
    if (max_shifted_activity > 0) break;
    const CutTerm& term = terms[cover_size];
    max_shifted_activity += absl::int128(term.coeff.value()) *
                            absl::int128(term.bound_diff.value());
    shifted_round_up += absl::int128(term.coeff.value()) *
                        std::min(absl::int128(term.bound_diff.value()),
                                 absl::int128(std::ceil(term.lp_value - 1e-6)));
  }

  DCHECK_GE(cover_size, 0);
  if (shifted_round_up <= 0) {
    return 0;
  }
  return MinimizeCover<CompareRemove>(cover_size, max_shifted_activity);
}
1432
// Try a simple cover heuristic.
// Look for violated CUT of the form: sum (UB - X) or (X - LB) >= 1.
// Specialized to Boolean terms (bound_diff <= 1); non-Booleans are excluded.
// Returns the chosen cover size (a prefix of cut_.terms) or 0 on failure.
int CoverCutHelper::GetCoverSizeForBooleans() {
  absl::Span<CutTerm> terms = absl::MakeSpan(cut_.terms);

  // Sorting can be slow, so we start by splitting the vector in 3 parts
  // - Can always be in cover
  // - Candidates that needs sorting
  // - At most one can be in cover (we keep the max).
  int part1 = 0;
  int relevant_size = terms.size();
  int best_in_part3 = -1;
  const double threshold = 1.0 - 1.0 / static_cast<double>(terms.size());
  for (int i = 0; i < relevant_size;) {
    const double lp_value = terms[i].lp_value;

    // Exclude non-Boolean.
    if (terms[i].bound_diff > 1) {
      --relevant_size;
      std::swap(terms[i], terms[relevant_size]);
      continue;
    }

    if (lp_value >= threshold) {
      // Move to part 1.
      std::swap(terms[i], terms[part1]);
      ++i;
      ++part1;
    } else if (lp_value > 0.5) {
      // Keep in part 2.
      ++i;
    } else {
      // Only keep the max (part 3).
      --relevant_size;
      std::swap(terms[i], terms[relevant_size]);

      if (best_in_part3 == -1 ||
          LargeLpValueFirst()(terms[relevant_size], terms[best_in_part3])) {
        best_in_part3 = relevant_size;
      }
    }
  }

  // Re-include the single best part-3 candidate, if any.
  if (best_in_part3 != -1) {
    std::swap(terms[relevant_size], terms[best_in_part3]);
    ++relevant_size;
  }

  // Sort by decreasing Lp value.
  std::sort(terms.begin() + part1, terms.begin() + relevant_size,
            LargeLpValueFirst());

  double activity = 0.0;
  int cover_size = relevant_size;
  absl::int128 slack = -cut_.rhs;
  for (int i = 0; i < relevant_size; ++i) {
    const CutTerm& term = terms[i];
    activity += term.LpDistToMaxValue();

    // As an heuristic we select all the term so that the sum of distance
    // to the upper bound is <= 1.0. If the corresponding slack is positive,
    // then we will have a cut of violation at least 0.0. Note that this
    // violation can be improved by the lifting.
    //
    // TODO(user): experiment with different threshold (even greater than one).
    // Or come up with an algo that incorporate the lifting into the heuristic.
    if (activity > 0.9999) {
      cover_size = i;  // before this entry.
      break;
    }

    // TODO(user): Stop if we overflow int128 max! Note that because we scale
    // things so that the max coeff is 2^52, this is unlikely.
    slack += absl::int128(term.coeff.value()) *
             absl::int128(term.bound_diff.value());
  }

  // If the rhs is now negative, we have a cut.
  //
  // Note(user): past this point, now that a given "base" cover has been chosen,
  // we basically compute the cut (of the form sum X <= bound) with the maximum
  // possible violation. Note also that we lift as much as possible, so we don't
  // necessarily optimize for the cut efficacity though. But we do get a
  // stronger cut.
  if (slack <= 0) {
    return 0;
  }
  if (cover_size == 0) return 0;
  return MinimizeCover<LargeCoeffFirst>(cover_size, slack);
}
1523
1524void CoverCutHelper::InitializeCut(const CutData& input_ct) {
1525 cut_ = input_ct;
1526 num_lifting_ = 0;
1527
1528 // We should have dealt with an infeasible constraint before.
1529 // Note that because of our scaling, it is unlikely we will overflow int128.
1530 CHECK_GE(cut_.rhs, 0);
1531 DCHECK(cut_.AllCoefficientsArePositive());
1532}
1533
// Builds a (lifted) cover cut from input_ct: pick a cover, complement its
// variables, then apply a MIR-style super-additive function f(). Returns
// true and leaves the result in cut_ on success.
bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct,
                                       ImpliedBoundsProcessor* ib_processor) {
  InitializeCut(input_ct);

  // Tricky: This only work because the cut absl128 rhs is not changed by these
  // operations.
  if (ib_processor != nullptr) {
    std::vector<CutTerm>* new_bool_terms =
        ib_processor->ClearedMutableTempTerms();
    for (CutTerm& term : cut_.terms) {
      // We only look at non-Boolean with an lp value not close to the upper
      // bound.
      if (term.bound_diff <= 1) continue;
      if (term.lp_value + 1e-4 > AsDouble(term.bound_diff)) continue;

      if (ib_processor->TryToExpandWithLowerImpliedbound(
              IntegerValue(1),
              /*complement=*/false, &term, &cut_.rhs, new_bool_terms)) {
        ++cover_stats_.num_initial_ibs;
      }
    }

    ib_processor->MutableCutBuilder()->AddOrMergeBooleanTerms(
        absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_);
  }

  // Choose the cover heuristic: the knapsack-style one when some relevant
  // general-integer term is present, the Boolean-specialized one otherwise.
  bool has_relevant_int = false;
  for (const CutTerm& term : cut_.terms) {
    if (term.HasRelevantLpValue() && term.bound_diff > 1) {
      has_relevant_int = true;
      break;
    }
  }

  const int base_size = static_cast<int>(cut_.terms.size());
  const int cover_size =
      has_relevant_int
          ? GetCoverSize<LargeContribFirst, LargeCoeffFirst>(base_size)
          : GetCoverSizeForBooleans();
  if (!has_relevant_int && ib_processor == nullptr) {
    // If some implied bound substitution are possible, we do not cache anything
    // currently because the logic is currently sighlty different between the
    // two code. Fix?
    has_bool_base_ct_ = true;
    bool_cover_size_ = cover_size;
    if (cover_size == 0) return false;
    bool_base_ct_ = cut_;
  }
  if (cover_size == 0) return false;

  // The cut is just obtained by complementing the variable in the cover and
  // applying the MIR super additive function.
  //
  // Note that since all coeff in the cover will now be negative. If we do no
  // scaling, and if we use max_coeff_in_cover to construct f(), they will be
  // mapped by f() to -1 and we get the classical cover inequality. With scaling
  // we can get a strictly dominating cut though.
  //
  // TODO(user): we don't have to pick max_coeff_in_cover and could use the
  // coefficient of the most fractional variable. Or like in the MIR code try
  // a few of them. Currently, the cut in the test is worse if we don't take
  // the max_coeff_in_cover though, so we need more understanding.
  //
  // TODO(user): It seems we could use a more advanced lifting function
  // described later in the paper. Investigate.
  IntegerValue best_coeff = 0;
  double best_score = -1.0;
  IntegerValue max_coeff_in_cover(0);
  for (int i = 0; i < cover_size; ++i) {
    CutTerm& term = cut_.terms[i];
    max_coeff_in_cover = std::max(max_coeff_in_cover, term.coeff);
    // Score = fractionality of the lp value weighted by the coefficient.
    const double score =
        std::abs(term.lp_value - std::floor(term.lp_value + 1e-6)) *
        ToDouble(term.coeff);
    if (score > best_score) {
      best_score = score;
      best_coeff = term.coeff;
    }
    term.Complement(&cut_.rhs);
  }
  CHECK_LT(cut_.rhs, 0);  // Because we complemented a cover.

  // TODO(user): Experiment without this line that basically disable scoring.
  best_coeff = max_coeff_in_cover;

  // TODO(user): experiment with different value of scaling and param t.
  std::function<IntegerValue(IntegerValue)> f;
  {
    // Cap the scaling so that f() applied to any coefficient cannot overflow.
    IntegerValue max_magnitude = 0;
    for (const CutTerm& term : cut_.terms) {
      max_magnitude = std::max(max_magnitude, IntTypeAbs(term.coeff));
    }
    const IntegerValue max_scaling(std::min(
        IntegerValue(6000), FloorRatio(kMaxIntegerValue, max_magnitude)));
    const IntegerValue remainder = PositiveRemainder(cut_.rhs, best_coeff);
    f = GetSuperAdditiveRoundingFunction(remainder, best_coeff, IntegerValue(1),
                                         max_scaling);
  }

  if (ib_processor != nullptr) {
    const auto [num_lb, num_ub, num_merges] =
        ib_processor->PostprocessWithImpliedBound(f, /*factor_t=*/1, &cut_);
    cover_stats_.num_lb_ibs += num_lb;
    cover_stats_.num_ub_ibs += num_ub;
    cover_stats_.num_merges += num_merges;
  }

  // TODO(user): It might be better to merge terms in AMO before the cover
  // heuristic rather than after the fact?
  gub_helper_.ApplyWithPotentialBumpAndGUB(f, best_coeff, &cut_);
  cover_stats_.num_bumps += gub_helper_.last_num_bumps();
  cover_stats_.num_gubs += gub_helper_.last_num_gubs();
  cover_stats_.num_lifting += num_lifting_ = gub_helper_.last_num_lifts();
  ++cover_stats_.num_cuts;
  return true;
}
1650
// NOTE(review): the opening signature line (original line 1651) was dropped by
// the extraction; judging from the flow_stats_ counters this is the
// single-node-flow cover separation of CoverCutHelper — confirm against the
// full source. Returns true and leaves the generated cut in cut_ on success.
1652 ImpliedBoundsProcessor* ib_processor) {
1653 InitializeCut(input_ct);
1654
1655 // TODO(user): Change the heuristic to depends on the lp_value of the implied
1656 // bounds. This way we can exactly match what happen in the old
1657 // FlowCoverCutHelper.
1658 const int base_size = static_cast<int>(cut_.terms.size());
1659 const int cover_size = GetCoverSize<KnapsackAdd, KnapsackRemove>(base_size);
1660 if (cover_size == 0) return false;
1661
1662 // After complementing the term in the cover, we have
1663 // sum -ci.X + other_terms <= -slack;
1664 for (int i = 0; i < cover_size; ++i) {
1665 cut_.terms[i].Complement(&cut_.rhs);
1666
1667 // We do not support complex terms, but we shouldn't get any.
1668 if (cut_.terms[i].expr_coeffs[1] != 0) return false;
1669 }
1670
1671 // The algorithm goes as follow:
1672 // - Compute heuristically a minimal cover.
1673 // - We have sum_cover ci.Xi >= slack where Xi is distance to upper bound.
1674 // - Apply coefficient strenghtening if ci > slack.
1675 //
1676 // Using implied bound we have two cases (all coeffs positive):
1677 // 1/ ci.Xi = ci.fi.Bi + ci.Si : always good.
1678 // 2/ ci.Xi = ci.di.Bi - ci.Si <= di.Bi: good if Si lp_value is zero.
1679 //
1680 // Note that if everything is Boolean, we just get a normal cover and coeff
1681 // strengthening just result in all coeff at 1, so worse than our cover
1682 // heuristic.
// Abort early if -rhs would not fit in an int64_t: the strengthening
// functions below work on int64 magnitudes.
1683 CHECK_LT(cut_.rhs, 0);
1684 if (cut_.rhs <= absl::int128(std::numeric_limits<int64_t>::min())) {
1685 return false;
1686 }
1687
// A very large coefficient forces the scaled (MIR) variant of the
// strengthening function below to stay overflow-safe.
1688 bool has_large_coeff = false;
1689 for (const CutTerm& term : cut_.terms) {
1690 if (IntTypeAbs(term.coeff) > 1'000'000) {
1691 has_large_coeff = true;
1692 break;
1693 }
1694 }
1695
1696 // TODO(user): Shouldn't we just use rounding f() with maximum coeff to allows
1697 // lift of all other terms? but then except for the heuristic the cut is
1698 // really similar to the cover cut.
1699 const IntegerValue positive_rhs = -static_cast<int64_t>(cut_.rhs);
1700 IntegerValue min_magnitude = kMaxIntegerValue;
1701 for (int i = 0; i < cover_size; ++i) {
1702 const IntegerValue magnitude = IntTypeAbs(cut_.terms[i].coeff);
1703 min_magnitude = std::min(min_magnitude, magnitude);
1704 }
1705 const bool use_scaling =
1706 has_large_coeff || min_magnitude == 1 || min_magnitude >= positive_rhs;
// NOTE(review): original line 1709 (the ':' branch of this ternary, presumably
// the non-scaled GetSuperAdditiveStrengtheningFunction(...) call) is missing
// from this extraction.
1707 auto f = use_scaling ? GetSuperAdditiveStrengtheningMirFunction(
1708 positive_rhs, /*scaling=*/6000)
1710 min_magnitude);
1711
1712 if (ib_processor != nullptr) {
1713 const auto [num_lb, num_ub, num_merges] =
1714 ib_processor->PostprocessWithImpliedBound(f, /*factor_t=*/1, &cut_);
1715 flow_stats_.num_lb_ibs += num_lb;
1716 flow_stats_.num_ub_ibs += num_ub;
1717 flow_stats_.num_merges += num_merges;
1718 }
1719
1720 // Lifting.
// The period must cover the most negative coefficient so that the extended
// function is defined on the whole coefficient range.
1721 IntegerValue period = positive_rhs;
1722 {
1723 for (const CutTerm& term : cut_.terms) {
1724 if (term.coeff > 0) continue;
1725 period = std::max(period, -term.coeff);
1726 }
1727
1728 // Compute good period.
1729 // We don't want to extend it in the simpler case where f(x)=-1 if x < 0.
1730 //
1731 // TODO(user): If the Mir*() function is used, we don't need to extend that
1732 // much the period. Fix.
1733 if (f(-period + FloorRatio(period, 2)) != f(-period)) {
1734 // TODO(user): do exact binary search to find highest x in
1735 // [-max_neg_magnitude, 0] such that f(x) == f(-max_neg_magnitude) ? not
1736 // really needed though since we know that we have this equality:
1737 CHECK_EQ(f(-period), f(-positive_rhs));
1738 period = std::max(period, CapProdI(2, positive_rhs) - 1);
1739 }
1740
1741 f = ExtendNegativeFunction(f, period);
1742 }
1743
1744 // Generate the cut.
1745 gub_helper_.ApplyWithPotentialBumpAndGUB(f, period, &cut_);
1746 flow_stats_.num_bumps += gub_helper_.last_num_bumps();
1747 flow_stats_.num_gubs += gub_helper_.last_num_gubs();
1748 flow_stats_.num_lifting += gub_helper_.last_num_lifts();
1749 ++flow_stats_.num_cuts;
1750 return true;
1751 }
1752
// NOTE(review): the opening signature line (original line 1753) was dropped by
// the extraction; this is the Letchford-Souli style lifted cover separation
// (it updates ls_stats_) — confirm against the full source. The cut is built
// for a constraint with positive coefficients and Boolean cover terms.
1754 const CutData& input_ct, ImpliedBoundsProcessor* ib_processor) {
1755 int cover_size;
1756 if (has_bool_base_ct_) {
1757 // We already called GetCoverSizeForBooleans() and ib_processor was nullptr,
1758 // so reuse that info.
1759 CHECK(ib_processor == nullptr);
1760 cover_size = bool_cover_size_;
1761 if (cover_size == 0) return false;
1762 InitializeCut(bool_base_ct_);
1763 } else {
1764 InitializeCut(input_ct);
1765
1766 // Perform IB expansion with no restriction, all coeff should still be
1767 // positive.
1768 //
1769 // TODO(user): Merge Boolean terms that are complement of each other.
1770 if (ib_processor != nullptr) {
1771 std::vector<CutTerm>* new_bool_terms =
1772 ib_processor->ClearedMutableTempTerms();
1773 for (CutTerm& term : cut_.terms) {
1774 if (term.bound_diff <= 1) continue;
1775 if (ib_processor->TryToExpandWithLowerImpliedbound(
1776 IntegerValue(1),
1777 /*complement=*/false, &term, &cut_.rhs, new_bool_terms)) {
1778 ++ls_stats_.num_initial_ibs;
1779 }
1780 }
1781
// NOTE(review): original line 1782 (presumably the call merging new_bool_terms
// into the cut, e.g. cut_builder_.AddOrMergeBooleanTerms(...) is missing from
// this extraction.
1783 absl::MakeSpan(*new_bool_terms), IntegerValue(1), &cut_);
1784 }
1785
1786 // TODO(user): we currently only deal with Boolean in the cover. Fix.
1787 cover_size = GetCoverSizeForBooleans();
1788 }
1789 if (cover_size == 0) return false;
1790
1791 // We don't support big rhs here.
1792 // Note however than since this only deal with Booleans, it is less likely.
1793 if (cut_.rhs > absl::int128(std::numeric_limits<int64_t>::max())) {
1794 ++ls_stats_.num_overflow_aborts;
1795 return false;
1796 }
1797 const IntegerValue rhs = static_cast<int64_t>(cut_.rhs);
1798
1799 // Collect the weight in the cover.
1800 IntegerValue sum(0);
1801 std::vector<IntegerValue> cover_weights;
1802 for (int i = 0; i < cover_size; ++i) {
1803 CHECK_EQ(cut_.terms[i].bound_diff, 1);
1804 CHECK_GT(cut_.terms[i].coeff, 0);
1805 cover_weights.push_back(cut_.terms[i].coeff);
1806 sum = CapAddI(sum, cut_.terms[i].coeff);
1807 }
1808 if (AtMinOrMaxInt64(sum.value())) {
1809 ++ls_stats_.num_overflow_aborts;
1810 return false;
1811 }
1812 CHECK_GT(sum, rhs);
1813
1814 // Compute the correct threshold so that if we round down larger weights to
1815 // p/q. We have sum of the weight in cover == base_rhs.
1816 IntegerValue p(0);
1817 IntegerValue q(0);
1818 IntegerValue previous_sum(0);
1819 std::sort(cover_weights.begin(), cover_weights.end());
1820 for (int i = 0; i < cover_size; ++i) {
// q counts the weights still to be considered (from the i-th smallest on).
1821 q = IntegerValue(cover_weights.size() - i);
1822 if (previous_sum + cover_weights[i] * q > rhs) {
1823 p = rhs - previous_sum;
1824 break;
1825 }
1826 previous_sum += cover_weights[i];
1827 }
1828 CHECK_GE(q, 1);
1829
1830 // Compute thresholds.
1831 // For the first q values, thresholds[i] is the smallest integer such that
1832 // q * threshold[i] > p * (i + 1).
1833 std::vector<IntegerValue> thresholds;
1834 for (int i = 0; i < q; ++i) {
1835 // TODO(user): compute this in an overflow-safe way.
1836 if (CapProd(p.value(), i + 1) >= std::numeric_limits<int64_t>::max() - 1) {
1837 ++ls_stats_.num_overflow_aborts;
1838 return false;
1839 }
1840 thresholds.push_back(CeilRatio(p * (i + 1) + 1, q));
1841 }
1842
1843 // For the other values, we just add the weights.
1844 std::reverse(cover_weights.begin(), cover_weights.end());
1845 for (int i = q.value(); i < cover_size; ++i) {
1846 thresholds.push_back(thresholds.back() + cover_weights[i]);
1847 }
1848 CHECK_EQ(thresholds.back(), rhs + 1);
1849
1850 // Generate the cut.
1851 //
1852 // Our algo is quadratic in worst case, but large coefficients should be
1853 // rare, and in practice we don't really see this.
1854 //
1855 // Note that this work for non-Boolean since we can just "theorically" split
1856 // them as a sum of Booleans :) Probably a cleaner proof exist by just using
1857 // the super-additivity of the lifting function on [0, rhs].
1858 temp_cut_.rhs = cover_size - 1;
1859 temp_cut_.terms.clear();
1860
1861 const int base_size = static_cast<int>(cut_.terms.size());
1862 for (int i = 0; i < base_size; ++i) {
1863 const CutTerm& term = cut_.terms[i];
1864 const IntegerValue coeff = term.coeff;
1865 IntegerValue cut_coeff(1);
1866 if (coeff < thresholds[0]) {
// Below the smallest threshold, only cover terms get the default coeff of 1;
// non-cover terms are dropped.
1867 if (i >= cover_size) continue;
1868 } else {
1869 // Find the largest index <= coeff.
1870 //
1871 // TODO(user): For exact multiple of p/q we can increase the coeff by 1/2.
1872 // See section in the paper on getting maximal super additive function.
1873 for (int i = 1; i < cover_size; ++i) {
1874 if (coeff < thresholds[i]) break;
1875 cut_coeff = IntegerValue(i + 1);
1876 }
1877 if (cut_coeff != 0 && i >= cover_size) ++ls_stats_.num_lifting;
1878 if (cut_coeff > 1 && i < cover_size) ++ls_stats_.num_lifting; // happen?
1879 }
1880
1881 temp_cut_.terms.push_back(term);
1882 temp_cut_.terms.back().coeff = cut_coeff;
1883 }
1884
1885 cut_ = temp_cut_;
1886 ++ls_stats_.num_cuts;
1887 return true;
1888 }
1889
// NOTE(review): the signature line (original line 1890) is missing from this
// extraction; given the "bool_rlt/" stat names this is presumably the
// BoolRLTCutHelper destructor, which flushes its counters to the shared
// statistics registry (only when VLOG(1) is enabled).
1891 if (!VLOG_IS_ON(1)) return;
1892 std::vector<std::pair<std::string, int64_t>> stats;
1893 stats.push_back({"bool_rlt/num_tried", num_tried_});
1894 stats.push_back({"bool_rlt/num_tried_factors", num_tried_factors_});
1895 shared_stats_->AddStats(stats);
1896 }
1897
1898void BoolRLTCutHelper::Initialize(absl::Span<const IntegerVariable> lp_vars) {
1899 product_detector_->InitializeBooleanRLTCuts(lp_vars, *lp_values_);
1900 enabled_ = !product_detector_->BoolRLTCandidates().empty();
1901}
1902
1903 // TODO(user): do less work, add more stats.
// NOTE(review): the signature line (original line 1904) is missing from this
// extraction; this is the BoolRLTCutHelper separation entry point (it takes
// a CutData input_ct and returns bool) — confirm against the full source.
// It searches for a Boolean "factor" whose product-linearization of input_ct
// yields the most efficacious cut, then builds that cut via TryProduct().
1905 if (!enabled_) return false;
1906
1907 ++num_tried_;
1908 DCHECK(input_ct.AllCoefficientsArePositive());
1909
1910 // We will list the interesting "factor" to try to multiply + linearize the
1911 // input constraint with.
1912 absl::flat_hash_set<IntegerVariable> to_try_set;
1913 std::vector<IntegerVariable> to_try;
1914
1915 // We can look at the linearized factor * term and bound the activity delta
1916 // rescaled by 1 / factor.
1917 //
1918 // CASE linearized_term delta = term/factor - previous
1919 //
1920 // DROP, 0 0 - X
1921 // MC_CORMICK, factor * ub - (ub - X) (ub - X) * (1 - 1/factor) <= 0
1922 // SQUARE, factor=X 1 - X
1923 // RLT, factor - P 1 - X - P/X <= 1 - X
1924 //
1925 // TODO(user): detect earlier that a factor is not worth checking because
1926 // we already loose too much with the DROP/MC_CORMICK cases ? Filter more ?
1927 // I think we can probably evaluate the factor efficiency during the first
1928 // loop which usually have a small complexity compared to num_factor_to_try
1929 // times num filtered terms.
1930 filtered_input_.terms.clear();
1931 filtered_input_.rhs = input_ct.rhs;
1932
1933 const auto& candidates = product_detector_->BoolRLTCandidates();
1934 for (const CutTerm& term : input_ct.terms) {
1935 // The only options are DROP or MC_CORMICK, but the later will unlikely win.
1936 //
1937 // TODO(user): we never use factor with lp value < 1e-4, but we could use a
1938 // factor equal to 1.0 I think. Double check.
1939 if (!term.IsBoolean() && term.lp_value <= 1e-6) {
1940 continue;
1941 }
1942
1943 // Here the MC_CORMICK will not loose much. And SQUARE or RLT cannot win
1944 // much, so we can assume there is no loss and just look for violated
1945 // subconstraint.
// Terms at (or numerically at) their max value are folded into the rhs
// instead of being kept in the filtered constraint.
1946 if (term.LpDistToMaxValue() <= 1e-6) {
1947 filtered_input_.rhs -= absl::int128(term.coeff.value()) *
1948 absl::int128(term.bound_diff.value());
1949 continue;
1950 }
1951
1952 // Convert to var or -var (to mean 1 - var).
1953 //
1954 // TODO(user): We could keep for each factor the max gain, so that we
1955 // can decided if it is not even worth trying a factor.
1956 const IntegerVariable var = term.GetUnderlyingLiteralOrNone();
1957 if (var != kNoIntegerVariable && candidates.contains(NegationOf(var))) {
1958 for (const IntegerVariable factor : candidates.at(NegationOf(var))) {
// De-duplicate while preserving first-seen order in to_try.
1959 if (to_try_set.insert(factor).second) to_try.push_back(factor);
1960 }
1961 }
1962 filtered_input_.terms.push_back(term);
1963 }
1964
1965 // TODO(user): Avoid constructing the cut just to evaluate its efficacy.
// Factors must beat this minimum efficacy to be considered at all.
1966 double best_efficacy = 1e-3;
1967 IntegerVariable best_factor = kNoIntegerVariable;
1968 for (const IntegerVariable factor : to_try) {
1969 ++num_tried_factors_;
1970 if (!TryProduct(factor, filtered_input_)) continue;
1971 const double efficacy = cut_.ComputeEfficacy();
1972 if (efficacy > best_efficacy) {
1973 best_efficacy = efficacy;
1974 best_factor = factor;
1975 }
1976 }
1977
1978 // If we found a good factor, applies it to the non-filtered base constraint.
1979 if (best_factor != kNoIntegerVariable) {
1980 return TryProduct(best_factor, input_ct);
1981 }
1982 return false;
1983 }
1984
1985 namespace {
1986
1987 // Each bool * term can be linearized in a couple of way.
1988 // We will choose the best one.
// See BoolRLTCutHelper::TryProduct() below for how each option is applied:
// - DROP: use product >= 0, i.e. simply drop the term.
// - MC_CORMICK: complement the term and use x * (1 - y) <= (1 - y).
// - RLT: complement and bound via a known literal upper bound on the product.
// - SQUARE: factor equals the term's own literal, so the term is unchanged.
1989 enum class LinearizationOption {
1990 DROP,
1991 MC_CORMICK,
1992 RLT,
1993 SQUARE,
1994 };
1995
1996 } // namespace
1997
1998double BoolRLTCutHelper::GetLiteralLpValue(IntegerVariable var) const {
1999 if (VariableIsPositive(var)) return (*lp_values_)[var];
2000 return 1.0 - (*lp_values_)[PositiveVariable(var)];
2001}
2002
// Multiplies the constraint `input` by the Boolean literal `factor` and
// linearizes each resulting bilinear term, writing the result into cut_.
// Returns false when the transformed rhs would be too large to be safely
// moved into an integer coefficient.
2003 bool BoolRLTCutHelper::TryProduct(IntegerVariable factor,
2004 const CutData& input) {
2005 cut_.terms.clear();
2006 cut_.rhs = 0;
// old_rhs accumulates the complement updates below and is finally attached
// to the factor literal as a coefficient.
2007 absl::int128 old_rhs = input.rhs;
2008
2009 const double factor_lp = GetLiteralLpValue(factor);
2010
2011 // Consider each term in sequence and linearize them.
2012 for (CutTerm term : input.terms) {
2013 LinearizationOption best_option = LinearizationOption::DROP;
2014
2015 // Recover the "IntegerVariable" literal if any.
2016 const IntegerVariable var = term.GetUnderlyingLiteralOrNone();
2017 const bool is_literal = var != kNoIntegerVariable;
2018
2019 // First option is if factor == var.
2020 if (factor == var) {
2021 // The term can be left unchanged.
2022 // Note that we "win" (factor_lp - factor_lp ^ 2) * coeff activity.
2023 best_option = LinearizationOption::SQUARE;
2024 cut_.terms.push_back(term);
2025 continue;
2026 }
2027
2028 // If factor == NegationOf(var) our best linearization is unfortunately
2029 // just product >= 0.
2030 if (NegationOf(factor) == var) {
2031 best_option = LinearizationOption::DROP;
2032 continue;
2033 }
2034
2035 // TODO(user): If l implies x and y, we have x * y >= l.
2036 // We have to choose l as high as possible if multiple choices.
2037
2038 // We start by the lp value for the drop option: simply dropping the term
2039 // since we know it is >= 0. We will choose the option with the highest
2040 // lp value, which is the one that "loose" the least activity.
2041 double best_lp = 0.0;
2042
2043 // Second option, is complement it and use x * (1 - y) <= (1 - y):
2044 // x * [1 - (1 - y)] = x - x * (1 - y) >= x - (1 - y)
2045 const double complement_lp =
2046 static_cast<double>(term.bound_diff.value()) - term.lp_value;
2047 const double mc_cormick_lp = factor_lp - complement_lp;
2048 if (mc_cormick_lp > best_lp) {
2049 best_option = LinearizationOption::MC_CORMICK;
2050 best_lp = mc_cormick_lp;
2051 }
2052
2053 // Last option is complement it and use a relation x * (1 - y) <= u.
2054 // so the lp is x - u. Note that this can be higher than x * y if the
2055 // bilinear relation is violated by the lp solution.
2056 if (is_literal) {
2057 // TODO(user): only consider variable within current lp.
2058 const IntegerVariable ub_lit =
2059 product_detector_->LiteralProductUpperBound(factor, NegationOf(var));
2060 if (ub_lit != kNoIntegerVariable) {
2061 const double lit_lp = GetLiteralLpValue(ub_lit);
2062 if (factor_lp - lit_lp > best_lp) {
2063 // We do it right away since we have all we need.
2064 best_option = LinearizationOption::RLT;
2065
2066 // First complement to update rhs.
2067 term.Complement(&old_rhs);
2068
2069 // Now we replace the term data.
2070 term.lp_value = lit_lp;
2071 term.ReplaceExpressionByLiteral(ub_lit);
2072 cut_.terms.push_back(term);
2073 continue;
2074 }
2075 }
2076 }
2077
2078 if (best_option == LinearizationOption::DROP) continue;
2079
2080 // We just keep the complemented term.
2081 CHECK(best_option == LinearizationOption::MC_CORMICK);
2082 term.Complement(&old_rhs);
2083 cut_.terms.push_back(term);
2084 }
2085
2086 // Finally we add the - factor * old_rhs term.
2087 // We can only do that if the old_rhs is not too big.
2088 //
2089 // TODO(user): Avoid right away large constraint coming from gomory...
2090 const absl::int128 limit(int64_t{1} << 50);
2091 if (old_rhs > limit || old_rhs < -limit) return false;
2092
2093 CutTerm term;
2094 term.coeff = -IntegerValue(static_cast<int64_t>(old_rhs));
2095 term.lp_value = factor_lp;
2096 term.bound_diff = 1;
2097 term.ReplaceExpressionByLiteral(factor);
2098 cut_.terms.push_back(term);
2099
2100 return true;
2101 }
2102
// NOTE(review): the first signature lines (original lines 2103-2105, the
// function name and the z/x/y AffineExpression parameters encoding z = x * y)
// are missing from this extraction — confirm against the full source.
// Builds a CutGenerator that separates the four McCormick facets of the
// bilinear set z = x * y over the level-zero boxes of x and y.
2106 int linearization_level,
2107 Model* model) {
2108 CutGenerator result;
2109 if (z.var != kNoIntegerVariable) result.vars.push_back(z.var);
2110 if (x.var != kNoIntegerVariable) result.vars.push_back(x.var);
2111 if (y.var != kNoIntegerVariable) result.vars.push_back(y.var);
2112
2113 IntegerTrail* const integer_trail = model->GetOrCreate<IntegerTrail>();
2114 Trail* trail = model->GetOrCreate<Trail>();
2115
2116 result.generate_cuts = [z, x, y, linearization_level, model, trail,
2117 integer_trail](LinearConstraintManager* manager) {
// At linearization level 1, cuts are only generated at the root.
2118 if (trail->CurrentDecisionLevel() > 0 && linearization_level == 1) {
2119 return true;
2120 }
2121 const int64_t x_lb = integer_trail->LevelZeroLowerBound(x).value();
2122 const int64_t x_ub = integer_trail->LevelZeroUpperBound(x).value();
2123 const int64_t y_lb = integer_trail->LevelZeroLowerBound(y).value();
2124 const int64_t y_ub = integer_trail->LevelZeroUpperBound(y).value();
2125
2126 // if x or y are fixed, the McCormick equations are exact.
2127 if (x_lb == x_ub || y_lb == y_ub) return true;
2128
2129 // Check for overflow with the product of expression bounds and the
2130 // product of one expression bound times the constant part of the other
2131 // expression.
2132 const int64_t x_max_amp = std::max(std::abs(x_lb), std::abs(x_ub));
2133 const int64_t y_max_amp = std::max(std::abs(y_lb), std::abs(y_ub));
// 2^53 - 1: the largest integer exactly representable as a double, since
// the cut violation test below mixes integer bounds with LP doubles.
2134 constexpr int64_t kMaxSafeInteger = (int64_t{1} << 53) - 1;
2135 if (CapProd(y_max_amp, x_max_amp) > kMaxSafeInteger) return true;
2136 if (CapProd(y_max_amp, std::abs(x.constant.value())) > kMaxSafeInteger) {
2137 return true;
2138 }
2139 if (CapProd(x_max_amp, std::abs(y.constant.value())) > kMaxSafeInteger) {
2140 return true;
2141 }
2142
2143 const auto& lp_values = manager->LpValues();
2144 const double x_lp_value = x.LpValue(lp_values);
2145 const double y_lp_value = y.LpValue(lp_values);
2146 const double z_lp_value = z.LpValue(lp_values);
2147
2148 // TODO(user): As the bounds change monotonically, these cuts
2149 // dominate any previous one. try to keep a reference to the cut and
2150 // replace it. Alternatively, add an API for a level-zero bound change
2151 // callback.
2152
2153 // Cut -z + x_coeff * x + y_coeff* y <= rhs
2154 auto try_add_above_cut = [&](int64_t x_coeff, int64_t y_coeff,
2155 int64_t rhs) {
2156 if (-z_lp_value + x_lp_value * x_coeff + y_lp_value * y_coeff >=
2157 rhs + kMinCutViolation) {
2158 LinearConstraintBuilder cut(model, /*lb=*/kMinIntegerValue,
2159 /*ub=*/IntegerValue(rhs));
2160 cut.AddTerm(z, IntegerValue(-1));
2161 if (x_coeff != 0) cut.AddTerm(x, IntegerValue(x_coeff));
2162 if (y_coeff != 0) cut.AddTerm(y, IntegerValue(y_coeff));
2163 manager->AddCut(cut.Build(), "PositiveProduct");
2164 }
2165 };
2166
2167 // Cut -z + x_coeff * x + y_coeff* y >= rhs
2168 auto try_add_below_cut = [&](int64_t x_coeff, int64_t y_coeff,
2169 int64_t rhs) {
2170 if (-z_lp_value + x_lp_value * x_coeff + y_lp_value * y_coeff <=
2171 rhs - kMinCutViolation) {
2172 LinearConstraintBuilder cut(model, /*lb=*/IntegerValue(rhs),
2173 /*ub=*/kMaxIntegerValue);
2174 cut.AddTerm(z, IntegerValue(-1));
2175 if (x_coeff != 0) cut.AddTerm(x, IntegerValue(x_coeff));
2176 if (y_coeff != 0) cut.AddTerm(y, IntegerValue(y_coeff));
2177 manager->AddCut(cut.Build(), "PositiveProduct");
2178 }
2179 };
2180
2181 // McCormick relaxation of bilinear constraints. These 4 cuts are the
2182 // exact facets of the x * y polyhedron for a bounded x and y.
2183 //
2184 // Each cut correspond to plane that contains two of the line
2185 // (x=x_lb), (x=x_ub), (y=y_lb), (y=y_ub). The easiest to
2186 // understand them is to draw the x*y curves and see the 4
2187 // planes that correspond to the convex hull of the graph.
2188 try_add_above_cut(y_lb, x_lb, x_lb * y_lb);
2189 try_add_above_cut(y_ub, x_ub, x_ub * y_ub);
2190 try_add_below_cut(y_ub, x_lb, x_lb * y_ub);
2191 try_add_below_cut(y_lb, x_ub, x_ub * y_lb);
2192 return true;
2193 };
2194
2195 return result;
2196 }
2197
// NOTE(review): the name line (original line 2198) is missing from this
// extraction; given the call site in CreateSquareCutGenerator() below this is
// ComputeHyperplanAboveSquare(x, square, x_lb, x_ub, model). It returns the
// secant cut square <= (x_lb + x_ub) * x - x_lb * x_ub, valid above the
// parabola square = x^2 on [x_lb, x_ub].
2199 AffineExpression square,
2200 IntegerValue x_lb,
2201 IntegerValue x_ub, Model* model) {
2202 const IntegerValue above_slope = x_ub + x_lb;
2203 LinearConstraintBuilder above_hyperplan(model, kMinIntegerValue,
2204 -x_lb * x_ub);
2205 above_hyperplan.AddTerm(square, 1);
2206 above_hyperplan.AddTerm(x, -above_slope);
2207 return above_hyperplan.Build();
2208 }
2209
// NOTE(review): the name line (original line 2210) is missing from this
// extraction; given the call site in CreateSquareCutGenerator() below this is
// ComputeHyperplanBelowSquare(x, square, x_value, model). It returns the
// integer tangent cut square >= (2 * x_value + 1) * x - x_value * (x_value+1),
// tight at both x = x_value and x = x_value + 1.
2211 AffineExpression square,
2212 IntegerValue x_value,
2213 Model* model) {
2214 const IntegerValue below_slope = 2 * x_value + 1;
// NOTE(review): original line 2216 (presumably the kMaxIntegerValue upper
// bound argument closing this constructor call) is missing from this
// extraction.
2215 LinearConstraintBuilder below_hyperplan(model, -x_value - x_value * x_value,
2217 below_hyperplan.AddTerm(square, 1);
2218 below_hyperplan.AddTerm(x, -below_slope);
2219 return below_hyperplan.Build();
2220 }
2221
// NOTE(review): the first signature line (original line 2222, the function
// name and the y/x AffineExpression parameters encoding y = x * x) is missing
// from this extraction — confirm against the full source. Builds a
// CutGenerator separating the square relation with one secant cut above and
// one tangent cut below the parabola.
2223 int linearization_level, Model* model) {
2224 CutGenerator result;
2225 if (x.var != kNoIntegerVariable) result.vars.push_back(x.var);
2226 if (y.var != kNoIntegerVariable) result.vars.push_back(y.var);
2227
2228 Trail* trail = model->GetOrCreate<Trail>();
2229 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
2230 result.generate_cuts = [y, x, linearization_level, trail, integer_trail,
2231 model](LinearConstraintManager* manager) {
// At linearization level 1, cuts are only generated at the root.
2232 if (trail->CurrentDecisionLevel() > 0 && linearization_level == 1) {
2233 return true;
2234 }
2235 const IntegerValue x_ub = integer_trail->LevelZeroUpperBound(x);
2236 const IntegerValue x_lb = integer_trail->LevelZeroLowerBound(x);
2237 DCHECK_GE(x_lb, 0);
2238
2239 if (x_lb == x_ub) return true;
2240
2241 // Check for potential overflows.
2242 if (x_ub > (int64_t{1} << 31)) return true;
2243 DCHECK_GE(x_lb, 0);
2244 manager->AddCut(ComputeHyperplanAboveSquare(x, y, x_lb, x_ub, model),
2245 "SquareUpper");
2246
// The tangent is anchored at the integer just below the current LP value.
2247 const IntegerValue x_floor =
2248 static_cast<int64_t>(std::floor(x.LpValue(manager->LpValues())));
2249 manager->AddCut(ComputeHyperplanBelowSquare(x, y, x_floor, model),
2250 "SquareLower");
2251 return true;
2252 };
2253
2254 return result;
2255 }
2256
2257 ImpliedBoundsProcessor::BestImpliedBoundInfo
// NOTE(review): the function name line (original line 2258) is missing from
// this extraction; this is GetCachedImpliedBoundInfo(IntegerVariable var),
// which returns the cached entry for var or a default-constructed info when
// there is no entry, no Boolean, or the Boolean became fixed.
2259 auto it = cache_.find(var);
2260 if (it != cache_.end()) {
2261 BestImpliedBoundInfo result = it->second;
2262 if (result.bool_var == kNoIntegerVariable) return BestImpliedBoundInfo();
// A fixed Boolean no longer encodes a useful implied-bound decomposition.
2263 if (integer_trail_->IsFixed(result.bool_var)) return BestImpliedBoundInfo();
2264 return result;
2265 }
2266 return BestImpliedBoundInfo();
2267 }
2268
// NOTE(review): the return-type line (original line 2269) and the lp_values
// parameter line (original line 2272) are missing from this extraction —
// confirm against the full source. Scans all implied bounds of var, caches
// the "best" one (tightest slack, then largest implied bound) and separates
// violated implied-bound relations as cuts along the way.
2270 ImpliedBoundsProcessor::ComputeBestImpliedBound(
2271 IntegerVariable var,
2273 auto it = cache_.find(var);
2274 if (it != cache_.end()) return it->second;
2275 BestImpliedBoundInfo result;
2276 double result_slack_lp_value = std::numeric_limits<double>::infinity();
2277 const IntegerValue lb = integer_trail_->LevelZeroLowerBound(var);
2278 for (const ImpliedBoundEntry& entry :
2279 implied_bounds_->GetImpliedBounds(var)) {
2280 // Only process entries with a Boolean variable currently part of the LP
2281 // we are considering for this cut.
2282 //
2283 // TODO(user): the more we use cuts, the less it make sense to have a
2284 // lot of small independent LPs.
2285 if (!lp_vars_.contains(PositiveVariable(entry.literal_view))) {
2286 continue;
2287 }
2288
2289 // The equation is X = lb + diff * Bool + Slack where Bool is in [0, 1]
2290 // and slack in [0, ub - lb].
2291 const IntegerValue diff = entry.lower_bound - lb;
2292 CHECK_GE(diff, 0);
2293 const double bool_lp_value =
2294 VariableIsPositive(entry.literal_view)
2295 ? lp_values[entry.literal_view]
2296 : 1.0 - lp_values[PositiveVariable(entry.literal_view)];
2297 const double slack_lp_value =
2298 lp_values[var] - ToDouble(lb) - bool_lp_value * ToDouble(diff);
2299
2300 // If the implied bound equation is not respected, we just add it
2301 // to implied_bound_cuts, and skip the entry for now.
2302 if (slack_lp_value < -1e-4) {
2303 LinearConstraint ib_cut;
2304 ib_cut.lb = kMinIntegerValue;
2305 std::vector<std::pair<IntegerVariable, IntegerValue>> terms;
2306 if (VariableIsPositive(entry.literal_view)) {
2307 // X >= Indicator * (bound - lb) + lb
2308 terms.push_back({entry.literal_view, diff});
2309 terms.push_back({var, IntegerValue(-1)});
2310 ib_cut.ub = -lb;
2311 } else {
2312 // X >= -Indicator * (bound - lb) + bound
2313 terms.push_back({PositiveVariable(entry.literal_view), -diff});
2314 terms.push_back({var, IntegerValue(-1)});
2315 ib_cut.ub = -entry.lower_bound;
2316 }
2317 CleanTermsAndFillConstraint(&terms, &ib_cut);
2318 ib_cut_pool_.AddCut(std::move(ib_cut), "IB", lp_values);
2319 continue;
2320 }
2321
2322 // We look for tight implied bounds, and amongst the tightest one, we
2323 // prefer larger coefficient in front of the Boolean.
2324 if (slack_lp_value + 1e-4 < result_slack_lp_value ||
2325 (slack_lp_value < result_slack_lp_value + 1e-4 &&
2326 entry.lower_bound > result.implied_bound)) {
2327 result_slack_lp_value = slack_lp_value;
2328 result.var_lp_value = lp_values[var];
2329 result.bool_lp_value = bool_lp_value;
2330 result.implied_bound = entry.lower_bound;
2331 result.bool_var = entry.literal_view;
2332 }
2333 }
// Cache the answer (possibly an empty info) so later calls are O(1).
2334 cache_[var] = result;
2335 return result;
2336 }
2337
// NOTE(review): the signature lines (original lines 2338-2339) are missing
// from this extraction; this rebuilds the implied-bound cache from scratch
// for every LP variable that has implied bounds (ComputeBestImpliedBound also
// separates violated implied-bound relations as a side effect).
2340 cache_.clear();
2341 for (const IntegerVariable var :
2342 implied_bounds_->VariablesWithImpliedBounds()) {
2343 if (!lp_vars_.contains(PositiveVariable(var))) continue;
2344 ComputeBestImpliedBound(var, lp_values);
2345 }
2346 }
2347
// NOTE(review): the function name line (original line 2348) is missing from
// this extraction; this is DecomposeWithImpliedLowerBound(), which rewrites a
// simple non-Boolean term coeff * X (using its cached implied lower bound
// X >= lb + bound_diff * B) as bool_term + slack_term. Returns false when the
// term does not qualify or the decomposition could overflow.
2349 const CutTerm& term, IntegerValue factor_t, CutTerm& bool_term,
2350 CutTerm& slack_term) {
2351 // We only want to expand non-Boolean and non-slack term!
2352 if (term.bound_diff <= 1) return false;
2353 if (!term.IsSimple()) return false;
2354 DCHECK_EQ(IntTypeAbs(term.expr_coeffs[0]), 1);
2355
2356 // Try lower bounded direction for implied bound.
2357 // This kind should always be beneficial if it exists:
2358 //
2359 // Because X = bound_diff * B + S
2360 // We can replace coeff * X by the expression before applying f:
2361 // = f(coeff * bound_diff) * B + f(coeff) * [X - bound_diff * B]
2362 // = f(coeff) * X + (f(coeff * bound_diff) - f(coeff) * bound_diff] * B
2363 // So we can "lift" B into the cut with a non-negative coefficient.
2364 //
2365 // Note that this lifting is really the same as if we used that implied
2366 // bound before since in f(coeff * bound_diff) * B + f(coeff) * S, if we
2367 // replace S by its value [X - bound_diff * B] we get the same result.
2368 //
2369 // TODO(user): Ignore if bound_diff == 1 ? But we can still merge B with
2370 // another entry if it exists, so it can still be good in this case.
2371 //
2372 // TODO(user): Only do it if coeff_b > 0 ? But again we could still merge
2373 // B with an existing Boolean for a better cut even if coeff_b == 0.
2374 if (term.cached_implied_lb < 0) return false;
2375 const BestImpliedBoundInfo info = cached_data_[term.cached_implied_lb];
2376 const IntegerValue lb = -term.expr_offset;
2377 const IntegerValue bound_diff = info.implied_bound - lb;
2378 if (bound_diff <= 0) return false;
// Reject decompositions whose Boolean coefficient (times the rounding
// factor t) would overflow.
2379 if (ProdOverflow(factor_t, CapProdI(term.coeff, bound_diff))) return false;
2380
2381 // We have X/-X = info.diff * Boolean + slack.
2382 bool_term.coeff = term.coeff * bound_diff;
2383 bool_term.expr_vars[0] = PositiveVariable(info.bool_var);
2384 bool_term.expr_coeffs[1] = 0;
2385 bool_term.bound_diff = IntegerValue(1);
2386 bool_term.lp_value = info.bool_lp_value;
// Encode a negated Boolean as (1 - positive_var).
2387 if (VariableIsPositive(info.bool_var)) {
2388 bool_term.expr_coeffs[0] = IntegerValue(1);
2389 bool_term.expr_offset = IntegerValue(0);
2390 } else {
2391 bool_term.expr_coeffs[0] = IntegerValue(-1);
2392 bool_term.expr_offset = IntegerValue(1);
2393 }
2394
2395 // Create slack.
2396 // The expression is term.exp - bound_diff * bool_term
2397 // The variable shouldn't be the same.
2398 DCHECK_NE(term.expr_vars[0], bool_term.expr_vars[0]);
2399 slack_term.expr_vars[0] = term.expr_vars[0];
2400 slack_term.expr_coeffs[0] = term.expr_coeffs[0];
2401 slack_term.expr_vars[1] = bool_term.expr_vars[0];
2402 slack_term.expr_coeffs[1] = -bound_diff * bool_term.expr_coeffs[0];
2403 slack_term.expr_offset =
2404 term.expr_offset - bound_diff * bool_term.expr_offset;
2405
2406 slack_term.lp_value = info.SlackLpValue(lb);
2407 slack_term.coeff = term.coeff;
2408 slack_term.bound_diff = term.bound_diff;
2409
2410 return true;
2411 }
2412
2413 // We use the fact that calling DecomposeWithImpliedLowerBound() with
2414 // term.Complement() give us almost what we want. You have
2415 // -complement(X) = -diff.B - slack
2416 // - (diff - X) = -diff.(1 -(1- B)) - slack
2417 // X = diff.(1 - B) - slack;
// NOTE(review): the function name line (original line 2418) is missing from
// this extraction; this is DecomposeWithImpliedUpperBound() per the comment
// above and the call site in PostprocessWithImpliedBound() below.
2419 const CutTerm& term, IntegerValue factor_t, CutTerm& bool_term,
2420 CutTerm& slack_term) {
2421 absl::int128 unused = 0;
2422 CutTerm complement = term;
2423 complement.Complement(&unused);
2424 if (!DecomposeWithImpliedLowerBound(complement, factor_t, bool_term,
2425 slack_term)) {
2426 return false;
2427 }
2428 // This is required not to have a constant term which might mess up our cut
2429 // heuristics.
// NOTE(review): original line 2431 (the right-hand side of this comparison,
// presumably IntTypeAbs(CapProdI(term.bound_diff, term.coeff))) is missing
// from this extraction.
2430 if (IntTypeAbs(bool_term.coeff) !=
2432 return false;
2433 }
// Complementing the Boolean back must not introduce a constant offset.
2434 bool_term.Complement(&unused);
2435 CHECK_EQ(unused, absl::int128(0));
2436 return true;
2437 }
2438
// NOTE(review): the function name line (original line 2439) is missing from
// this extraction; this is ImpliedBoundsProcessor::PostprocessWithImpliedBound
// per the call sites above. For each simple non-Boolean term it scores the
// lower-bound and upper-bound implied-bound decompositions under the
// super-additive function f and applies the best one, then merges the newly
// created Boolean terms into the cut. Returns {num_lb, num_ub, num_merges}.
2440 const std::function<IntegerValue(IntegerValue)>& f, IntegerValue factor_t,
2441 CutData* cut) {
2442 int num_applied_lb = 0;
2443 int num_applied_ub = 0;
2444
2445 CutTerm bool_term;
2446 CutTerm slack_term;
2447 CutTerm ub_bool_term;
2448 CutTerm ub_slack_term;
2449
2450 tmp_terms_.clear();
2451 for (CutTerm& term : cut->terms) {
2452 if (term.bound_diff <= 1) continue;
2453 if (!term.IsSimple()) continue;
2454
2455 // Score is just the final lp value.
2456 // Higher is better since it is a <= constraint.
2457 double base_score;
2458 bool expand = false;
2459 if (DecomposeWithImpliedLowerBound(term, factor_t, bool_term, slack_term)) {
2460 // This side is always good.
2461 // c.X = c.d.B + c.S
2462 // applying f to the result we have f(c.d).B + f(c).[X - d.B]
2463 // which give f(c).X + [f(c.d) - f(c).d].B
2464 // and the second term is always positive by super-additivity.
2465 expand = true;
2466 base_score = AsDouble(f(bool_term.coeff)) * bool_term.lp_value +
2467 AsDouble(f(slack_term.coeff)) * slack_term.lp_value;
2468 } else {
2469 base_score = AsDouble(f(term.coeff)) * term.lp_value;
2470 }
2471
2472 // Test if it is better to use this "bad" side.
2473 //
2474 // Use the implied bound on (-X) if it is beneficial to do so.
2475 // Like complementing, this is not always good.
2476 //
2477 // We have comp(X) = diff - X = diff * B + S
2478 // X = diff * (1 - B) - S.
2479 // So if we applies f, we will get:
2480 // f(coeff * diff) * (1 - B) + f(-coeff) * S
2481 // and substituing S = diff * (1 - B) - X, we get:
2482 // -f(-coeff) * X + [f(coeff * diff) + f(-coeff) * diff] (1 - B).
2483 //
2484 // TODO(user): Note that while the violation might be higher, if the slack
2485 // becomes large this will result in a less powerfull cut. Shall we do
2486 // that? It is a bit the same problematic with complementing.
2487 //
2488 // TODO(user): If the slack is close to zero, then this transformation
2489 // will always increase the violation. So we could potentially do it in
2490 // Before our divisor selection heuristic. But the norm of the final cut
2491 // will increase too.
2492 if (DecomposeWithImpliedUpperBound(term, factor_t, ub_bool_term,
2493 ub_slack_term)) {
2494 const double score =
2495 AsDouble(f(ub_bool_term.coeff)) * ub_bool_term.lp_value +
2496 AsDouble(f(ub_slack_term.coeff)) * ub_slack_term.lp_value;
2497 // Note that because the slack is of the opposite sign, we might
2498 // loose more, so we prefer to be a bit defensive.
2499 if (score > base_score + 1e-2) {
2500 ++num_applied_ub;
2501 term = ub_slack_term;
2502 tmp_terms_.push_back(ub_bool_term);
2503 continue;
2504 }
2505 }
2506
2507 if (expand) {
2508 ++num_applied_lb;
2509 term = slack_term;
2510 tmp_terms_.push_back(bool_term);
2511 }
2512 }
2513
// Merge the lifted Booleans with existing terms where possible.
2514 const int num_merges = cut_builder_.AddOrMergeBooleanTerms(
2515 absl::MakeSpan(tmp_terms_), factor_t, cut);
2516
2517 return {num_applied_lb, num_applied_ub, num_merges};
2518 }
2519
// NOTE(review): the function name line (original line 2520) is missing from
// this extraction; this is TryToExpandWithLowerImpliedbound() per the call
// site in TryWithLetchfordSouliLifting() above. On success, *term is replaced
// by the slack part of its implied-lower-bound decomposition and the Boolean
// part is appended to new_bool_terms (both possibly complemented, updating
// *rhs, when `complement` is true).
2521 IntegerValue factor_t, bool complement, CutTerm* term, absl::int128* rhs,
2522 std::vector<CutTerm>* new_bool_terms) {
2523 CutTerm bool_term;
2524 CutTerm slack_term;
2525 if (!DecomposeWithImpliedLowerBound(*term, factor_t, bool_term, slack_term)) {
2526 return false;
2527 }
2528
2529 // It should be good to use IB, but sometime we have things like
2530 // 7.3 = 2 * bool@1 + 5.3 and the expanded Boolean is at its upper bound.
2531 // It is always good to complement such variable.
2532 //
2533 // Note that here we do more and just complement anything closer to UB.
2534 if (complement) {
2535 if (bool_term.lp_value > 0.5) {
2536 bool_term.Complement(rhs);
2537 }
2538 if (slack_term.lp_value > 0.5 * AsDouble(slack_term.bound_diff)) {
2539 slack_term.Complement(rhs);
2540 }
2541 }
2542
2543 *term = slack_term;
2544 new_bool_terms->push_back(bool_term);
2545 return true;
2546 }
2547
// Precomputes, for every simple non-Boolean non-slack term of the cut, the
// best implied lower/upper bound info and stores indices to it in the terms'
// cached_implied_lb / cached_implied_ub fields. Returns true when at least
// one term has usable cached data.
2548 bool ImpliedBoundsProcessor::CacheDataForCut(IntegerVariable first_slack,
2549 CutData* cut) {
2550 cached_data_.clear();
2551
2552 const int size = cut->terms.size();
2553 for (int i = 0; i < size; ++i) {
2554 const CutTerm& term = cut->terms[i];
2555 if (!term.IsSimple()) continue;
2556 if (term.IsBoolean()) continue;
// Variables at or above first_slack are LP-row slacks, not model variables.
2557 if (term.expr_vars[0] >= first_slack) continue;
2558
2559 // Cache the BestImpliedBoundInfo if relevant.
2560 const IntegerVariable ib_var = term.expr_coeffs[0] > 0
2561 ? term.expr_vars[0]
2562 : NegationOf(term.expr_vars[0]);
// NOTE(review): original line 2563 (presumably
// "BestImpliedBoundInfo lb_info = GetCachedImpliedBoundInfo(ib_var);") is
// missing from this extraction.
2564 if (lb_info.bool_var != kNoIntegerVariable) {
2565 cut->terms[i].cached_implied_lb = cached_data_.size();
2566 cached_data_.emplace_back(std::move(lb_info));
2567 }
// NOTE(review): original line 2569 (presumably the
// GetCachedImpliedBoundInfo(NegationOf(ib_var)) initializer) is missing from
// this extraction.
2568 BestImpliedBoundInfo ub_info =
2570 if (ub_info.bool_var != kNoIntegerVariable) {
2571 cut->terms[i].cached_implied_ub = cached_data_.size();
2572 cached_data_.emplace_back(std::move(ub_info));
2573 }
2574 }
2575
2576 return !cached_data_.empty();
2577 }
2578
// Resets all accumulated state (distinct minimum values and per-expression
// lower bounds) so the bounder can be reused on a fresh set of expressions.
// NOTE(review): the signature line of SumOfAllDiffLowerBounder::Clear()
// (original line 2579) was lost during extraction.
2580 min_values_.clear();
2581 expr_mins_.clear();
2582 }
2583
2584void SumOfAllDiffLowerBounder::Add(const AffineExpression& expr, int num_exprs,
2585 const IntegerTrail& integer_trail) {
2586 expr_mins_.push_back(integer_trail.LevelZeroLowerBound(expr).value());
2587
2588 if (integer_trail.IsFixed(expr)) {
2589 min_values_.insert(integer_trail.FixedValue(expr));
2590 } else {
2591 if (expr.coeff > 0) {
2592 int count = 0;
2593 for (const IntegerValue value :
2594 integer_trail.InitialVariableDomain(expr.var).Values()) {
2595 min_values_.insert(expr.ValueAt(value));
2596 if (++count >= num_exprs) break;
2597 }
2598 } else {
2599 int count = 0;
2600 for (const IntegerValue value :
2601 integer_trail.InitialVariableDomain(expr.var).Negation().Values()) {
2602 min_values_.insert(-expr.ValueAt(value));
2603 if (++count >= num_exprs) break;
2604 }
2605 }
2606 }
2607}
2608
// Sums (with overflow capping) the smallest distinct domain values collected
// in min_values_, taking at most one value per registered expression.
// NOTE(review): the signature line of
// SumOfAllDiffLowerBounder::SumOfMinDomainValues() (original line 2609) was
// lost during extraction.
2610 int count = 0;
2611 IntegerValue sum = 0;
2612 for (const IntegerValue value : min_values_) {
2613 sum = CapAddI(sum, value);
2614 if (++count >= expr_mins_.size()) return sum;
2615 }
2616 return sum;
2617 }
2618
// Computes a lower bound on the sum assuming all expressions take pairwise
// different values: after sorting the per-expression minimums, each one is
// bumped to at least one more than the previously used value.
// NOTE(review): the signature line of
// SumOfAllDiffLowerBounder::SumOfDifferentMins() (original line 2619) was
// lost during extraction. Note also that this sorts expr_mins_ in place.
2620 std::sort(expr_mins_.begin(), expr_mins_.end());
2621 IntegerValue tmp_value = kMinIntegerValue;
2622 IntegerValue result = 0;
2623 for (const IntegerValue value : expr_mins_) {
2624 // Make sure values are different.
2625 tmp_value = std::max(tmp_value + 1, value);
2626 result += tmp_value;
2627 }
2628 return result;
2629 }
2630
2631IntegerValue SumOfAllDiffLowerBounder::GetBestLowerBound(std::string& suffix) {
2632 const IntegerValue domain_bound = SumOfMinDomainValues();
2633 const IntegerValue alldiff_bound = SumOfDifferentMins();
2634 if (domain_bound > alldiff_bound) {
2635 suffix = "d";
2636 return domain_bound;
2637 }
2638 suffix = alldiff_bound > domain_bound ? "a" : "e";
2639 return alldiff_bound;
2640}
2641
2642namespace {
2643
// Scans the (sorted) expressions, growing a prefix set while maintaining
// lower/upper bounds on the sum of an all-different set. Whenever the LP sum
// of the current set violates those bounds by more than kMinCutViolation, a
// cut is emitted and the accumulators are reset.
// NOTE(review): original line 2647 (the lp_values parameter of this function)
// was lost during extraction; `lp_values` used below comes from it.
2644 void TryToGenerateAllDiffCut(
2645 absl::Span<const std::pair<double, AffineExpression>> sorted_exprs_lp,
2646 const IntegerTrail& integer_trail,
2648 TopNCuts& top_n_cuts, Model* model) {
2649 const int num_exprs = sorted_exprs_lp.size();
2650
2651 std::vector<AffineExpression> current_set_exprs;
2652 SumOfAllDiffLowerBounder diff_mins;
2653 SumOfAllDiffLowerBounder negated_diff_maxes;
2654
2655 double sum = 0.0;
2656
2657 for (const auto& [expr_lp, expr] : sorted_exprs_lp) {
2658 sum += expr_lp;
2659 diff_mins.Add(expr, num_exprs, integer_trail);
2660 negated_diff_maxes.Add(expr.Negated(), num_exprs, integer_trail);
2661 current_set_exprs.push_back(expr);
2662 CHECK_EQ(current_set_exprs.size(), diff_mins.size());
2663 CHECK_EQ(current_set_exprs.size(), negated_diff_maxes.size());
2664 std::string min_suffix;
2665 const IntegerValue required_min_sum =
2666 diff_mins.GetBestLowerBound(min_suffix);
2667 std::string max_suffix;
2668 const IntegerValue required_max_sum =
2669 -negated_diff_maxes.GetBestLowerBound(max_suffix);
2670 if (required_max_sum == std::numeric_limits<IntegerValue>::max()) continue;
2671 DCHECK_LE(required_min_sum, required_max_sum);
2672 if (sum < ToDouble(required_min_sum) - kMinCutViolation ||
2673 sum > ToDouble(required_max_sum) + kMinCutViolation) {
2674 LinearConstraintBuilder cut(model, required_min_sum, required_max_sum);
2675 for (AffineExpression expr : current_set_exprs) {
2676 cut.AddTerm(expr, IntegerValue(1));
2677 }
2678 top_n_cuts.AddCut(cut.Build(),
2679 absl::StrCat("AllDiff_", min_suffix, max_suffix),
2680 lp_values);
2681
2682 // NOTE: We can extend the current set but it is more helpful to generate
2683 // the cut on a different set of variables so we reset the counters.
2684 sum = 0.0;
2685 current_set_exprs.clear();
2686 diff_mins.Clear();
2687 negated_diff_maxes.Clear();
2688 }
2689 }
2690 }
2691
2692} // namespace
2693
// Builds the all-different cut generator: collects the non-fixed variables,
// then at level zero sorts the unfixed expressions by LP value and tries to
// generate cuts on increasing and decreasing prefixes.
// NOTE(review): the opening line of CreateAllDifferentCutGenerator()
// (original line 2694) and original line 2704 (presumably the
// sort-and-dedup of result.vars) were lost during extraction.
2695 absl::Span<const AffineExpression> exprs, Model* model) {
2696 CutGenerator result;
2697 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
2698
2699 for (const AffineExpression& expr : exprs) {
2700 if (!integer_trail->IsFixed(expr)) {
2701 result.vars.push_back(expr.var);
2702 }
2703 }
2705
2706 Trail* trail = model->GetOrCreate<Trail>();
2707 result.generate_cuts =
2708 [exprs = std::vector<AffineExpression>(exprs.begin(), exprs.end()),
2709 integer_trail, trail, model](LinearConstraintManager* manager) {
2710 // These cuts work at all levels but the generator adds too many cuts on
2711 // some instances and degrade the performance so we only use it at level
2712 // 0.
2713 if (trail->CurrentDecisionLevel() > 0) return true;
2714 const auto& lp_values = manager->LpValues();
2715 std::vector<std::pair<double, AffineExpression>> sorted_exprs;
2716 for (const AffineExpression expr : exprs) {
2717 if (integer_trail->LevelZeroLowerBound(expr) ==
2718 integer_trail->LevelZeroUpperBound(expr)) {
2719 continue;
2720 }
2721 sorted_exprs.push_back(std::make_pair(expr.LpValue(lp_values), expr));
2722 }
2723
2724 TopNCuts top_n_cuts(5);
2725 std::sort(sorted_exprs.begin(), sorted_exprs.end(),
2726 [](std::pair<double, AffineExpression>& a,
2727 const std::pair<double, AffineExpression>& b) {
2728 return a.first < b.first;
2729 });
2730 TryToGenerateAllDiffCut(sorted_exprs, *integer_trail, lp_values,
2731 top_n_cuts, model);
2732 // Other direction.
2733 std::reverse(sorted_exprs.begin(), sorted_exprs.end());
2734 TryToGenerateAllDiffCut(sorted_exprs, *integer_trail, lp_values,
2735 top_n_cuts, model);
2736 top_n_cuts.TransferToManager(manager);
2737 return true;
2738 };
2739 VLOG(2) << "Created all_diff cut generator of size: " << exprs.size();
2740 return result;
2741 }
2742
2743namespace {
2744// Returns max((w2i - w1i)*Li, (w2i - w1i)*Ui).
2745IntegerValue MaxCornerDifference(const IntegerVariable var,
2746 const IntegerValue w1_i,
2747 const IntegerValue w2_i,
2748 const IntegerTrail& integer_trail) {
2749 const IntegerValue lb = integer_trail.LevelZeroLowerBound(var);
2750 const IntegerValue ub = integer_trail.LevelZeroUpperBound(var);
2751 return std::max((w2_i - w1_i) * lb, (w2_i - w1_i) * ub);
2752}
2753
2754// This is the coefficient of zk in the cut, where k = max_index.
2755// MPlusCoefficient_ki = max((wki - wI(i)i) * Li,
2756// (wki - wI(i)i) * Ui)
2757// = max corner difference for variable i,
2758// target expr I(i), max expr k.
2759// The coefficient of zk is Sum(i=1..n)(MPlusCoefficient_ki) + bk
2760IntegerValue MPlusCoefficient(
2761 absl::Span<const IntegerVariable> x_vars,
2762 absl::Span<const LinearExpression> exprs,
2763 const util_intops::StrongVector<IntegerVariable, int>& variable_partition,
2764 const int max_index, const IntegerTrail& integer_trail) {
2765 IntegerValue coeff = exprs[max_index].offset;
2766 // TODO(user): This algo is quadratic since GetCoefficientOfPositiveVar()
2767 // is linear. This can be optimized (better complexity) if needed.
2768 for (const IntegerVariable var : x_vars) {
2769 const int target_index = variable_partition[var];
2770 if (max_index != target_index) {
2771 coeff += MaxCornerDifference(
2772 var, GetCoefficientOfPositiveVar(var, exprs[target_index]),
2773 GetCoefficientOfPositiveVar(var, exprs[max_index]), integer_trail);
2774 }
2775 }
2776 return coeff;
2777}
2778
2779// Compute the value of
2780// rhs = wI(i)i * xi + Sum(k=1..d)(MPlusCoefficient_ki * zk)
2781// for variable xi for given target index I(i).
2782double ComputeContribution(
2783 const IntegerVariable xi_var, absl::Span<const IntegerVariable> z_vars,
2784 absl::Span<const LinearExpression> exprs,
2785 const util_intops::StrongVector<IntegerVariable, double>& lp_values,
2786 const IntegerTrail& integer_trail, const int target_index) {
2787 CHECK_GE(target_index, 0);
2788 CHECK_LT(target_index, exprs.size());
2789 const LinearExpression& target_expr = exprs[target_index];
2790 const double xi_value = lp_values[xi_var];
2791 const IntegerValue wt_i = GetCoefficientOfPositiveVar(xi_var, target_expr);
2792 double contrib = ToDouble(wt_i) * xi_value;
2793 for (int expr_index = 0; expr_index < exprs.size(); ++expr_index) {
2794 if (expr_index == target_index) continue;
2795 const LinearExpression& max_expr = exprs[expr_index];
2796 const double z_max_value = lp_values[z_vars[expr_index]];
2797 const IntegerValue corner_value = MaxCornerDifference(
2798 xi_var, wt_i, GetCoefficientOfPositiveVar(xi_var, max_expr),
2799 integer_trail);
2800 contrib += ToDouble(corner_value) * z_max_value;
2801 }
2802 return contrib;
2803}
2804} // namespace
2805
// Builds the lin-max cut generator: partitions each x variable to the
// expression where its contribution is smallest, then assembles the cut
//   -target + sum(wI(i)i * xi) + sum(MPlusCoefficient_k * zk) >= 0
// and adds it when its LP violation exceeds 1e-2.
// NOTE(review): original line 2818 (presumably the sort-and-dedup of x_vars)
// and lines 2833/2835 (the StrongVector declarations of variable_partition
// and variable_partition_contrib) were lost during extraction.
2806 CutGenerator CreateLinMaxCutGenerator(const IntegerVariable target,
2807 absl::Span<const LinearExpression> exprs,
2808 absl::Span<const IntegerVariable> z_vars,
2809 Model* model) {
2810 CutGenerator result;
2811 std::vector<IntegerVariable> x_vars;
2812 result.vars = {target};
2813 const int num_exprs = exprs.size();
2814 for (int i = 0; i < num_exprs; ++i) {
2815 result.vars.push_back(z_vars[i]);
2816 x_vars.insert(x_vars.end(), exprs[i].vars.begin(), exprs[i].vars.end());
2817 }
2819 // All expressions should only contain positive variables.
2820 DCHECK(std::all_of(x_vars.begin(), x_vars.end(), [](IntegerVariable var) {
2821 return VariableIsPositive(var);
2822 }));
2823 result.vars.insert(result.vars.end(), x_vars.begin(), x_vars.end());
2824
2825 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
2826 result.generate_cuts =
2827 [x_vars,
2828 z_vars = std::vector<IntegerVariable>(z_vars.begin(), z_vars.end()),
2829 target, num_exprs,
2830 exprs = std::vector<LinearExpression>(exprs.begin(), exprs.end()),
2831 integer_trail, model](LinearConstraintManager* manager) {
2832 const auto& lp_values = manager->LpValues();
2834 lp_values.size(), -1);
2836 variable_partition_contrib(lp_values.size(),
2837 std::numeric_limits<double>::infinity());
2838 for (int expr_index = 0; expr_index < num_exprs; ++expr_index) {
2839 for (const IntegerVariable var : x_vars) {
2840 const double contribution = ComputeContribution(
2841 var, z_vars, exprs, lp_values, *integer_trail, expr_index);
2842 const double prev_contribution = variable_partition_contrib[var];
2843 if (contribution < prev_contribution) {
2844 variable_partition[var] = expr_index;
2845 variable_partition_contrib[var] = contribution;
2846 }
2847 }
2848 }
2849
2850 LinearConstraintBuilder cut(model, /*lb=*/IntegerValue(0),
2851 /*ub=*/kMaxIntegerValue);
2852 double violation = lp_values[target];
2853 cut.AddTerm(target, IntegerValue(-1));
2854
2855 for (const IntegerVariable xi_var : x_vars) {
2856 const int input_index = variable_partition[xi_var];
2857 const LinearExpression& expr = exprs[input_index];
2858 const IntegerValue coeff = GetCoefficientOfPositiveVar(xi_var, expr);
2859 if (coeff != IntegerValue(0)) {
2860 cut.AddTerm(xi_var, coeff);
2861 }
2862 violation -= ToDouble(coeff) * lp_values[xi_var];
2863 }
2864 for (int expr_index = 0; expr_index < num_exprs; ++expr_index) {
2865 const IntegerVariable z_var = z_vars[expr_index];
2866 const IntegerValue z_coeff = MPlusCoefficient(
2867 x_vars, exprs, variable_partition, expr_index, *integer_trail);
2868 if (z_coeff != IntegerValue(0)) {
2869 cut.AddTerm(z_var, z_coeff);
2870 }
2871 violation -= ToDouble(z_coeff) * lp_values[z_var];
2872 }
2873 if (violation > 1e-2) {
2874 manager->AddCut(cut.Build(), "LinMax");
2875 }
2876 return true;
2877 };
2878 return result;
2879 }
2880
2881namespace {
2882
2883IntegerValue EvaluateMaxAffine(
2884 absl::Span<const std::pair<IntegerValue, IntegerValue>> affines,
2885 IntegerValue x) {
2886 IntegerValue y = kMinIntegerValue;
2887 for (const auto& p : affines) {
2888 y = std::max(y, x * p.first + p.second);
2889 }
2890 return y;
2891}
2892
2893} // namespace
2894
// Builds in *builder the upper envelope constraint of a max-affine function
// over [x_min, x_max]:
//   delta_x * target - delta_y * var <= delta_x * y_at_min - delta_y * x_min.
// Returns false if any intermediate product could overflow.
// NOTE(review): the opening line of BuildMaxAffineUpConstraint() (original
// line 2895) was lost during extraction; the parameter list below belongs
// to it.
2896 const LinearExpression& target, IntegerVariable var,
2897 absl::Span<const std::pair<IntegerValue, IntegerValue>> affines,
2898 Model* model, LinearConstraintBuilder* builder) {
2899 auto* integer_trail = model->GetOrCreate<IntegerTrail>();
2900 const IntegerValue x_min = integer_trail->LevelZeroLowerBound(var);
2901 const IntegerValue x_max = integer_trail->LevelZeroUpperBound(var);
2902
2903 const IntegerValue y_at_min = EvaluateMaxAffine(affines, x_min);
2904 const IntegerValue y_at_max = EvaluateMaxAffine(affines, x_max);
2905
2906 const IntegerValue delta_x = x_max - x_min;
2907 const IntegerValue delta_y = y_at_max - y_at_min;
2908
2909 // target <= y_at_min + (delta_y / delta_x) * (var - x_min)
2910 // delta_x * target <= delta_x * y_at_min + delta_y * (var - x_min)
2911 // -delta_y * var + delta_x * target <= delta_x * y_at_min - delta_y * x_min
2912 //
2913 // Checks the rhs for overflows.
2914 if (ProdOverflow(delta_x, y_at_min) || ProdOverflow(delta_x, y_at_max) ||
2915 ProdOverflow(delta_y, x_min) || ProdOverflow(delta_y, x_max)) {
2916 return false;
2917 }
2918
2919 // Checks target * delta_x for overflow.
2920 int64_t abs_magnitude = std::abs(target.offset.value());
2921 for (int i = 0; i < target.vars.size(); ++i) {
2922 const IntegerVariable var = target.vars[i];
2923 const IntegerValue var_min = integer_trail->LevelZeroLowerBound(var);
2924 const IntegerValue var_max = integer_trail->LevelZeroUpperBound(var);
2925 abs_magnitude = CapAdd(
2926 CapProd(std::max(std::abs(var_min.value()), std::abs(var_max.value())),
2927 std::abs(target.coeffs[i].value())),
2928 abs_magnitude);
2929 }
2930 if (AtMinOrMaxInt64(abs_magnitude) ||
2931 AtMinOrMaxInt64(CapProd(abs_magnitude, delta_x.value()))) {
2932 return false;
2933 }
2934
2935 builder->ResetBounds(kMinIntegerValue, delta_x * y_at_min - delta_y * x_min);
2936 builder->AddLinearExpression(target, delta_x);
2937 builder->AddTerm(var, -delta_y);
2938
2939 // Prevent to create constraints that can overflow.
2940 if (!ValidateLinearConstraintForOverflow(builder->Build(), *integer_trail)) {
2941 VLOG(2) << "Linear constraint can cause overflow: " << builder->Build();
2942
2943 return false;
2944 }
2945
2946 return true;
2947 }
2948
// Builds the max-affine cut generator: whenever var is not fixed, tries to
// build the upper envelope constraint and adds it under the given cut name.
// NOTE(review): the opening line of CreateMaxAffineCutGenerator() (original
// line 2949) and original line 2956 (presumably the sort-and-dedup of
// result.vars) were lost during extraction.
2950 LinearExpression target, IntegerVariable var,
2951 std::vector<std::pair<IntegerValue, IntegerValue>> affines,
2952 const std::string cut_name, Model* model) {
2953 CutGenerator result;
2954 result.vars = target.vars;
2955 result.vars.push_back(var);
2957
2958 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
2959 result.generate_cuts = [target, var, affines, cut_name, integer_trail,
2960 model](LinearConstraintManager* manager) {
2961 if (integer_trail->IsFixed(var)) return true;
2962 LinearConstraintBuilder builder(model);
2963 if (BuildMaxAffineUpConstraint(target, var, affines, model, &builder)) {
2964 manager->AddCut(builder.Build(), cut_name);
2965 }
2966 return true;
2967 };
2968 return result;
2969 }
2970
// Builds the clique cut generator: keeps only [0,1] variables with an
// associated literal, asks the implication graph for heavy at-most-one
// constraints over them, and converts each into a linear cut expressed on
// the original integer variables (using 1 - X for negated literals).
// NOTE(review): the opening line of CreateCliqueCutGenerator() (original
// line 2971) and original line 3016 (the start of the
// LinearConstraintBuilder declaration) were lost during extraction.
2972 absl::Span<const IntegerVariable> base_variables, Model* model) {
2973 // Filter base_variables to only keep the one with a literal view, and
2974 // do the conversion.
2975 std::vector<IntegerVariable> variables;
2976 std::vector<Literal> literals;
2977 absl::flat_hash_map<LiteralIndex, IntegerVariable> positive_map;
2978 absl::flat_hash_map<LiteralIndex, IntegerVariable> negative_map;
2979 auto* integer_trail = model->GetOrCreate<IntegerTrail>();
2980 auto* encoder = model->GetOrCreate<IntegerEncoder>();
2981 for (const IntegerVariable var : base_variables) {
2982 if (integer_trail->LowerBound(var) != IntegerValue(0)) continue;
2983 if (integer_trail->UpperBound(var) != IntegerValue(1)) continue;
2984 const LiteralIndex literal_index = encoder->GetAssociatedLiteral(
2985 IntegerLiteral::GreaterOrEqual(var, IntegerValue(1)));
2986 if (literal_index != kNoLiteralIndex) {
2987 variables.push_back(var);
2988 literals.push_back(Literal(literal_index));
2989 positive_map[literal_index] = var;
2990 negative_map[Literal(literal_index).NegatedIndex()] = var;
2991 }
2992 }
2993 CutGenerator result;
2994 result.vars = variables;
2995 auto* implication_graph = model->GetOrCreate<BinaryImplicationGraph>();
2996 result.only_run_at_level_zero = true;
2997 result.generate_cuts = [variables, literals, implication_graph, positive_map,
2998 negative_map,
2999 model](LinearConstraintManager* manager) {
3000 std::vector<double> packed_values;
3001 std::vector<double> packed_reduced_costs;
3002 const auto& lp_values = manager->LpValues();
3003 const auto& reduced_costs = manager->ReducedCosts();
3004 for (int i = 0; i < literals.size(); ++i) {
3005 packed_values.push_back(lp_values[variables[i]]);
3006 packed_reduced_costs.push_back(reduced_costs[variables[i]]);
3007 }
3008 const std::vector<std::vector<Literal>> at_most_ones =
3009 implication_graph->GenerateAtMostOnesWithLargeWeight(
3010 literals, packed_values, packed_reduced_costs);
3011
3012 for (const std::vector<Literal>& at_most_one : at_most_ones) {
3013 // We need to express such "at most one" in term of the initial
3014 // variables, so we do not use the
3015 // LinearConstraintBuilder::AddLiteralTerm() here.
3017 model, IntegerValue(std::numeric_limits<int64_t>::min()),
3018 IntegerValue(1));
3019 for (const Literal l : at_most_one) {
3020 if (positive_map.contains(l.Index())) {
3021 builder.AddTerm(positive_map.at(l.Index()), IntegerValue(1));
3022 } else {
3023 // Add 1 - X to the linear constraint.
3024 builder.AddTerm(negative_map.at(l.Index()), IntegerValue(-1));
3025 builder.AddConstant(IntegerValue(1));
3026 }
3027 }
3028
3029 manager->AddCut(builder.Build(), "Clique");
3030 }
3031 return true;
3032 };
3033 return result;
3034 }
3035
3036} // namespace sat
3037} // namespace operations_research
DomainIteratorBeginEnd Values() const &
bool TrySimpleSeparation(const CutData &input_ct)
Definition cuts.cc:1904
void Initialize(absl::Span< const IntegerVariable > lp_vars)
Definition cuts.cc:1898
bool TrySimpleKnapsack(const CutData &input_ct, ImpliedBoundsProcessor *ib_processor=nullptr)
Definition cuts.cc:1534
bool TryWithLetchfordSouliLifting(const CutData &input_ct, ImpliedBoundsProcessor *ib_processor=nullptr)
Definition cuts.cc:1753
bool TrySingleNodeFlow(const CutData &input_ct, ImpliedBoundsProcessor *ib_processor=nullptr)
Definition cuts.cc:1651
bool ConvertToLinearConstraint(const CutData &cut, LinearConstraint *output)
Definition cuts.cc:353
int AddOrMergeBooleanTerms(absl::Span< CutTerm > terms, IntegerValue t, CutData *cut)
Definition cuts.cc:300
void ApplyWithPotentialBumpAndGUB(const std::function< IntegerValue(IntegerValue)> &f, IntegerValue divisor, CutData *cut)
Definition cuts.cc:476
std::vector< CutTerm > * ClearedMutableTempTerms()
Definition cuts.h:250
void RecomputeCacheAndSeparateSomeImpliedBoundCuts(const util_intops::StrongVector< IntegerVariable, double > &lp_values)
Definition cuts.cc:2338
bool CacheDataForCut(IntegerVariable first_slack, CutData *cut)
Definition cuts.cc:2548
bool TryToExpandWithLowerImpliedbound(IntegerValue factor_t, bool complement, CutTerm *term, absl::int128 *rhs, std::vector< CutTerm > *new_bool_terms)
Definition cuts.cc:2520
BestImpliedBoundInfo GetCachedImpliedBoundInfo(IntegerVariable var) const
Definition cuts.cc:2258
bool DecomposeWithImpliedLowerBound(const CutTerm &term, IntegerValue factor_t, CutTerm &bool_term, CutTerm &slack_term)
Definition cuts.cc:2348
std::tuple< int, int, int > PostprocessWithImpliedBound(const std::function< IntegerValue(IntegerValue)> &f, IntegerValue factor_t, CutData *cut)
Definition cuts.cc:2439
bool DecomposeWithImpliedUpperBound(const CutTerm &term, IntegerValue factor_t, CutTerm &bool_term, CutTerm &slack_term)
Definition cuts.cc:2418
bool ComputeCut(RoundingOptions options, const CutData &base_ct, ImpliedBoundsProcessor *ib_processor=nullptr)
Definition cuts.cc:1002
IntegerValue FixedValue(IntegerVariable i) const
Definition integer.h:1549
bool IsFixed(IntegerVariable i) const
Definition integer.h:1545
IntegerValue LevelZeroUpperBound(IntegerVariable var) const
Definition integer.h:1657
const Domain & InitialVariableDomain(IntegerVariable var) const
Definition integer.cc:895
IntegerValue LevelZeroLowerBound(IntegerVariable var) const
Definition integer.h:1650
void ResetBounds(IntegerValue lb, IntegerValue ub)
void AddTerm(IntegerVariable var, IntegerValue coeff)
void AddLinearExpression(const LinearExpression &expr)
LiteralIndex NegatedIndex() const
Definition sat_base.h:93
IntegerValue GetBestLowerBound(std::string &suffix)
Definition cuts.cc:2631
void Add(const AffineExpression &expr, int num_expr, const IntegerTrail &integer_trail)
Definition cuts.cc:2584
void TransferToManager(LinearConstraintManager *manager)
void AddCut(LinearConstraint ct, absl::string_view name, const util_intops::StrongVector< IntegerVariable, double > &lp_solution)
void STLSortAndRemoveDuplicates(T *v, const LessFunc &less_func)
Definition stl_util.h:55
bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue *result)
void DivideByGCD(LinearConstraint *constraint)
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
bool BuildMaxAffineUpConstraint(const LinearExpression &target, IntegerVariable var, absl::Span< const std::pair< IntegerValue, IntegerValue > > affines, Model *model, LinearConstraintBuilder *builder)
Definition cuts.cc:2895
IntegerValue GetFactorT(IntegerValue rhs_remainder, IntegerValue divisor, IntegerValue max_magnitude)
Definition cuts.cc:695
IntType IntTypeAbs(IntType t)
CutGenerator CreatePositiveMultiplicationCutGenerator(AffineExpression z, AffineExpression x, AffineExpression y, int linearization_level, Model *model)
Definition cuts.cc:2103
bool ProdOverflow(IntegerValue t, IntegerValue value)
const LiteralIndex kNoLiteralIndex(-1)
LinearConstraint ComputeHyperplanBelowSquare(AffineExpression x, AffineExpression square, IntegerValue x_value, Model *model)
Definition cuts.cc:2210
std::vector< IntegerVariable > NegationOf(absl::Span< const IntegerVariable > vars)
Definition integer.cc:52
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue.value())
const IntegerVariable kNoIntegerVariable(-1)
CutGenerator CreateLinMaxCutGenerator(const IntegerVariable target, absl::Span< const LinearExpression > exprs, absl::Span< const IntegerVariable > z_vars, Model *model)
Definition cuts.cc:2806
void CleanTermsAndFillConstraint(std::vector< std::pair< IntegerVariable, IntegerValue > > *terms, LinearExpression *output)
std::function< IntegerValue(IntegerValue)> GetSuperAdditiveStrengtheningFunction(IntegerValue positive_rhs, IntegerValue min_magnitude)
Definition cuts.cc:794
CutGenerator CreateAllDifferentCutGenerator(absl::Span< const AffineExpression > exprs, Model *model)
Definition cuts.cc:2694
CutGenerator CreateSquareCutGenerator(AffineExpression y, AffineExpression x, int linearization_level, Model *model)
Definition cuts.cc:2222
CutGenerator CreateCliqueCutGenerator(absl::Span< const IntegerVariable > base_variables, Model *model)
Definition cuts.cc:2971
IntegerVariable PositiveVariable(IntegerVariable i)
IntegerValue PositiveRemainder(IntegerValue dividend, IntegerValue positive_divisor)
CutGenerator CreateMaxAffineCutGenerator(LinearExpression target, IntegerVariable var, std::vector< std::pair< IntegerValue, IntegerValue > > affines, const std::string cut_name, Model *model)
Definition cuts.cc:2949
IntegerValue CapAddI(IntegerValue a, IntegerValue b)
LinearConstraint ComputeHyperplanAboveSquare(AffineExpression x, AffineExpression square, IntegerValue x_lb, IntegerValue x_ub, Model *model)
Definition cuts.cc:2198
IntegerValue GetCoefficientOfPositiveVar(const IntegerVariable var, const LinearExpression &expr)
bool ValidateLinearConstraintForOverflow(const LinearConstraint &constraint, const IntegerTrail &integer_trail)
std::function< IntegerValue(IntegerValue)> ExtendNegativeFunction(std::function< IntegerValue(IntegerValue)> base_f, IntegerValue period)
Definition cuts.h:376
std::function< IntegerValue(IntegerValue)> GetSuperAdditiveStrengtheningMirFunction(IntegerValue positive_rhs, IntegerValue scaling)
Definition cuts.cc:829
bool VariableIsPositive(IntegerVariable i)
IntegerValue CapSubI(IntegerValue a, IntegerValue b)
bool AtMinOrMaxInt64I(IntegerValue t)
IntegerValue CapProdI(IntegerValue a, IntegerValue b)
double ToDouble(IntegerValue value)
std::function< IntegerValue(IntegerValue)> GetSuperAdditiveRoundingFunction(IntegerValue rhs_remainder, IntegerValue divisor, IntegerValue t, IntegerValue max_scaling)
Definition cuts.cc:709
OR-Tools root namespace.
bool AtMinOrMaxInt64(int64_t x)
int64_t CapAdd(int64_t x, int64_t y)
int64_t FloorRatio(int64_t value, int64_t positive_coeff)
int64_t CeilRatio(int64_t value, int64_t positive_coeff)
int64_t CapProd(int64_t x, int64_t y)
if(!yyg->yy_init)
Definition parser.yy.cc:966
static int input(yyscan_t yyscanner)
IntegerValue ValueAt(IntegerValue var_value) const
double LpValue(const util_intops::StrongVector< IntegerVariable, double > &lp_values) const
bool FillFromLinearConstraint(const LinearConstraint &base_ct, const util_intops::StrongVector< IntegerVariable, double > &lp_values, IntegerTrail *integer_trail)
Definition cuts.cc:174
bool AllCoefficientsArePositive() const
Definition cuts.cc:232
std::vector< CutTerm > terms
Definition cuts.h:153
bool FillFromParallelVectors(IntegerValue ub, absl::Span< const IntegerVariable > vars, absl::Span< const IntegerValue > coeffs, absl::Span< const double > lp_values, absl::Span< const IntegerValue > lower_bounds, absl::Span< const IntegerValue > upper_bounds)
Definition cuts.cc:192
double ComputeEfficacy() const
Definition cuts.cc:265
std::string DebugString() const
Definition cuts.cc:76
double ComputeViolation() const
Definition cuts.cc:257
bool AppendOneTerm(IntegerVariable var, IntegerValue coeff, double lp_value, IntegerValue lb, IntegerValue ub)
Definition cuts.cc:137
absl::AnyInvocable< bool(LinearConstraintManager *manager)> generate_cuts
Definition cuts.h:60
std::vector< IntegerVariable > vars
Definition cuts.h:59
IntegerVariable GetUnderlyingLiteralOrNone() const
Definition cuts.cc:117
std::string DebugString() const
Definition cuts.cc:68
double LpDistToMaxValue() const
Definition cuts.h:73
void ReplaceExpressionByLiteral(IntegerVariable var)
Definition cuts.cc:103
void Complement(absl::int128 *rhs)
Definition cuts.cc:84
std::array< IntegerVariable, 2 > expr_vars
Definition cuts.h:108
std::array< IntegerValue, 2 > expr_coeffs
Definition cuts.h:109
bool HasRelevantLpValue() const
Definition cuts.h:69
static IntegerLiteral GreaterOrEqual(IntegerVariable i, IntegerValue bound)
std::unique_ptr< IntegerValue[]> coeffs
std::unique_ptr< IntegerVariable[]> vars