Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
cuts.cc
Go to the documentation of this file.
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/cuts.h"
15
16#include <algorithm>
17#include <array>
18#include <cmath>
19#include <cstdint>
20#include <functional>
21#include <limits>
22#include <string>
23#include <utility>
24#include <vector>
25
26#include "absl/base/attributes.h"
27#include "absl/container/btree_map.h"
28#include "absl/container/btree_set.h"
29#include "absl/container/flat_hash_map.h"
30#include "absl/container/flat_hash_set.h"
31#include "absl/log/check.h"
32#include "absl/meta/type_traits.h"
33#include "absl/numeric/int128.h"
34#include "absl/strings/str_cat.h"
35#include "absl/strings/string_view.h"
36#include "absl/types/span.h"
41#include "ortools/sat/clause.h"
43#include "ortools/sat/integer.h"
46#include "ortools/sat/model.h"
52
53namespace operations_research {
54namespace sat {
55
56namespace {
57
58// TODO(user): the function ToDouble() does some testing for min/max integer
59// value and we don't need that here.
60double AsDouble(IntegerValue v) { return static_cast<double>(v.value()); }
61
62} // namespace
63
64std::string CutTerm::DebugString() const {
65 return absl::StrCat("coeff=", coeff.value(), " lp=", lp_value,
66 " range=", bound_diff.value(), " expr=[",
67 expr_coeffs[0].value(), ",", expr_coeffs[1].value(), "]",
68 " * [V", expr_vars[0].value(), ",V", expr_vars[1].value(),
69 "]", " + ", expr_offset.value());
70}
71
72std::string CutData::DebugString() const {
73 std::string result = absl::StrCat("CutData rhs=", rhs, "\n");
74 for (const CutTerm& term : terms) {
75 absl::StrAppend(&result, term.DebugString(), "\n");
76 }
77 return result;
78}
79
// Replaces this term X by its complement (bound_diff - X), updating *rhs
// accordingly so the overall constraint is unchanged.
void CutTerm::Complement(absl::int128* rhs) {
  // We replace coeff * X by coeff * (X - bound_diff + bound_diff)
  // which gives -coeff * complement(X) + coeff * bound_diff;
  *rhs -= absl::int128(coeff.value()) * absl::int128(bound_diff.value());

  // We keep the same expression variable.
  // NOTE(review): the loop body is missing from this extracted view; the
  // original presumably negates expr_coeffs[i] (and adjusts expr_offset) —
  // confirm against the upstream source before relying on this function.
  for (int i = 0; i < 2; ++i) {
  }

  // Note that this is not involutive because of floating point error. Fix?
  lp_value = static_cast<double>(bound_diff.value()) - lp_value;
  coeff = -coeff;

  // Swap the implied bound info.
  // NOTE(review): the statement performing this swap is missing from this
  // extracted view; confirm against the upstream source.
}
98
// NOTE(review): the function signature line is missing from this extracted
// view. From the body, this rewrites the affine expression so it equals the
// 0/1 literal `var` (presumably CutTerm::ReplaceExpressionByLiteral(var));
// confirm against the upstream source.
  CHECK_EQ(bound_diff, 1);  // Only valid for Boolean (0/1 range) terms.
  expr_coeffs[1] = 0;       // Single-variable expression: clear second slot.
  if (VariableIsPositive(var)) {
    // Expression is exactly X.
    expr_vars[0] = var;
    expr_coeffs[0] = 1;
    expr_offset = 0;
  } else {
    // Expression is 1 - X.
    // NOTE(review): the assignment of expr_vars[0] (presumably the positive
    // form of `var`) is missing from this extracted view; confirm upstream.
    expr_coeffs[0] = -1;
    expr_offset = 1;
  }
}
112
113IntegerVariable CutTerm::GetUnderlyingLiteralOrNone() const {
114 if (expr_coeffs[1] != 0) return kNoIntegerVariable;
115 if (bound_diff != 1) return kNoIntegerVariable;
116
117 if (expr_coeffs[0] > 0) {
118 if (expr_coeffs[0] != 1) return kNoIntegerVariable;
119 if (expr_offset != 0) return kNoIntegerVariable;
120 CHECK(VariableIsPositive(expr_vars[0]));
121 return expr_vars[0];
122 }
123
124 if (expr_coeffs[0] != -1) return kNoIntegerVariable;
125 if (expr_offset != 1) return kNoIntegerVariable;
126 CHECK(VariableIsPositive(expr_vars[0]));
127 return NegationOf(expr_vars[0]);
128}
129
130// To try to minimize the risk of overflow, we switch to the bound closer
131// to the lp_value. Since most of our base constraint for cut are tight,
132// hopefully this is not too bad.
133bool CutData::AppendOneTerm(IntegerVariable var, IntegerValue coeff,
134 double lp_value, IntegerValue lb, IntegerValue ub) {
135 if (coeff == 0) return true;
136 const IntegerValue bound_diff = ub - lb;
137
138 // Complement the variable so that it is has a positive coefficient.
139 const bool complement = coeff < 0;
140
141 // See formula below, the constant term is either coeff * lb or coeff * ub.
142 rhs -= absl::int128(coeff.value()) *
143 absl::int128(complement ? ub.value() : lb.value());
144
145 // Deal with fixed variable, no need to shift back in this case, we can
146 // just remove the term.
147 if (bound_diff == 0) return true;
148
149 CutTerm entry;
150 entry.expr_vars[0] = var;
151 entry.expr_coeffs[1] = 0;
152 entry.bound_diff = bound_diff;
153 if (complement) {
154 // X = -(UB - X) + UB
155 entry.expr_coeffs[0] = -IntegerValue(1);
156 entry.expr_offset = ub;
157 entry.coeff = -coeff;
158 entry.lp_value = static_cast<double>(ub.value()) - lp_value;
159 } else {
160 // C = (X - LB) + LB
161 entry.expr_coeffs[0] = IntegerValue(1);
162 entry.expr_offset = -lb;
163 entry.coeff = coeff;
164 entry.lp_value = lp_value - static_cast<double>(lb.value());
165 }
166 terms.push_back(entry);
167 return true;
168}
169
// NOTE(review): the opening signature line(s) are missing from this extracted
// view (presumably bool CutData::FillFromLinearConstraint(...) with an
// lp_values parameter between base_ct and integer_trail); confirm against
// the upstream source. Initializes rhs/terms from a linear constraint, using
// the level-zero bounds of each variable.
    const LinearConstraint& base_ct,
    IntegerTrail* integer_trail) {
  rhs = absl::int128(base_ct.ub.value());
  terms.clear();
  const int num_terms = base_ct.num_terms;
  for (int i = 0; i < num_terms; ++i) {
    const IntegerVariable var = base_ct.vars[i];
    // AppendOneTerm() can reject a term; propagate the failure.
    if (!AppendOneTerm(var, base_ct.coeffs[i], lp_values[base_ct.vars[i]],
                       integer_trail->LevelZeroLowerBound(var),
                       integer_trail->LevelZeroUpperBound(var))) {
      return false;
    }
  }
  return true;
}
187
// NOTE(review): the opening signature line is missing from this extracted
// view (presumably bool CutData::FillFromParallelVectors(...)); confirm
// against the upstream source. Initializes rhs/terms from parallel spans of
// variables, coefficients, lp values and bounds.
    IntegerValue ub, absl::Span<const IntegerVariable> vars,
    absl::Span<const IntegerValue> coeffs, absl::Span<const double> lp_values,
    absl::Span<const IntegerValue> lower_bounds,
    absl::Span<const IntegerValue> upper_bounds) {
  rhs = absl::int128(ub.value());
  terms.clear();

  const int size = lp_values.size();
  if (size == 0) return true;

  // All spans must be parallel (same length).
  CHECK_EQ(vars.size(), size);
  CHECK_EQ(coeffs.size(), size);
  CHECK_EQ(lower_bounds.size(), size);
  CHECK_EQ(upper_bounds.size(), size);

  for (int i = 0; i < size; ++i) {
    if (!AppendOneTerm(vars[i], coeffs[i], lp_values[i], lower_bounds[i],
                       upper_bounds[i])) {
      return false;
    }
  }

  return true;
}
213
// NOTE(review): the signature line is missing from this extracted view
// (presumably void CutData::ComplementForPositiveCoefficients()); confirm
// against the upstream source. Complements every term with a strictly
// negative coefficient so all stored coefficients become non-negative.
  for (CutTerm& term : terms) {
    if (term.coeff >= 0) continue;
    term.Complement(&rhs);
  }
}
220
// NOTE(review): the signature line is missing from this extracted view
// (presumably void CutData::ComplementForSmallerLpValues()); confirm against
// the upstream source. Complements any term whose lp_value is larger than
// its distance to the max value, so each term keeps the smaller of the two.
  for (CutTerm& term : terms) {
    if (term.lp_value <= term.LpDistToMaxValue()) continue;
    term.Complement(&rhs);
  }
}
227
// NOTE(review): the signature line is missing from this extracted view
// (presumably bool CutData::AllCoefficientsArePositive() const); confirm
// against the upstream source. Returns true iff no term has a strictly
// negative coefficient.
  for (const CutTerm& term : terms) {
    if (term.coeff < 0) return false;
  }
  return true;
}
234
// NOTE(review): the signature line is missing from this extracted view
// (and apparently the initialization plus increment of num_relevant_entries);
// confirm against the upstream source. The visible code partitions terms
// with a relevant lp value to the front of `terms`, tracks the maximum
// coefficient magnitude, then sorts that relevant prefix by decreasing
// lp_value.
  max_magnitude = 0;
  for (int i = 0; i < terms.size(); ++i) {
    CutTerm& entry = terms[i];
    max_magnitude = std::max(max_magnitude, IntTypeAbs(entry.coeff));
    if (entry.HasRelevantLpValue()) {
      // Move relevant entries to the front, in order of discovery.
      std::swap(terms[num_relevant_entries], entry);
      // NOTE(review): a ++num_relevant_entries; statement appears to be
      // missing here in this extracted view.
    }
  }

  // Sort by larger lp_value first.
  std::sort(terms.begin(), terms.begin() + num_relevant_entries,
            [](const CutTerm& a, const CutTerm& b) {
              return a.lp_value > b.lp_value;
            });
}
253
// NOTE(review): the signature line is missing from this extracted view
// (presumably double CutData::ComputeViolation() const); confirm against the
// upstream source. Returns activity(lp) - rhs: a positive value means the lp
// point violates the cut.
  double violation = -static_cast<double>(rhs);
  for (const CutTerm& term : terms) {
    violation += term.lp_value * static_cast<double>(term.coeff.value());
  }
  return violation;
}
261
// NOTE(review): the signature line is missing from this extracted view
// (presumably double CutData::ComputeEfficacy() const); confirm against the
// upstream source. Returns the violation divided by the L2 norm of the
// coefficients, i.e. the euclidean distance from the lp point to the cut.
  double violation = -static_cast<double>(rhs);
  double norm = 0.0;
  for (const CutTerm& term : terms) {
    const double coeff = static_cast<double>(term.coeff.value());
    violation += term.lp_value * coeff;
    norm += coeff * coeff;
  }
  return violation / std::sqrt(norm);
}
272
// NOTE(review): the signature line is missing from this extracted view
// (presumably void CutDataBuilder::ClearIndices()); confirm against the
// upstream source. Resets the merge counter and invalidates the Boolean
// term indices so they will be rebuilt lazily.
  num_merges_ = 0;
  constraint_is_indexed_ = false;
  bool_index_.clear();
  secondary_bool_index_.clear();
}
279
280void CutDataBuilder::RegisterAllBooleanTerms(const CutData& cut) {
281 constraint_is_indexed_ = true;
282 const int size = cut.terms.size();
283 for (int i = 0; i < size; ++i) {
284 const CutTerm& term = cut.terms[i];
285 if (term.bound_diff != 1) continue;
286 if (!term.IsSimple()) continue;
287
288 // Initially we shouldn't have duplicate bools and (1 - bools).
289 // So we just fill bool_index_.
290 bool_index_[term.expr_vars[0]] = i;
291 }
292}
293
// Adds the simple Boolean `term` to *cut, merging it into an existing entry
// on the same variable (same sign) when the combined coefficient neither
// overflows nor makes t * coeff overflow. Otherwise the term is appended as
// a separate entry.
void CutDataBuilder::AddOrMergeTerm(const CutTerm& term, IntegerValue t,
                                    CutData* cut) {
  // Lazily build the Boolean index on first use.
  if (!constraint_is_indexed_) {
    RegisterAllBooleanTerms(*cut);
  }

  DCHECK(term.IsSimple());
  const IntegerVariable var = term.expr_vars[0];
  const bool is_positive = (term.expr_coeffs[0] > 0);
  const int new_index = cut->terms.size();
  const auto [it, inserted] = bool_index_.insert({var, new_index});
  if (inserted) {
    cut->terms.push_back(term);
    return;
  }

  // If the referred var is not right, replace the entry.
  // (The stored index can be stale: out of range for the current terms, or
  // pointing at a term whose variable changed since indexing.)
  int entry_index = it->second;
  if (entry_index >= new_index || cut->terms[entry_index].expr_vars[0] != var) {
    it->second = new_index;
    cut->terms.push_back(term);
    return;
  }

  // If the sign is not right, look into secondary hash_map for opposite sign.
  if ((cut->terms[entry_index].expr_coeffs[0] > 0) != is_positive) {
    const auto [it, inserted] = secondary_bool_index_.insert({var, new_index});
    if (inserted) {
      cut->terms.push_back(term);
      return;
    }

    // If the referred var is not right, replace the entry.
    entry_index = it->second;
    if (entry_index >= new_index ||
        cut->terms[entry_index].expr_vars[0] != var) {
      it->second = new_index;
      cut->terms.push_back(term);
      return;
    }

    // If the sign is not right, replace the entry.
    if ((cut->terms[entry_index].expr_coeffs[0] > 0) != is_positive) {
      it->second = new_index;
      cut->terms.push_back(term);
      return;
    }
  }
  // At this point entry_index refers to a live term on the same variable with
  // the same sign; merging is valid if the arithmetic stays in range.
  DCHECK_EQ(cut->terms[entry_index].expr_vars[0], var);
  DCHECK_EQ((cut->terms[entry_index].expr_coeffs[0] > 0), is_positive);

  // We can only merge the term if term.coeff + old_coeff do not overflow and
  // if t * new_coeff do not overflow.
  //
  // If we cannot merge the term, we will keep them separate. The produced cut
  // will be less strong, but can still be used.
  const IntegerValue new_coeff =
      CapAddI(cut->terms[entry_index].coeff, term.coeff);
  if (AtMinOrMaxInt64I(new_coeff) || ProdOverflow(t, new_coeff)) {
    // If we cannot merge the term, we keep them separate.
    cut->terms.push_back(term);
  } else {
    ++num_merges_;
    cut->terms[entry_index].coeff = new_coeff;
  }
}
360
// TODO(user): Divide by gcd first to avoid possible overflow in the
// conversion? it is however unlikely given that our coeffs should be small.
ABSL_DEPRECATED("Only used in tests, this will be removed.")
bool CutDataBuilder::ConvertToLinearConstraint(const CutData& cut,
                                               LinearConstraint* output) {
  // Expands each affine term coeff * (c0*v0 + c1*v1 + offset) back onto the
  // original variables and writes the result into *output. Returns false if
  // the rhs or any accumulated coefficient would overflow an int64.
  tmp_map_.clear();
  if (cut.rhs > absl::int128(std::numeric_limits<int64_t>::max()) ||
      cut.rhs < absl::int128(std::numeric_limits<int64_t>::min())) {
    return false;
  }
  IntegerValue new_rhs = static_cast<int64_t>(cut.rhs);
  for (const CutTerm& term : cut.terms) {
    for (int i = 0; i < 2; ++i) {
      if (term.expr_coeffs[i] == 0) continue;  // Unused expression slot.
      if (!AddProductTo(term.coeff, term.expr_coeffs[i],
                        &tmp_map_[term.expr_vars[i]])) {
        return false;
      }
    }
    // The constant part of the term moves to the rhs.
    if (!AddProductTo(-term.coeff, term.expr_offset, &new_rhs)) {
      return false;
    }
  }

  output->lb = kMinIntegerValue;
  output->ub = new_rhs;
  output->resize(tmp_map_.size());
  int new_size = 0;
  for (const auto [var, coeff] : tmp_map_) {
    if (coeff == 0) continue;  // Drop variables whose coefficients canceled.
    output->vars[new_size] = var;
    output->coeffs[new_size] = coeff;
    ++new_size;
  }
  output->resize(new_size);
  DivideByGCD(output);
  return true;
}
399
400namespace {
401
// Minimum amount of violation of the cut constraint by the solution. This
// is needed to avoid numerical issues and adding cuts with minor effect.
// constexpr (rather than const) makes it a guaranteed compile-time constant.
constexpr double kMinCutViolation = 1e-4;
405
406IntegerValue PositiveRemainder(absl::int128 dividend,
407 IntegerValue positive_divisor) {
408 DCHECK_GT(positive_divisor, 0);
409 const IntegerValue m =
410 static_cast<int64_t>(dividend % absl::int128(positive_divisor.value()));
411 return m < 0 ? m + positive_divisor : m;
412}
413
414// We use the fact that f(k * divisor + rest) = k * f(divisor) + f(rest)
415absl::int128 ApplyToInt128(const std::function<IntegerValue(IntegerValue)>& f,
416 IntegerValue divisor, absl::int128 value) {
417 const IntegerValue rest = PositiveRemainder(value, divisor);
418 const absl::int128 k =
419 (value - absl::int128(rest.value())) / absl::int128(divisor.value());
420 const absl::int128 result =
421 k * absl::int128(f(divisor).value()) + absl::int128(f(rest).value());
422 return result;
423}
424
// Apply f() to the cut with a potential improvement for one Boolean:
//
// If we have a Boolean X, and a cut: terms + a * X <= b;
// By setting X to true or false, we have two inequalities:
// terms <= b if X == 0
// terms <= b - a if X == 1
// We can apply f to both inequalities and recombine:
// f(terms) <= f(b) * (1 - X) + f(b - a) * X
// Which change the final coeff of X from f(a) to [f(b) - f(b - a)].
// This can only improve the cut since f(b) >= f(b - a) + f(a)
//
// Returns the number of bumps performed (0 or 1): at most one Boolean term
// (the one with the best score) gets the improved coefficient.
int ApplyWithPotentialBump(const std::function<IntegerValue(IntegerValue)>& f,
                           const IntegerValue divisor, CutData* cut) {
  int bump_index = -1;
  double bump_score = -1.0;
  IntegerValue bump_coeff;
  const IntegerValue remainder = PositiveRemainder(cut->rhs, divisor);
  const IntegerValue f_remainder = f(remainder);
  cut->rhs = ApplyToInt128(f, divisor, cut->rhs);
  for (int i = 0; i < cut->terms.size(); ++i) {
    CutTerm& entry = cut->terms[i];
    const IntegerValue f_coeff = f(entry.coeff);
    // Only Boolean (0/1) terms are candidates for the bump.
    if (entry.bound_diff == 1) {
      // TODO(user): we probably don't need int128 here, but we need
      // t * (remainder - entry.coeff) not to overflow, and we can't really be
      // sure.
      const IntegerValue alternative =
          entry.coeff > 0
              ? f_remainder - f(remainder - entry.coeff)
              : f_remainder - IntegerValue(static_cast<int64_t>(ApplyToInt128(
                                  f, divisor,
                                  absl::int128(remainder.value()) -
                                      absl::int128(entry.coeff.value()))));
      DCHECK_GE(alternative, f_coeff);
      if (alternative > f_coeff) {
        // Score the potential gain by its weight in the lp solution.
        const double score = ToDouble(alternative - f_coeff) * entry.lp_value;
        if (score > bump_score) {
          bump_index = i;
          bump_score = score;
          bump_coeff = alternative;
        }
      }
    }
    entry.coeff = f_coeff;
  }
  if (bump_index >= 0) {
    // Overwrite the plain f(coeff) of the best candidate with its bump.
    cut->terms[bump_index].coeff = bump_coeff;
    return 1;
  }
  return 0;
}
475
476} // namespace
477
478// Compute the larger t <= max_t such that t * rhs_remainder >= divisor / 2.
479//
480// This is just a separate function as it is slightly faster to compute the
481// result only once.
482IntegerValue GetFactorT(IntegerValue rhs_remainder, IntegerValue divisor,
483 IntegerValue max_magnitude) {
484 // Make sure that when we multiply the rhs or the coefficient by a factor t,
485 // we do not have an integer overflow. Note that the rhs should be counted
486 // in max_magnitude since we will apply f() on it.
487 IntegerValue max_t(std::numeric_limits<int64_t>::max());
488 if (max_magnitude != 0) {
489 max_t = max_t / max_magnitude;
490 }
491 return rhs_remainder == 0
492 ? max_t
493 : std::min(max_t, CeilRatio(divisor / 2, rhs_remainder));
494}
495
// Returns a super-additive rounding function built from the division by
// `divisor`: each variant below is some scaled form of
// coeff -> floor(t * coeff / divisor), with extra resolution (up to
// max_scaling buckets) used to strengthen the cut.
std::function<IntegerValue(IntegerValue)> GetSuperAdditiveRoundingFunction(
    IntegerValue rhs_remainder, IntegerValue divisor, IntegerValue t,
    IntegerValue max_scaling) {
  DCHECK_GE(max_scaling, 1);
  DCHECK_GE(t, 1);

  // Adjust after the multiplication by t.
  rhs_remainder *= t;
  DCHECK_LT(rhs_remainder, divisor);

  // Make sure we don't have an integer overflow below. Note that we assume that
  // divisor and the maximum coeff magnitude are not too different (maybe a
  // factor 1000 at most) so that the final result will never overflow.
  max_scaling =
      std::min(max_scaling, std::numeric_limits<int64_t>::max() / divisor);

  const IntegerValue size = divisor - rhs_remainder;
  if (max_scaling == 1 || size == 1) {
    // TODO(user): Use everywhere a two step computation to avoid overflow?
    // First divide by divisor, then multiply by t. For now, we limit t so that
    // we never have an overflow instead.
    return [t, divisor](IntegerValue coeff) {
      return FloorRatio(t * coeff, divisor);
    };
  } else if (size <= max_scaling) {
    // Exact variant: one bucket per unit of `size`.
    return [size, rhs_remainder, t, divisor](IntegerValue coeff) {
      const IntegerValue t_coeff = t * coeff;
      const IntegerValue ratio = FloorRatio(t_coeff, divisor);
      const IntegerValue remainder = PositiveRemainder(t_coeff, divisor);
      const IntegerValue diff = remainder - rhs_remainder;
      return size * ratio + std::max(IntegerValue(0), diff);
    };
  } else if (max_scaling.value() * rhs_remainder.value() < divisor) {
    // Because of our max_t limitation, the rhs_remainder might stay small.
    //
    // If it is "too small" we cannot use the code below because it will not be
    // valid. So we just divide divisor into max_scaling bucket. The
    // rhs_remainder will be in the bucket 0.
    //
    // Note(user): This seems the same as just increasing t, modulo integer
    // overflows. Maybe we should just always do the computation like this so
    // that we can use larger t even if coeff is close to kint64max.
    return [t, divisor, max_scaling](IntegerValue coeff) {
      const IntegerValue t_coeff = t * coeff;
      const IntegerValue ratio = FloorRatio(t_coeff, divisor);
      const IntegerValue remainder = PositiveRemainder(t_coeff, divisor);
      const IntegerValue bucket = FloorRatio(remainder * max_scaling, divisor);
      return max_scaling * ratio + bucket;
    };
  } else {
    // We divide (size = divisor - rhs_remainder) into (max_scaling - 1) buckets
    // and increase the function by 1 / max_scaling for each of them.
    //
    // Note that for different values of max_scaling, we get a family of
    // functions that do not dominate each others. So potentially, a max scaling
    // as low as 2 could lead to the better cut (this is exactly the Letchford &
    // Lodi function).
    //
    // Another interesting fact, is that if we want to compute the maximum alpha
    // for a constraint with 2 terms like:
    //    divisor * Y + (ratio * divisor + remainder) * X
    //               <= rhs_ratio * divisor + rhs_remainder
    // so that we have the cut:
    //    Y + (ratio + alpha) * X  <= rhs_ratio
    // This is the same as computing the maximum alpha such that for all integer
    // X > 0 we have CeilRatio(alpha * divisor * X, divisor)
    //    <= CeilRatio(remainder * X - rhs_remainder, divisor).
    // We can prove that this alpha is of the form (n - 1) / n, and it will
    // be reached by such function for a max_scaling of n.
    //
    // TODO(user): This function is not always maximal when
    // size % (max_scaling - 1) == 0. Improve?
    return [size, rhs_remainder, t, divisor, max_scaling](IntegerValue coeff) {
      const IntegerValue t_coeff = t * coeff;
      const IntegerValue ratio = FloorRatio(t_coeff, divisor);
      const IntegerValue remainder = PositiveRemainder(t_coeff, divisor);
      const IntegerValue diff = remainder - rhs_remainder;
      const IntegerValue bucket =
          diff > 0 ? CeilRatio(diff * (max_scaling - 1), size)
                   : IntegerValue(0);
      return max_scaling * ratio + bucket;
    };
  }
}
580
// Returns a super-additive strengthening function: maps non-negative values
// to 0 and negative values to a small set of negative "levels" depending on
// positive_rhs and min_magnitude.
std::function<IntegerValue(IntegerValue)> GetSuperAdditiveStrengtheningFunction(
    IntegerValue positive_rhs, IntegerValue min_magnitude) {
  CHECK_GT(positive_rhs, 0);
  CHECK_GT(min_magnitude, 0);

  // Large min_magnitude: only three output levels are needed.
  if (min_magnitude >= CeilRatio(positive_rhs, 2)) {
    return [positive_rhs](IntegerValue v) {
      if (v >= 0) return IntegerValue(0);
      if (v > -positive_rhs) return IntegerValue(-1);
      return IntegerValue(-2);
    };
  }

  // The transformation only work if 2 * second_threshold >= positive_rhs.
  //
  // TODO(user): Limit the number of value used with scaling like above.
  min_magnitude = std::min(min_magnitude, FloorRatio(positive_rhs, 2));
  const IntegerValue second_threshold = positive_rhs - min_magnitude;
  return [positive_rhs, min_magnitude, second_threshold](IntegerValue v) {
    if (v >= 0) return IntegerValue(0);
    if (v <= -positive_rhs) return -positive_rhs;
    if (v <= -second_threshold) return -second_threshold;

    // This should actually never happen by the definition of min_magnitude.
    // But with it, the function is super-additive even if min_magnitude is not
    // correct.
    if (v >= -min_magnitude) return -min_magnitude;

    // TODO(user): we might want to introduce some step to reduce the final
    // magnitude of the cut.
    return v;
  };
}
614
std::function<IntegerValue(IntegerValue)>
// NOTE(review): the line carrying this function's name and first parameter(s)
// is missing from this extracted view (presumably
// GetSuperAdditiveStrengtheningMirFunction(IntegerValue positive_rhs, ...);
// confirm against the upstream source. Returns a MIR-style strengthening
// function mapping non-negatives to 0 and negatives to scaled buckets.
    IntegerValue scaling) {
  if (scaling >= positive_rhs) {
    // Simple case, no scaling required.
    return [positive_rhs](IntegerValue v) {
      if (v >= 0) return IntegerValue(0);
      if (v <= -positive_rhs) return -positive_rhs;
      return v;
    };
  }

  // We need to scale.
  // Cap scaling so scaling * positive_rhs cannot overflow an int64.
  scaling =
      std::min(scaling, IntegerValue(std::numeric_limits<int64_t>::max()) /
                            positive_rhs);
  if (scaling == 1) {
    // With a scaling of 1, only the sign carries information.
    return [](IntegerValue v) {
      if (v >= 0) return IntegerValue(0);
      return IntegerValue(-1);
    };
  }

  // We divide [-positive_rhs + 1, 0] into (scaling - 1) bucket.
  return [positive_rhs, scaling](IntegerValue v) {
    if (v >= 0) return IntegerValue(0);
    if (v <= -positive_rhs) return -scaling;
    return FloorRatio(v * (scaling - 1), (positive_rhs - 1));
  };
}
645
646IntegerRoundingCutHelper::~IntegerRoundingCutHelper() {
647 if (!VLOG_IS_ON(1)) return;
648 if (shared_stats_ == nullptr) return;
649 std::vector<std::pair<std::string, int64_t>> stats;
650 stats.push_back({"rounding_cut/num_initial_ibs_", total_num_initial_ibs_});
651 stats.push_back(
652 {"rounding_cut/num_initial_merges_", total_num_initial_merges_});
653 stats.push_back({"rounding_cut/num_pos_lifts", total_num_pos_lifts_});
654 stats.push_back({"rounding_cut/num_neg_lifts", total_num_neg_lifts_});
655 stats.push_back(
656 {"rounding_cut/num_post_complements", total_num_post_complements_});
657 stats.push_back({"rounding_cut/num_overflows", total_num_overflow_abort_});
658 stats.push_back({"rounding_cut/num_adjusts", total_num_coeff_adjust_});
659 stats.push_back({"rounding_cut/num_merges", total_num_merges_});
660 stats.push_back({"rounding_cut/num_bumps", total_num_bumps_});
661 stats.push_back(
662 {"rounding_cut/num_final_complements", total_num_final_complements_});
663 stats.push_back({"rounding_cut/num_dominating_f", total_num_dominating_f_});
664 shared_stats_->AddStats(stats);
665}
666
// Returns the violation of the cut obtained by rounding `cut` with the
// super-additive function derived from `divisor`, scaled by the L2 norm of
// the rounded coefficients over the relevant positions. Returns 0.0 as soon
// as the computation can prove the scaled violation will be insignificant.
// Side effect: fills adjusted_coeffs_ with (index, new_coeff) pairs for
// coefficients rounded up to an exact multiple of divisor.
double IntegerRoundingCutHelper::GetScaledViolation(
    IntegerValue divisor, IntegerValue max_scaling,
    IntegerValue remainder_threshold, const CutData& cut) {
  absl::int128 rhs = cut.rhs;
  IntegerValue max_magnitude = cut.max_magnitude;
  const IntegerValue initial_rhs_remainder = PositiveRemainder(rhs, divisor);
  if (initial_rhs_remainder < remainder_threshold) return 0.0;

  // We will adjust coefficient that are just under an exact multiple of
  // divisor to an exact multiple. This is meant to get rid of small errors
  // that appears due to rounding error in our exact computation of the
  // initial constraint given to this class.
  //
  // Each adjustement will cause the initial_rhs_remainder to increase, and we
  // do not want to increase it above divisor. Our threshold below guarantees
  // this. Note that the higher the rhs_remainder becomes, the more the
  // function f() has a chance to reduce the violation, so it is not always a
  // good idea to use all the slack we have between initial_rhs_remainder and
  // divisor.
  //
  // TODO(user): We could see if for a fixed function f, the increase is
  // interesting?
  // before: f(rhs) - f(coeff) * lp_value
  // after: f(rhs + increase * bound_diff) - f(coeff + increase) * lp_value.
  adjusted_coeffs_.clear();
  const IntegerValue adjust_threshold =
      (divisor - initial_rhs_remainder - 1) /
      IntegerValue(std::max(1000, cut.num_relevant_entries));
  if (adjust_threshold > 0) {
    // Even before we finish the adjust, we can have a lower bound on the
    // activily loss using this divisor, and so we can abort early. This is
    // similar to what is done below.
    double max_violation = static_cast<double>(initial_rhs_remainder.value());
    for (int i = 0; i < cut.num_relevant_entries; ++i) {
      const CutTerm& entry = cut.terms[i];
      const IntegerValue remainder = PositiveRemainder(entry.coeff, divisor);
      if (remainder == 0) continue;
      if (remainder <= initial_rhs_remainder) {
        // We do not know exactly f() yet, but it will always round to the
        // floor of the division by divisor in this case.
        max_violation -=
            static_cast<double>(remainder.value()) * entry.lp_value;
        if (max_violation <= 1e-3) return 0.0;
        continue;
      }

      // Adjust coeff of the form k * divisor - epsilon.
      const IntegerValue adjust = divisor - remainder;
      const IntegerValue prod = CapProdI(adjust, entry.bound_diff);
      if (prod <= adjust_threshold) {
        rhs += absl::int128(prod.value());
        const IntegerValue new_coeff = entry.coeff + adjust;
        adjusted_coeffs_.push_back({i, new_coeff});
        max_magnitude = std::max(max_magnitude, IntTypeAbs(new_coeff));
      }
    }
  }

  const IntegerValue rhs_remainder = PositiveRemainder(rhs, divisor);
  const IntegerValue t = GetFactorT(rhs_remainder, divisor, max_magnitude);
  const auto f =
      GetSuperAdditiveRoundingFunction(rhs_remainder, divisor, t, max_scaling);

  // As we round coefficients, we will compute the loss compared to the
  // current scaled constraint activity. As soon as this loss crosses the
  // slack, then we known that there is no violation and we can abort early.
  //
  // TODO(user): modulo the scaling, we could compute the exact threshold
  // using our current best cut. Note that we also have to account the change
  // in slack due to the adjust code above.
  const double scaling = ToDouble(f(divisor)) / ToDouble(divisor);
  double max_violation = scaling * ToDouble(rhs_remainder);

  // Apply f() to the cut and compute the cut violation. Note that it is
  // okay to just look at the relevant indices since the other have a lp
  // value which is almost zero. Doing it like this is faster, and even if
  // the max_magnitude might be off it should still be relevant enough.
  double violation = -static_cast<double>(ApplyToInt128(f, divisor, rhs));
  double l2_norm = 0.0;
  int adjusted_coeffs_index = 0;
  for (int i = 0; i < cut.num_relevant_entries; ++i) {
    const CutTerm& entry = cut.terms[i];

    // Adjust coeff according to our previous computation if needed.
    // (adjusted_coeffs_ is sorted by index, so a single cursor suffices.)
    IntegerValue coeff = entry.coeff;
    if (adjusted_coeffs_index < adjusted_coeffs_.size() &&
        adjusted_coeffs_[adjusted_coeffs_index].first == i) {
      coeff = adjusted_coeffs_[adjusted_coeffs_index].second;
      adjusted_coeffs_index++;
    }

    if (coeff == 0) continue;
    const IntegerValue new_coeff = f(coeff);
    const double new_coeff_double = ToDouble(new_coeff);
    const double lp_value = entry.lp_value;

    // TODO(user): Shall we compute the norm after slack are substituted back?
    // it might be widely different. Another reason why this might not be
    // the best measure.
    l2_norm += new_coeff_double * new_coeff_double;
    violation += new_coeff_double * lp_value;
    max_violation -= (scaling * ToDouble(coeff) - new_coeff_double) * lp_value;
    if (max_violation <= 1e-3) return 0.0;
  }
  if (l2_norm == 0.0) return 0.0;

  // Here we scale by the L2 norm over the "relevant" positions. This seems
  // to work slighly better in practice.
  //
  // Note(user): The non-relevant position have an LP value of zero. If their
  // coefficient is positive, it seems good not to take it into account in the
  // norm since the larger this coeff is, the stronger the cut. If the coeff
  // is negative though, a large coeff means a small increase from zero of the
  // lp value will make the cut satisfied, so we might want to look at them.
  return violation / sqrt(l2_norm);
}
783
784// TODO(user): This is slow, 50% of run time on a2c1s1.pb.gz. Optimize!
785bool IntegerRoundingCutHelper::ComputeCut(
786 RoundingOptions options, const CutData& base_ct,
787 ImpliedBoundsProcessor* ib_processor) {
788 // Try IB before heuristic?
789 // This should be better except it can mess up the norm and the divisors.
790 cut_ = base_ct;
791 if (options.use_ib_before_heuristic && ib_processor != nullptr) {
792 ib_processor->BaseCutBuilder()->ClearNumMerges();
793 const int old_size = static_cast<int>(cut_.terms.size());
794 bool abort = true;
795 for (int i = 0; i < old_size; ++i) {
796 if (cut_.terms[i].bound_diff <= 1) continue;
797 if (!cut_.terms[i].HasRelevantLpValue()) continue;
798
799 if (options.prefer_positive_ib && cut_.terms[i].coeff < 0) {
800 // We complement the term before trying the implied bound.
801 cut_.terms[i].Complement(&cut_.rhs);
802 if (ib_processor->TryToExpandWithLowerImpliedbound(
803 IntegerValue(1), i,
804 /*complement=*/true, &cut_, ib_processor->BaseCutBuilder())) {
805 ++total_num_initial_ibs_;
806 abort = false;
807 continue;
808 }
809 cut_.terms[i].Complement(&cut_.rhs);
810 }
811
812 if (ib_processor->TryToExpandWithLowerImpliedbound(
813 IntegerValue(1), i,
814 /*complement=*/true, &cut_, ib_processor->BaseCutBuilder())) {
815 abort = false;
816 ++total_num_initial_ibs_;
817 }
818 }
819 total_num_initial_merges_ +=
820 ib_processor->BaseCutBuilder()->NumMergesSinceLastClear();
821
822 // TODO(user): We assume that this is called with and without the option
823 // use_ib_before_heuristic, so that we can abort if no IB has been applied
824 // since then we will redo the computation. This is not really clean.
825 if (abort) return false;
826 }
827
828 // Our heuristic will try to generate a few different cuts, and we will keep
829 // the most violated one scaled by the l2 norm of the relevant position.
830 //
831 // TODO(user): Experiment for the best value of this initial violation
832 // threshold. Note also that we use the l2 norm on the restricted position
833 // here. Maybe we should change that? On that note, the L2 norm usage seems
834 // a bit weird to me since it grows with the number of term in the cut. And
835 // often, we already have a good cut, and we make it stronger by adding
836 // extra terms that do not change its activity.
837 //
838 // The discussion above only concern the best_scaled_violation initial
839 // value. The remainder_threshold allows to not consider cuts for which the
840 // final efficacity is clearly lower than 1e-3 (it is a bound, so we could
841 // generate cuts with a lower efficacity than this).
842 //
843 // TODO(user): If the rhs is small and close to zero, we might want to
844 // consider different way of complementing the variables.
845 cut_.Canonicalize();
846 const IntegerValue remainder_threshold(
847 std::max(IntegerValue(1), cut_.max_magnitude / 1000));
848 if (cut_.rhs >= 0 && cut_.rhs < remainder_threshold.value()) {
849 return false;
850 }
851
852 // There is no point trying twice the same divisor or a divisor that is too
853 // small. Note that we use a higher threshold than the remainder_threshold
854 // because we can boost the remainder thanks to our adjusting heuristic
855 // below and also because this allows to have cuts with a small range of
856 // coefficients.
857 divisors_.clear();
858 for (const CutTerm& entry : cut_.terms) {
859 // Note that because of the slacks, initial coeff are here too.
860 const IntegerValue magnitude = IntTypeAbs(entry.coeff);
861 if (magnitude <= remainder_threshold) continue;
862 divisors_.push_back(magnitude);
863
864 // If we have too many divisor to try, restrict to the first ones which
865 // should correspond to the highest lp values.
866 if (divisors_.size() > 50) break;
867 }
868 if (divisors_.empty()) return false;
869 gtl::STLSortAndRemoveDuplicates(&divisors_, std::greater<IntegerValue>());
870
871 // Note that most of the time is spend here since we call this function on
872 // many linear equation, and just a few of them have a good enough scaled
873 // violation. We can spend more time afterwards to tune the cut.
874 //
875 // TODO(user): Avoid quadratic algorithm? Note that we are quadratic in
876 // relevant positions not the full cut size, but this is still too much on
877 // some problems.
878 IntegerValue best_divisor(0);
879 double best_scaled_violation = 1e-3;
880 for (const IntegerValue divisor : divisors_) {
881 // Note that the function will abort right away if PositiveRemainder() is
882 // not good enough, so it is quick for bad divisor.
883 const double violation = GetScaledViolation(divisor, options.max_scaling,
884 remainder_threshold, cut_);
885 if (violation > best_scaled_violation) {
886 best_scaled_violation = violation;
887 best_adjusted_coeffs_ = adjusted_coeffs_;
888 best_divisor = divisor;
889 }
890 }
891 if (best_divisor == 0) return false;
892
893 // Try best_divisor divided by small number.
894 for (int div = 2; div < 9; ++div) {
895 const IntegerValue divisor = best_divisor / IntegerValue(div);
896 if (divisor <= 1) continue;
897 const double violation = GetScaledViolation(divisor, options.max_scaling,
898 remainder_threshold, cut_);
899 if (violation > best_scaled_violation) {
900 best_scaled_violation = violation;
901 best_adjusted_coeffs_ = adjusted_coeffs_;
902 best_divisor = divisor;
903 }
904 }
905
906 // Re try complementation on the transformed cut.
907 for (CutTerm& entry : cut_.terms) {
908 if (!entry.HasRelevantLpValue()) break;
909 if (entry.coeff % best_divisor == 0) continue;
910
911 // Temporary complement this variable.
912 entry.Complement(&cut_.rhs);
913
914 const double violation = GetScaledViolation(
915 best_divisor, options.max_scaling, remainder_threshold, cut_);
916 if (violation > best_scaled_violation) {
917 // keep the change.
918 ++total_num_post_complements_;
919 best_scaled_violation = violation;
920 best_adjusted_coeffs_ = adjusted_coeffs_;
921 } else {
922 // Restore.
923 entry.Complement(&cut_.rhs);
924 }
925 }
926
927 // Adjust coefficients as computed by the best GetScaledViolation().
928 for (const auto [index, new_coeff] : best_adjusted_coeffs_) {
929 ++total_num_coeff_adjust_;
930 CutTerm& entry = cut_.terms[index];
931 const IntegerValue remainder = new_coeff - entry.coeff;
932 CHECK_GT(remainder, 0);
933 entry.coeff = new_coeff;
934 cut_.rhs += absl::int128(remainder.value()) *
935 absl::int128(entry.bound_diff.value());
936 cut_.max_magnitude = std::max(cut_.max_magnitude, IntTypeAbs(new_coeff));
937 }
938
939 // Create the base super-additive function f().
940 const IntegerValue rhs_remainder = PositiveRemainder(cut_.rhs, best_divisor);
941 IntegerValue factor_t =
942 GetFactorT(rhs_remainder, best_divisor, cut_.max_magnitude);
943 auto f = GetSuperAdditiveRoundingFunction(rhs_remainder, best_divisor,
944 factor_t, options.max_scaling);
945
946 // Look amongst all our possible function f() for one that dominate greedily
947 // our current best one. Note that we prefer lower scaling factor since that
948 // result in a cut with lower coefficients.
949 //
950 // We only look at relevant position and ignore the other. Not sure this is
951 // the best approach.
952 remainders_.clear();
953 for (const CutTerm& entry : cut_.terms) {
954 if (!entry.HasRelevantLpValue()) break;
955 const IntegerValue coeff = entry.coeff;
956 const IntegerValue r = PositiveRemainder(coeff, best_divisor);
957 if (r > rhs_remainder) remainders_.push_back(r);
958 }
960 if (remainders_.size() <= 100) {
961 best_rs_.clear();
962 for (const IntegerValue r : remainders_) {
963 best_rs_.push_back(f(r));
964 }
965 IntegerValue best_d = f(best_divisor);
966
967 // Note that the complexity seems high 100 * 2 * options.max_scaling, but
968 // this only run on cuts that are already efficient and the inner loop tend
969 // to abort quickly. I didn't see this code in the cpu profile so far.
970 for (const IntegerValue t :
971 {IntegerValue(1),
972 GetFactorT(rhs_remainder, best_divisor, cut_.max_magnitude)}) {
973 for (IntegerValue s(2); s <= options.max_scaling; ++s) {
974 const auto g =
975 GetSuperAdditiveRoundingFunction(rhs_remainder, best_divisor, t, s);
976 int num_strictly_better = 0;
977 rs_.clear();
978 const IntegerValue d = g(best_divisor);
979 for (int i = 0; i < best_rs_.size(); ++i) {
980 const IntegerValue temp = g(remainders_[i]);
981 if (temp * best_d < best_rs_[i] * d) break;
982 if (temp * best_d > best_rs_[i] * d) num_strictly_better++;
983 rs_.push_back(temp);
984 }
985 if (rs_.size() == best_rs_.size() && num_strictly_better > 0) {
986 ++total_num_dominating_f_;
987 f = g;
988 factor_t = t;
989 best_rs_ = rs_;
990 best_d = d;
991 }
992 }
993 }
994 }
995
996 // Use implied bounds to "lift" Booleans into the cut.
997 // This should lead to stronger cuts even if the norms might be worse.
998 num_ib_used_ = 0;
999 if (ib_processor != nullptr) {
1000 const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound(
1001 f, factor_t, &cut_, &cut_builder_);
1002 total_num_pos_lifts_ += num_lb;
1003 total_num_neg_lifts_ += num_ub;
1004 total_num_merges_ += cut_builder_.NumMergesSinceLastClear();
1005 num_ib_used_ = num_lb + num_ub;
1006 }
1007
1008 // More complementation, but for the same f.
1009 // If we can do that, it probably means our heuristics above are not great.
1010 for (int i = 0; i < 3; ++i) {
1011 const int64_t saved = total_num_final_complements_;
1012 for (CutTerm& entry : cut_.terms) {
1013 // Complementing an entry gives:
1014 // [a * X <= b] -> [-a * (diff - X) <= b - a * diff]
1015 //
1016 // We will compare what happen when we apply f:
1017 // [f(b) - f(a) * lp(X)] -> [f(b - a * diff) - f(-a) * (diff - lp(X))].
1018 //
1019 // If lp(X) is zero, then the transformation is always worse.
1020 // Because f(b - a * diff) >= f(b) + f(-a) * diff by super-additivity.
1021 //
1022 // However the larger is X, the better it gets since at diff, we have
1023 // f(b) >= f(b - a * diff) + f(a * diff) >= f(b - a * diff) + f(a) * diff.
1024 //
1025 // TODO(user): It is still unclear if we have a * X + b * (1 - X) <= rhs
1026 // for a Boolean X, what is the best way to apply f and if we should merge
1027 // the terms. If there is no other terms, best is probably
1028 // f(rhs - a) * X + f(rhs - b) * (1 - X).
1029 if (entry.coeff % best_divisor == 0) continue;
1030 if (!entry.HasRelevantLpValue()) continue;
1031
1032 // Avoid potential overflow here.
1033 const IntegerValue prod(CapProdI(entry.bound_diff, entry.coeff));
1034 const IntegerValue remainder = PositiveRemainder(cut_.rhs, best_divisor);
1035 if (ProdOverflow(factor_t, prod)) continue;
1036 if (ProdOverflow(factor_t, CapSubI(remainder, prod))) continue;
1037
1038 const double lp1 =
1039 ToDouble(f(remainder)) - ToDouble(f(entry.coeff)) * entry.lp_value;
1040 const double lp2 = ToDouble(f(remainder - prod)) -
1041 ToDouble(f(-entry.coeff)) *
1042 (ToDouble(entry.bound_diff) - entry.lp_value);
1043 if (lp2 + 1e-2 < lp1) {
1044 entry.Complement(&cut_.rhs);
1045 ++total_num_final_complements_;
1046 }
1047 }
1048 if (total_num_final_complements_ == saved) break;
1049 }
1050
1051 total_num_bumps_ += ApplyWithPotentialBump(f, best_divisor, &cut_);
1052 return true;
1053}
1054
1055CoverCutHelper::~CoverCutHelper() {
1056 if (!VLOG_IS_ON(1)) return;
1057 if (shared_stats_ == nullptr) return;
1058
1059 std::vector<std::pair<std::string, int64_t>> stats;
1060 const auto add_stats = [&stats](absl::string_view name, const CutStats& s) {
1061 stats.push_back(
1062 {absl::StrCat(name, "num_overflows"), s.num_overflow_aborts});
1063 stats.push_back({absl::StrCat(name, "num_lifting"), s.num_lifting});
1064 stats.push_back({absl::StrCat(name, "num_initial_ib"), s.num_initial_ibs});
1065 stats.push_back({absl::StrCat(name, "num_implied_lb"), s.num_lb_ibs});
1066 stats.push_back({absl::StrCat(name, "num_implied_ub"), s.num_ub_ibs});
1067 stats.push_back({absl::StrCat(name, "num_bumps"), s.num_bumps});
1068 stats.push_back({absl::StrCat(name, "num_cuts"), s.num_cuts});
1069 stats.push_back({absl::StrCat(name, "num_merges"), s.num_merges});
1070 };
1071 add_stats("cover_cut/", cover_stats_);
1072 add_stats("flow_cut/", flow_stats_);
1073 add_stats("ls_cut/", ls_stats_);
1074 shared_stats_->AddStats(stats);
1075}
1076
1077namespace {
1078
1079struct LargeCoeffFirst {
1080 bool operator()(const CutTerm& a, const CutTerm& b) const {
1081 if (a.coeff == b.coeff) {
1082 return a.LpDistToMaxValue() > b.LpDistToMaxValue();
1083 }
1084 return a.coeff > b.coeff;
1085 }
1086};
1087
1088struct SmallContribFirst {
1089 bool operator()(const CutTerm& a, const CutTerm& b) const {
1090 const double contrib_a = a.lp_value * static_cast<double>(a.coeff.value());
1091 const double contrib_b = b.lp_value * static_cast<double>(b.coeff.value());
1092 return contrib_a < contrib_b;
1093 }
1094};
1095
1096struct LargeContribFirst {
1097 bool operator()(const CutTerm& a, const CutTerm& b) const {
1098 const double contrib_a = a.lp_value * static_cast<double>(a.coeff.value());
1099 const double contrib_b = b.lp_value * static_cast<double>(b.coeff.value());
1100 return contrib_a > contrib_b;
1101 }
1102};
1103
1104// When minimizing a cover we want to remove bad score (large dist) divided by
1105// item size. Note that here we assume item are "boolean" fully taken or not.
1106// for general int we use (lp_dist / bound_diff) / (coeff * bound_diff) which
1107// lead to the same formula as for Booleans.
1108struct KnapsackAdd {
1109 bool operator()(const CutTerm& a, const CutTerm& b) const {
1110 const double contrib_a =
1111 a.LpDistToMaxValue() / static_cast<double>(a.coeff.value());
1112 const double contrib_b =
1113 b.LpDistToMaxValue() / static_cast<double>(b.coeff.value());
1114 return contrib_a < contrib_b;
1115 }
1116};
1117struct KnapsackRemove {
1118 bool operator()(const CutTerm& a, const CutTerm& b) const {
1119 const double contrib_a =
1120 a.LpDistToMaxValue() / static_cast<double>(a.coeff.value());
1121 const double contrib_b =
1122 b.LpDistToMaxValue() / static_cast<double>(b.coeff.value());
1123 return contrib_a > contrib_b;
1124 }
1125};
1126
1127} // namespace.
1128
1129// Transform to a minimal cover. We want to greedily remove the largest coeff
1130// first, so we have more chance for the "lifting" below which can increase
1131// the cut violation. If the coeff are the same, we prefer to remove high
1132// distance from upper bound first.
1133template <class Compare>
1134int CoverCutHelper::MinimizeCover(int cover_size, absl::int128 slack) {
1135 CHECK_GT(slack, 0);
1136 std::sort(cut_.terms.begin(), cut_.terms.begin() + cover_size, Compare());
1137 for (int i = 0; i < cover_size;) {
1138 const CutTerm& t = cut_.terms[i];
1139 const absl::int128 contrib =
1140 absl::int128(t.bound_diff.value()) * absl::int128(t.coeff.value());
1141 if (contrib < slack) {
1142 slack -= contrib;
1143 std::swap(cut_.terms[i], cut_.terms[--cover_size]);
1144 } else {
1145 ++i;
1146 }
1147 }
1148 DCHECK_GT(cover_size, 0);
1149 return cover_size;
1150}
1151
1152template <class CompareAdd, class CompareRemove>
1153int CoverCutHelper::GetCoverSize(int relevant_size) {
1154 if (relevant_size == 0) return 0;
1155
1156 // Take first all at variable at upper bound, and ignore the one at lower
1157 // bound.
1158 int part1 = 0;
1159 for (int i = 0; i < relevant_size;) {
1160 CutTerm& term = cut_.terms[i];
1161 const double dist = term.LpDistToMaxValue();
1162 if (dist < 1e-6) {
1163 // Move to part 1.
1164 std::swap(term, cut_.terms[part1]);
1165 ++i;
1166 ++part1;
1167 } else if (term.lp_value > 1e-6) {
1168 // Keep in part 2.
1169 ++i;
1170 } else {
1171 // Exclude entirely (part 3).
1172 --relevant_size;
1173 std::swap(term, cut_.terms[relevant_size]);
1174 }
1175 }
1176 std::sort(cut_.terms.begin() + part1, cut_.terms.begin() + relevant_size,
1177 CompareAdd());
1178
1179 // We substract the initial rhs to avoid overflow.
1180 CHECK_GE(cut_.rhs, 0);
1181 absl::int128 max_shifted_activity = -cut_.rhs;
1182 absl::int128 shifted_round_up = -cut_.rhs;
1183 int cover_size = 0;
1184 double dist = 0.0;
1185 for (; cover_size < relevant_size; ++cover_size) {
1186 if (max_shifted_activity > 0) break;
1187 const CutTerm& term = cut_.terms[cover_size];
1188 max_shifted_activity += absl::int128(term.coeff.value()) *
1189 absl::int128(term.bound_diff.value());
1190 shifted_round_up += absl::int128(term.coeff.value()) *
1191 std::min(absl::int128(term.bound_diff.value()),
1192 absl::int128(std::ceil(term.lp_value - 1e-6)));
1193 dist += term.LpDistToMaxValue();
1194 }
1195
1196 CHECK_GE(cover_size, 0);
1197 if (shifted_round_up <= 0) {
1198 return 0;
1199 }
1200 return MinimizeCover<CompareRemove>(cover_size, max_shifted_activity);
1201}
1202
1203// Try a simple cover heuristic.
1204// Look for violated CUT of the form: sum (UB - X) or (X - LB) >= 1.
1205int CoverCutHelper::GetCoverSizeForBooleans() {
1206 // Sorting can be slow, so we start by splitting the vector in 3 parts
1207 // [can always be in cover, candidates, can never be in cover].
1208 int part1 = 0;
1209 int relevant_size = cut_.terms.size();
1210 const double threshold = 1.0 - 1.0 / static_cast<double>(relevant_size);
1211 for (int i = 0; i < relevant_size;) {
1212 const double lp_value = cut_.terms[i].lp_value;
1213
1214 // Exclude non-Boolean.
1215 if (cut_.terms[i].bound_diff > 1) {
1216 --relevant_size;
1217 std::swap(cut_.terms[i], cut_.terms[relevant_size]);
1218 continue;
1219 }
1220
1221 if (lp_value >= threshold) {
1222 // Move to part 1.
1223 std::swap(cut_.terms[i], cut_.terms[part1]);
1224 ++i;
1225 ++part1;
1226 } else if (lp_value >= 0.001) {
1227 // Keep in part 2.
1228 ++i;
1229 } else {
1230 // Exclude entirely (part 3).
1231 --relevant_size;
1232 std::swap(cut_.terms[i], cut_.terms[relevant_size]);
1233 }
1234 }
1235
1236 // Sort by decreasing Lp value.
1237 std::sort(cut_.terms.begin() + part1, cut_.terms.begin() + relevant_size,
1238 [](const CutTerm& a, const CutTerm& b) {
1239 if (a.lp_value == b.lp_value) {
1240 // Prefer low coefficients if the distance is the same.
1241 return a.coeff < b.coeff;
1242 }
1243 return a.lp_value > b.lp_value;
1244 });
1245
1246 double activity = 0.0;
1247 int cover_size = relevant_size;
1248 absl::int128 slack = -cut_.rhs;
1249 for (int i = 0; i < relevant_size; ++i) {
1250 const CutTerm& term = cut_.terms[i];
1251 activity += term.LpDistToMaxValue();
1252
1253 // As an heuristic we select all the term so that the sum of distance
1254 // to the upper bound is <= 1.0. If the corresponding slack is positive,
1255 // then we will have a cut of violation at least 0.0. Note that this
1256 // violation can be improved by the lifting.
1257 //
1258 // TODO(user): experiment with different threshold (even greater than one).
1259 // Or come up with an algo that incorporate the lifting into the heuristic.
1260 if (activity > 0.9999) {
1261 cover_size = i; // before this entry.
1262 break;
1263 }
1264
1265 // TODO(user): Stop if we overflow int128 max! Note that because we scale
1266 // things so that the max coeff is 2^52, this is unlikely.
1267 slack += absl::int128(term.coeff.value()) *
1268 absl::int128(term.bound_diff.value());
1269 }
1270
1271 // If the rhs is now negative, we have a cut.
1272 //
1273 // Note(user): past this point, now that a given "base" cover has been chosen,
1274 // we basically compute the cut (of the form sum X <= bound) with the maximum
1275 // possible violation. Note also that we lift as much as possible, so we don't
1276 // necessarily optimize for the cut efficacity though. But we do get a
1277 // stronger cut.
1278 if (slack <= 0) return 0;
1279 if (cover_size == 0) return 0;
1280 return MinimizeCover<LargeCoeffFirst>(cover_size, slack);
1281}
1282
1283void CoverCutHelper::InitializeCut(const CutData& input_ct) {
1284 num_lifting_ = 0;
1285 cut_ = input_ct;
1286
1287 // We should have dealt with an infeasible constraint before.
1288 // Note that because of our scaling, it is unlikely we will overflow int128.
1289 CHECK_GE(cut_.rhs, 0);
1290 DCHECK(cut_.AllCoefficientsArePositive());
1291}
1292
// Tries to generate a knapsack cover cut from input_ct: pick a cover, complete
// its terms, and apply a super-additive MIR rounding function. Returns true and
// leaves the result in cut_ on success.
bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct,
                                       ImpliedBoundsProcessor* ib_processor) {
  InitializeCut(input_ct);

  // Tricky: this pre-expansion only works because the cut's int128 rhs is not
  // changed by these operations.
  if (ib_processor != nullptr) {
    ib_processor->BaseCutBuilder()->ClearNumMerges();
    const int old_size = static_cast<int>(cut_.terms.size());
    for (int i = 0; i < old_size; ++i) {
      // We only look at non-Boolean terms with an lp value not close to the
      // upper bound.
      const CutTerm& term = cut_.terms[i];
      if (term.bound_diff <= 1) continue;
      if (term.lp_value + 1e-4 > AsDouble(term.bound_diff)) continue;

      if (ib_processor->TryToExpandWithLowerImpliedbound(
              IntegerValue(1), i,
              /*complement=*/false, &cut_, ib_processor->BaseCutBuilder())) {
        ++cover_stats_.num_initial_ibs;
      }
    }
  }

  // Decide which cover heuristic to use: the general one if any relevant
  // integer (non-Boolean) term remains, else the Boolean-specific one.
  bool has_relevant_int = false;
  for (const CutTerm& term : cut_.terms) {
    if (term.HasRelevantLpValue() && term.bound_diff > 1) {
      has_relevant_int = true;
      break;
    }
  }

  const int base_size = static_cast<int>(cut_.terms.size());
  const int cover_size =
      has_relevant_int
          ? GetCoverSize<LargeContribFirst, LargeCoeffFirst>(base_size)
          : GetCoverSizeForBooleans();
  if (!has_relevant_int && ib_processor == nullptr) {
    // Cache the Boolean base constraint and its cover so that
    // TryWithLetchfordSouliLifting() can reuse them. If some implied bound
    // substitutions are possible, we do not cache anything currently because
    // the logic is slightly different between the two code paths. Fix?
    has_bool_base_ct_ = true;
    bool_base_ct_ = cut_;
    bool_cover_size_ = cover_size;
  }
  if (cover_size == 0) return false;

  // The cut is just obtained by complementing the variable in the cover and
  // applying the MIR super additive function.
  //
  // Note that since all coeff in the cover will now be negative. If we do no
  // scaling, and if we use max_coeff_in_cover to construct f(), they will be
  // mapped by f() to -1 and we get the classical cover inequality. With scaling
  // we can get a strictly dominating cut though.
  //
  // TODO(user): we don't have to pick max_coeff_in_cover and could use the
  // coefficient of the most fractional variable. Or like in the MIR code try
  // a few of them. Currently, the cut in the test is worse if we don't take
  // the max_coeff_in_cover though, so we need more understanding.
  //
  // TODO(user): It seems we could use a more advanced lifting function
  // described later in the paper. Investigate.
  IntegerValue best_coeff = 0;
  double best_score = -1.0;
  IntegerValue max_coeff_in_cover(0);
  for (int i = 0; i < cover_size; ++i) {
    CutTerm& term = cut_.terms[i];
    max_coeff_in_cover = std::max(max_coeff_in_cover, term.coeff);
    // Score = fractionality of the lp value, weighted by the coefficient.
    const double score =
        std::abs(term.lp_value - std::floor(term.lp_value + 1e-6)) *
        ToDouble(term.coeff);
    if (score > best_score) {
      best_score = score;
      best_coeff = term.coeff;
    }
    term.Complement(&cut_.rhs);
  }
  CHECK_LT(cut_.rhs, 0);  // Because we complemented a cover.

  // TODO(user): Experiment without this line that basically disable scoring.
  best_coeff = max_coeff_in_cover;

  // TODO(user): experiment with different value of scaling and param t.
  std::function<IntegerValue(IntegerValue)> f;
  {
    // Cap the scaling so that scaled coefficients cannot overflow.
    IntegerValue max_magnitude = 0;
    for (const CutTerm& term : cut_.terms) {
      max_magnitude = std::max(max_magnitude, IntTypeAbs(term.coeff));
    }
    const IntegerValue max_scaling(std::min(
        IntegerValue(6000), FloorRatio(kMaxIntegerValue, max_magnitude)));
    const IntegerValue remainder = PositiveRemainder(cut_.rhs, best_coeff);
    f = GetSuperAdditiveRoundingFunction(remainder, best_coeff, IntegerValue(1),
                                         max_scaling);
  }

  if (ib_processor != nullptr) {
    const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound(
        f, /*factor_t=*/1, &cut_, &cut_builder_);
    cover_stats_.num_lb_ibs += num_lb;
    cover_stats_.num_ub_ibs += num_ub;
    cover_stats_.num_merges += cut_builder_.NumMergesSinceLastClear();
  }

  cover_stats_.num_bumps += ApplyWithPotentialBump(f, best_coeff, &cut_);

  // Update counters. Terms outside the cover that kept a non-zero coefficient
  // were "lifted" into the cut.
  for (int i = cover_size; i < cut_.terms.size(); ++i) {
    if (cut_.terms[i].coeff != 0) ++num_lifting_;
  }
  cover_stats_.num_lifting += num_lifting_;
  ++cover_stats_.num_cuts;
  return true;
}
1407
1408bool CoverCutHelper::TrySingleNodeFlow(const CutData& input_ct,
1409 ImpliedBoundsProcessor* ib_processor) {
1410 InitializeCut(input_ct);
1411
1412 // TODO(user): Change the heuristic to depends on the lp_value of the implied
1413 // bounds. This way we can exactly match what happen in the old
1414 // FlowCoverCutHelper.
1415 const int base_size = static_cast<int>(cut_.terms.size());
1416 const int cover_size = GetCoverSize<KnapsackAdd, KnapsackRemove>(base_size);
1417 if (cover_size == 0) return false;
1418
1419 // After complementing the term in the cover, we have
1420 // sum -ci.X + other_terms <= -slack;
1421 for (int i = 0; i < cover_size; ++i) {
1422 cut_.terms[i].Complement(&cut_.rhs);
1423
1424 // We do not support complex terms, but we shouldn't get any.
1425 if (cut_.terms[i].expr_coeffs[1] != 0) return false;
1426 }
1427
1428 // The algorithm goes as follow:
1429 // - Compute heuristically a minimal cover.
1430 // - We have sum_cover ci.Xi >= slack where Xi is distance to upper bound.
1431 // - Apply coefficient strenghtening if ci > slack.
1432 //
1433 // Using implied bound we have two cases (all coeffs positive):
1434 // 1/ ci.Xi = ci.fi.Bi + ci.Si : always good.
1435 // 2/ ci.Xi = ci.di.Bi - ci.Si <= di.Bi: good if Si lp_value is zero.
1436 //
1437 // Note that if everything is Boolean, we just get a normal cover and coeff
1438 // strengthening just result in all coeff at 1, so worse than our cover
1439 // heuristic.
1440 CHECK_LT(cut_.rhs, 0);
1441 if (cut_.rhs <= absl::int128(std::numeric_limits<int64_t>::min())) {
1442 return false;
1443 }
1444
1445 bool has_large_coeff = false;
1446 for (const CutTerm& term : cut_.terms) {
1447 if (IntTypeAbs(term.coeff) > 1'000'000) {
1448 has_large_coeff = true;
1449 break;
1450 }
1451 }
1452
1453 // TODO(user): Shouldn't we just use rounding f() with maximum coeff to allows
1454 // lift of all other terms? but then except for the heuristic the cut is
1455 // really similar to the cover cut.
1456 const IntegerValue positive_rhs = -static_cast<int64_t>(cut_.rhs);
1457 IntegerValue min_magnitude = kMaxIntegerValue;
1458 for (int i = 0; i < cover_size; ++i) {
1459 const IntegerValue magnitude = IntTypeAbs(cut_.terms[i].coeff);
1460 min_magnitude = std::min(min_magnitude, magnitude);
1461 }
1462 const bool use_scaling =
1463 has_large_coeff || min_magnitude == 1 || min_magnitude >= positive_rhs;
1464 auto f = use_scaling ? GetSuperAdditiveStrengtheningMirFunction(
1465 positive_rhs, /*scaling=*/6000)
1467 min_magnitude);
1468
1469 if (ib_processor != nullptr) {
1470 const auto [num_lb, num_ub] = ib_processor->PostprocessWithImpliedBound(
1471 f, /*factor_t=*/1, &cut_, &cut_builder_);
1472 flow_stats_.num_lb_ibs += num_lb;
1473 flow_stats_.num_ub_ibs += num_ub;
1474 flow_stats_.num_merges += cut_builder_.NumMergesSinceLastClear();
1475 }
1476
1477 // Lifting.
1478 {
1479 IntegerValue period = positive_rhs;
1480 for (const CutTerm& term : cut_.terms) {
1481 if (term.coeff > 0) continue;
1482 period = std::max(period, -term.coeff);
1483 }
1484
1485 // Compute good period.
1486 // We don't want to extend it in the simpler case where f(x)=-1 if x < 0.
1487 //
1488 // TODO(user): If the Mir*() function is used, we don't need to extend that
1489 // much the period. Fix.
1490 if (f(-period + FloorRatio(period, 2)) != f(-period)) {
1491 // TODO(user): do exact binary search to find highest x in
1492 // [-max_neg_magnitude, 0] such that f(x) == f(-max_neg_magnitude) ? not
1493 // really needed though since we know that we have this equality:
1494 CHECK_EQ(f(-period), f(-positive_rhs));
1495 period = std::max(period, CapProdI(2, positive_rhs) - 1);
1496 }
1497
1498 f = ExtendNegativeFunction(f, period);
1499 }
1500
1501 // Generate the cut.
1502 cut_.rhs = absl::int128(f(-positive_rhs).value());
1503 for (CutTerm& term : cut_.terms) {
1504 const IntegerValue old_coeff = term.coeff;
1505 term.coeff = f(term.coeff);
1506 if (old_coeff > 0 && term.coeff != 0) ++flow_stats_.num_lifting;
1507 }
1508 ++flow_stats_.num_cuts;
1509 return true;
1510}
1511
1512bool CoverCutHelper::TryWithLetchfordSouliLifting(
1513 const CutData& input_ct, ImpliedBoundsProcessor* ib_processor) {
1514 int cover_size;
1515 if (has_bool_base_ct_) {
1516 // We already called GetCoverSizeForBooleans() and ib_processor was nullptr,
1517 // so reuse that info.
1518 CHECK(ib_processor == nullptr);
1519 InitializeCut(bool_base_ct_);
1520 cover_size = bool_cover_size_;
1521 } else {
1522 InitializeCut(input_ct);
1523
1524 // Perform IB expansion with no restriction, all coeff should still be
1525 // positive.
1526 //
1527 // TODO(user): Merge Boolean terms that are complement of each other.
1528 if (ib_processor != nullptr) {
1529 ib_processor->BaseCutBuilder()->ClearNumMerges();
1530 const int old_size = static_cast<int>(cut_.terms.size());
1531 for (int i = 0; i < old_size; ++i) {
1532 if (cut_.terms[i].bound_diff <= 1) continue;
1533 if (ib_processor->TryToExpandWithLowerImpliedbound(
1534 IntegerValue(1), i,
1535 /*complement=*/false, &cut_, ib_processor->BaseCutBuilder())) {
1536 ++ls_stats_.num_initial_ibs;
1537 }
1538 }
1539 }
1540
1541 // TODO(user): we currently only deal with Boolean in the cover. Fix.
1542 cover_size = GetCoverSizeForBooleans();
1543 }
1544 if (cover_size == 0) return false;
1545
1546 // We don't support big rhs here.
1547 // Note however than since this only deal with Booleans, it is less likely.
1548 if (cut_.rhs > absl::int128(std::numeric_limits<int64_t>::max())) {
1549 ++ls_stats_.num_overflow_aborts;
1550 return false;
1551 }
1552 const IntegerValue rhs = static_cast<int64_t>(cut_.rhs);
1553
1554 // Collect the weight in the cover.
1555 IntegerValue sum(0);
1556 std::vector<IntegerValue> cover_weights;
1557 for (int i = 0; i < cover_size; ++i) {
1558 CHECK_EQ(cut_.terms[i].bound_diff, 1);
1559 CHECK_GT(cut_.terms[i].coeff, 0);
1560 cover_weights.push_back(cut_.terms[i].coeff);
1561 sum = CapAddI(sum, cut_.terms[i].coeff);
1562 }
1563 if (AtMinOrMaxInt64(sum.value())) {
1564 ++ls_stats_.num_overflow_aborts;
1565 return false;
1566 }
1567 CHECK_GT(sum, rhs);
1568
1569 // Compute the correct threshold so that if we round down larger weights to
1570 // p/q. We have sum of the weight in cover == base_rhs.
1571 IntegerValue p(0);
1572 IntegerValue q(0);
1573 IntegerValue previous_sum(0);
1574 std::sort(cover_weights.begin(), cover_weights.end());
1575 for (int i = 0; i < cover_size; ++i) {
1576 q = IntegerValue(cover_weights.size() - i);
1577 if (previous_sum + cover_weights[i] * q > rhs) {
1578 p = rhs - previous_sum;
1579 break;
1580 }
1581 previous_sum += cover_weights[i];
1582 }
1583 CHECK_GE(q, 1);
1584
1585 // Compute thresholds.
1586 // For the first q values, thresholds[i] is the smallest integer such that
1587 // q * threshold[i] > p * (i + 1).
1588 std::vector<IntegerValue> thresholds;
1589 for (int i = 0; i < q; ++i) {
1590 // TODO(user): compute this in an overflow-safe way.
1591 if (CapProd(p.value(), i + 1) >= std::numeric_limits<int64_t>::max() - 1) {
1592 ++ls_stats_.num_overflow_aborts;
1593 return false;
1594 }
1595 thresholds.push_back(CeilRatio(p * (i + 1) + 1, q));
1596 }
1597
1598 // For the other values, we just add the weights.
1599 std::reverse(cover_weights.begin(), cover_weights.end());
1600 for (int i = q.value(); i < cover_size; ++i) {
1601 thresholds.push_back(thresholds.back() + cover_weights[i]);
1602 }
1603 CHECK_EQ(thresholds.back(), rhs + 1);
1604
1605 // Generate the cut.
1606 //
1607 // Our algo is quadratic in worst case, but large coefficients should be
1608 // rare, and in practice we don't really see this.
1609 //
1610 // Note that this work for non-Boolean since we can just "theorically" split
1611 // them as a sum of Booleans :) Probably a cleaner proof exist by just using
1612 // the super-additivity of the lifting function on [0, rhs].
1613 temp_cut_.rhs = cover_size - 1;
1614 temp_cut_.terms.clear();
1615
1616 const int base_size = static_cast<int>(cut_.terms.size());
1617 for (int i = 0; i < base_size; ++i) {
1618 const CutTerm& term = cut_.terms[i];
1619 const IntegerValue coeff = term.coeff;
1620 IntegerValue cut_coeff(1);
1621 if (coeff < thresholds[0]) {
1622 if (i >= cover_size) continue;
1623 } else {
1624 // Find the largest index <= coeff.
1625 //
1626 // TODO(user): For exact multiple of p/q we can increase the coeff by 1/2.
1627 // See section in the paper on getting maximal super additive function.
1628 for (int i = 1; i < cover_size; ++i) {
1629 if (coeff < thresholds[i]) break;
1630 cut_coeff = IntegerValue(i + 1);
1631 }
1632 if (cut_coeff != 0 && i >= cover_size) ++ls_stats_.num_lifting;
1633 if (cut_coeff > 1 && i < cover_size) ++ls_stats_.num_lifting; // happen?
1634 }
1635
1636 temp_cut_.terms.push_back(term);
1637 temp_cut_.terms.back().coeff = cut_coeff;
1638 }
1639
1640 cut_ = temp_cut_;
1641 ++ls_stats_.num_cuts;
1642 return true;
1643}
1644
1645BoolRLTCutHelper::~BoolRLTCutHelper() {
1646 if (!VLOG_IS_ON(1)) return;
1647 std::vector<std::pair<std::string, int64_t>> stats;
1648 stats.push_back({"bool_rlt/num_tried", num_tried_});
1649 stats.push_back({"bool_rlt/num_tried_factors", num_tried_factors_});
1650 shared_stats_->AddStats(stats);
1651}
1652
1653void BoolRLTCutHelper::Initialize(
1654 const absl::flat_hash_map<IntegerVariable, glop::ColIndex>& lp_vars) {
1655 product_detector_->InitializeBooleanRLTCuts(lp_vars, *lp_values_);
1656 enabled_ = !product_detector_->BoolRLTCandidates().empty();
1657}
1658
// TODO(user): do less work, add more stats.
//
// Tries to find a Boolean "factor" whose product with the input constraint,
// once linearized term by term, yields a violated cut. Candidate factors come
// from the product detector; each one is first scored on a filtered version
// of the constraint, and the best one (by cut efficacy) is then re-applied to
// the full constraint. On success, the cut is left in cut_.
bool BoolRLTCutHelper::TrySimpleSeparation(const CutData& input_ct) {
  if (!enabled_) return false;

  ++num_tried_;
  DCHECK(input_ct.AllCoefficientsArePositive());

  // We will list the interesting "factor" to try to multiply + linearize the
  // input constraint with.
  absl::flat_hash_set<IntegerVariable> to_try_set;
  std::vector<IntegerVariable> to_try;

  // We can look at the linearized factor * term and bound the activity delta
  // rescaled by 1 / factor.
  //
  // CASE linearized_term delta = term/factor - previous
  //
  // DROP, 0 0 - X
  // MC_CORMICK, factor * ub - (ub - X) (ub - X) * (1 - 1/factor) <= 0
  // SQUARE, factor=X 1 - X
  // RLT, factor - P 1 - X - P/X <= 1 - X
  //
  // TODO(user): detect earlier that a factor is not worth checking because
  // we already loose too much with the DROP/MC_CORMICK cases ? Filter more ?
  // I think we can probably evaluate the factor efficiency during the first
  // loop which usually have a small complexity compared to num_factor_to_try
  // times num filtered terms.
  filtered_input_.terms.clear();
  filtered_input_.rhs = input_ct.rhs;

  const auto& candidates = product_detector_->BoolRLTCandidates();
  for (const CutTerm& term : input_ct.terms) {
    // The only options are DROP or MC_CORMICK, but the later will unlikely win.
    //
    // TODO(user): we never use factor with lp value < 1e-4, but we could use a
    // factor equal to 1.0 I think. Double check.
    if (!term.IsBoolean() && term.lp_value <= 1e-6) {
      continue;
    }

    // Here the MC_CORMICK will not loose much. And SQUARE or RLT cannot win
    // much, so we can assume there is no loss and just look for violated
    // subconstraint.
    //
    // The term is (nearly) at its maximum value: fold its full contribution
    // into the rhs and drop it from the filtered constraint.
    if (term.LpDistToMaxValue() <= 1e-6) {
      filtered_input_.rhs -= absl::int128(term.coeff.value()) *
                             absl::int128(term.bound_diff.value());
      continue;
    }

    // Convert to var or -var (to mean 1 - var).
    //
    // TODO(user): We could keep for each factor the max gain, so that we
    // can decided if it is not even worth trying a factor.
    const IntegerVariable var = term.GetUnderlyingLiteralOrNone();
    if (var != kNoIntegerVariable && candidates.contains(NegationOf(var))) {
      for (const IntegerVariable factor : candidates.at(NegationOf(var))) {
        // Dedup factors while preserving discovery order.
        if (to_try_set.insert(factor).second) to_try.push_back(factor);
      }
    }
    filtered_input_.terms.push_back(term);
  }

  // Score each candidate factor on the filtered constraint and keep the one
  // producing the most efficacious cut (with a minimum efficacy threshold).
  //
  // TODO(user): Avoid constructing the cut just to evaluate its efficacy.
  double best_efficacy = 1e-3;
  IntegerVariable best_factor = kNoIntegerVariable;
  for (const IntegerVariable factor : to_try) {
    ++num_tried_factors_;
    if (!TryProduct(factor, filtered_input_)) continue;
    const double efficacy = cut_.ComputeEfficacy();
    if (efficacy > best_efficacy) {
      best_efficacy = efficacy;
      best_factor = factor;
    }
  }

  // If we found a good factor, applies it to the non-filtered base constraint.
  if (best_factor != kNoIntegerVariable) {
    return TryProduct(best_factor, input_ct);
  }
  return false;
}
1740
namespace {

// Each bool * term can be linearized in a couple of way.
// We will choose the best one.
enum class LinearizationOption {
  DROP,        // Lower bound the product by zero and drop the term.
  MC_CORMICK,  // Complement the term and use the McCormick relaxation.
  RLT,         // Use a known literal upper bound on the product.
  SQUARE,      // factor == term's literal, so the product is the term itself.
};

}  // namespace
1753
1754double BoolRLTCutHelper::GetLiteralLpValue(IntegerVariable var) const {
1755 if (VariableIsPositive(var)) return (*lp_values_)[var];
1756 return 1.0 - (*lp_values_)[PositiveVariable(var)];
1757}
1758
// Multiplies `input` (a <= constraint) by the Boolean literal `factor` and
// linearizes every product term, choosing per term the option (SQUARE / DROP /
// MC_CORMICK / RLT) that keeps the most activity at the current LP point.
// On success the resulting cut is stored in cut_ and true is returned; false
// is returned when the rhs is too large to be folded safely into the final
// "- factor * rhs" term.
bool BoolRLTCutHelper::TryProduct(IntegerVariable factor,
                                  const CutData& input) {
  cut_.terms.clear();
  cut_.rhs = 0;
  // Complement() calls below accumulate their rhs corrections here.
  absl::int128 old_rhs = input.rhs;

  const double factor_lp = GetLiteralLpValue(factor);

  // Consider each term in sequence and linearize them.
  for (CutTerm term : input.terms) {
    LinearizationOption best_option = LinearizationOption::DROP;

    // Recover the "IntegerVariable" literal if any.
    const IntegerVariable var = term.GetUnderlyingLiteralOrNone();
    const bool is_literal = var != kNoIntegerVariable;

    // First option is if factor == var.
    if (factor == var) {
      // The term can be left unchanged.
      // Note that we "win" (factor_lp - factor_lp ^ 2) * coeff activity.
      best_option = LinearizationOption::SQUARE;
      cut_.terms.push_back(term);
      continue;
    }

    // If factor == NegationOf(var) our best linearization is unfortunately
    // just product >= 0.
    if (NegationOf(factor) == var) {
      best_option = LinearizationOption::DROP;
      continue;
    }

    // TODO(user): If l implies x and y, we have x * y >= l.
    // We have to choose l as high as possible if multiple choices.

    // We start by the lp value for the drop option: simply dropping the term
    // since we know it is >= 0. We will choose the option with the highest
    // lp value, which is the one that "lose" the least activity.
    double best_lp = 0.0;

    // Second option, is complement it and use x * (1 - y) <= (1 - y):
    // x * [1 - (1 - y)] = x - x * (1 - y) >= x - (1 - y)
    const double complement_lp =
        static_cast<double>(term.bound_diff.value()) - term.lp_value;
    const double mc_cormick_lp = factor_lp - complement_lp;
    if (mc_cormick_lp > best_lp) {
      best_option = LinearizationOption::MC_CORMICK;
      best_lp = mc_cormick_lp;
    }

    // Last option is complement it and use a relation x * (1 - y) <= u.
    // so the lp is x - u. Note that this can be higher than x * y if the
    // bilinear relation is violated by the lp solution.
    if (is_literal) {
      // TODO(user): only consider variable within current lp.
      const IntegerVariable ub_lit =
          product_detector_->LiteralProductUpperBound(factor, NegationOf(var));
      if (ub_lit != kNoIntegerVariable) {
        const double lit_lp = GetLiteralLpValue(ub_lit);
        if (factor_lp - lit_lp > best_lp) {
          // We do it right away since we have all we need.
          best_option = LinearizationOption::RLT;

          // First complement to update rhs.
          term.Complement(&old_rhs);

          // Now we replace the term data.
          term.lp_value = lit_lp;
          term.ReplaceExpressionByLiteral(ub_lit);
          cut_.terms.push_back(term);
          continue;
        }
      }
    }

    if (best_option == LinearizationOption::DROP) continue;

    // We just keep the complemented term.
    CHECK(best_option == LinearizationOption::MC_CORMICK);
    term.Complement(&old_rhs);
    cut_.terms.push_back(term);
  }

  // Finally we add the - factor * old_rhs term.
  // We can only do that if the old_rhs is not too big.
  //
  // TODO(user): Avoid right away large constraint coming from gomory...
  const absl::int128 limit(int64_t{1} << 50);
  if (old_rhs > limit || old_rhs < -limit) return false;

  CutTerm term;
  term.coeff = -IntegerValue(static_cast<int64_t>(old_rhs));
  term.lp_value = factor_lp;
  term.bound_diff = 1;
  term.ReplaceExpressionByLiteral(factor);
  cut_.terms.push_back(term);

  return true;
}
1858
1862 int linearization_level,
1863 Model* model) {
1864 CutGenerator result;
1865 if (z.var != kNoIntegerVariable) result.vars.push_back(z.var);
1866 if (x.var != kNoIntegerVariable) result.vars.push_back(x.var);
1867 if (y.var != kNoIntegerVariable) result.vars.push_back(y.var);
1868
1869 IntegerTrail* const integer_trail = model->GetOrCreate<IntegerTrail>();
1870 Trail* trail = model->GetOrCreate<Trail>();
1871
1872 result.generate_cuts = [z, x, y, linearization_level, model, trail,
1873 integer_trail](LinearConstraintManager* manager) {
1874 if (trail->CurrentDecisionLevel() > 0 && linearization_level == 1) {
1875 return true;
1876 }
1877 const int64_t x_lb = integer_trail->LevelZeroLowerBound(x).value();
1878 const int64_t x_ub = integer_trail->LevelZeroUpperBound(x).value();
1879 const int64_t y_lb = integer_trail->LevelZeroLowerBound(y).value();
1880 const int64_t y_ub = integer_trail->LevelZeroUpperBound(y).value();
1881
1882 // if x or y are fixed, the McCormick equations are exact.
1883 if (x_lb == x_ub || y_lb == y_ub) return true;
1884
1885 // Check for overflow with the product of expression bounds and the
1886 // product of one expression bound times the constant part of the other
1887 // expression.
1888 const int64_t x_max_amp = std::max(std::abs(x_lb), std::abs(x_ub));
1889 const int64_t y_max_amp = std::max(std::abs(y_lb), std::abs(y_ub));
1890 constexpr int64_t kMaxSafeInteger = (int64_t{1} << 53) - 1;
1891 if (CapProd(y_max_amp, x_max_amp) > kMaxSafeInteger) return true;
1892 if (CapProd(y_max_amp, std::abs(x.constant.value())) > kMaxSafeInteger) {
1893 return true;
1894 }
1895 if (CapProd(x_max_amp, std::abs(y.constant.value())) > kMaxSafeInteger) {
1896 return true;
1897 }
1898
1899 const auto& lp_values = manager->LpValues();
1900 const double x_lp_value = x.LpValue(lp_values);
1901 const double y_lp_value = y.LpValue(lp_values);
1902 const double z_lp_value = z.LpValue(lp_values);
1903
1904 // TODO(user): As the bounds change monotonically, these cuts
1905 // dominate any previous one. try to keep a reference to the cut and
1906 // replace it. Alternatively, add an API for a level-zero bound change
1907 // callback.
1908
1909 // Cut -z + x_coeff * x + y_coeff* y <= rhs
1910 auto try_add_above_cut = [&](int64_t x_coeff, int64_t y_coeff,
1911 int64_t rhs) {
1912 if (-z_lp_value + x_lp_value * x_coeff + y_lp_value * y_coeff >=
1913 rhs + kMinCutViolation) {
1915 /*ub=*/IntegerValue(rhs));
1916 cut.AddTerm(z, IntegerValue(-1));
1917 if (x_coeff != 0) cut.AddTerm(x, IntegerValue(x_coeff));
1918 if (y_coeff != 0) cut.AddTerm(y, IntegerValue(y_coeff));
1919 manager->AddCut(cut.Build(), "PositiveProduct");
1920 }
1921 };
1922
1923 // Cut -z + x_coeff * x + y_coeff* y >= rhs
1924 auto try_add_below_cut = [&](int64_t x_coeff, int64_t y_coeff,
1925 int64_t rhs) {
1926 if (-z_lp_value + x_lp_value * x_coeff + y_lp_value * y_coeff <=
1927 rhs - kMinCutViolation) {
1928 LinearConstraintBuilder cut(model, /*lb=*/IntegerValue(rhs),
1929 /*ub=*/kMaxIntegerValue);
1930 cut.AddTerm(z, IntegerValue(-1));
1931 if (x_coeff != 0) cut.AddTerm(x, IntegerValue(x_coeff));
1932 if (y_coeff != 0) cut.AddTerm(y, IntegerValue(y_coeff));
1933 manager->AddCut(cut.Build(), "PositiveProduct");
1934 }
1935 };
1936
1937 // McCormick relaxation of bilinear constraints. These 4 cuts are the
1938 // exact facets of the x * y polyhedron for a bounded x and y.
1939 //
1940 // Each cut correspond to plane that contains two of the line
1941 // (x=x_lb), (x=x_ub), (y=y_lb), (y=y_ub). The easiest to
1942 // understand them is to draw the x*y curves and see the 4
1943 // planes that correspond to the convex hull of the graph.
1944 try_add_above_cut(y_lb, x_lb, x_lb * y_lb);
1945 try_add_above_cut(y_ub, x_ub, x_ub * y_ub);
1946 try_add_below_cut(y_ub, x_lb, x_lb * y_ub);
1947 try_add_below_cut(y_lb, x_ub, x_ub * y_lb);
1948 return true;
1949 };
1950
1951 return result;
1952}
1953
1955 AffineExpression square,
1956 IntegerValue x_lb,
1957 IntegerValue x_ub, Model* model) {
1958 const IntegerValue above_slope = x_ub + x_lb;
1960 -x_lb * x_ub);
1961 above_hyperplan.AddTerm(square, 1);
1962 above_hyperplan.AddTerm(x, -above_slope);
1963 return above_hyperplan.Build();
1964}
1965
1967 AffineExpression square,
1968 IntegerValue x_value,
1969 Model* model) {
1970 const IntegerValue below_slope = 2 * x_value + 1;
1971 LinearConstraintBuilder below_hyperplan(model, -x_value - x_value * x_value,
1973 below_hyperplan.AddTerm(square, 1);
1974 below_hyperplan.AddTerm(x, -below_slope);
1975 return below_hyperplan.Build();
1976}
1977
1979 int linearization_level, Model* model) {
1980 CutGenerator result;
1981 if (x.var != kNoIntegerVariable) result.vars.push_back(x.var);
1982 if (y.var != kNoIntegerVariable) result.vars.push_back(y.var);
1983
1984 Trail* trail = model->GetOrCreate<Trail>();
1985 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
1986 result.generate_cuts = [y, x, linearization_level, trail, integer_trail,
1987 model](LinearConstraintManager* manager) {
1988 if (trail->CurrentDecisionLevel() > 0 && linearization_level == 1) {
1989 return true;
1990 }
1991 const IntegerValue x_ub = integer_trail->LevelZeroUpperBound(x);
1992 const IntegerValue x_lb = integer_trail->LevelZeroLowerBound(x);
1993 DCHECK_GE(x_lb, 0);
1994
1995 if (x_lb == x_ub) return true;
1996
1997 // Check for potential overflows.
1998 if (x_ub > (int64_t{1} << 31)) return true;
1999 DCHECK_GE(x_lb, 0);
2000 manager->AddCut(ComputeHyperplanAboveSquare(x, y, x_lb, x_ub, model),
2001 "SquareUpper");
2002
2003 const IntegerValue x_floor =
2004 static_cast<int64_t>(std::floor(x.LpValue(manager->LpValues())));
2005 manager->AddCut(ComputeHyperplanBelowSquare(x, y, x_floor, model),
2006 "SquareLower");
2007 return true;
2008 };
2009
2010 return result;
2011}
2012
2013ImpliedBoundsProcessor::BestImpliedBoundInfo
2014ImpliedBoundsProcessor::GetCachedImpliedBoundInfo(IntegerVariable var) const {
2015 auto it = cache_.find(var);
2016 if (it != cache_.end()) {
2017 BestImpliedBoundInfo result = it->second;
2018 if (result.bool_var == kNoIntegerVariable) return BestImpliedBoundInfo();
2019 if (integer_trail_->IsFixed(result.bool_var)) return BestImpliedBoundInfo();
2020 return result;
2021 }
2022 return BestImpliedBoundInfo();
2023}
2024
2026ImpliedBoundsProcessor::ComputeBestImpliedBound(
2027 IntegerVariable var,
2029 auto it = cache_.find(var);
2030 if (it != cache_.end()) return it->second;
2031 BestImpliedBoundInfo result;
2032 double result_slack_lp_value = std::numeric_limits<double>::infinity();
2033 const IntegerValue lb = integer_trail_->LevelZeroLowerBound(var);
2034 for (const ImpliedBoundEntry& entry :
2035 implied_bounds_->GetImpliedBounds(var)) {
2036 // Only process entries with a Boolean variable currently part of the LP
2037 // we are considering for this cut.
2038 //
2039 // TODO(user): the more we use cuts, the less it make sense to have a
2040 // lot of small independent LPs.
2041 if (!lp_vars_.contains(PositiveVariable(entry.literal_view))) {
2042 continue;
2043 }
2044
2045 // The equation is X = lb + diff * Bool + Slack where Bool is in [0, 1]
2046 // and slack in [0, ub - lb].
2047 const IntegerValue diff = entry.lower_bound - lb;
2048 CHECK_GE(diff, 0);
2049 const double bool_lp_value = entry.is_positive
2050 ? lp_values[entry.literal_view]
2051 : 1.0 - lp_values[entry.literal_view];
2052 const double slack_lp_value =
2053 lp_values[var] - ToDouble(lb) - bool_lp_value * ToDouble(diff);
2054
2055 // If the implied bound equation is not respected, we just add it
2056 // to implied_bound_cuts, and skip the entry for now.
2057 if (slack_lp_value < -1e-4) {
2058 LinearConstraint ib_cut;
2059 ib_cut.lb = kMinIntegerValue;
2060 std::vector<std::pair<IntegerVariable, IntegerValue>> terms;
2061 if (entry.is_positive) {
2062 // X >= Indicator * (bound - lb) + lb
2063 terms.push_back({entry.literal_view, diff});
2064 terms.push_back({var, IntegerValue(-1)});
2065 ib_cut.ub = -lb;
2066 } else {
2067 // X >= -Indicator * (bound - lb) + bound
2068 terms.push_back({entry.literal_view, -diff});
2069 terms.push_back({var, IntegerValue(-1)});
2070 ib_cut.ub = -entry.lower_bound;
2071 }
2072 CleanTermsAndFillConstraint(&terms, &ib_cut);
2073 ib_cut_pool_.AddCut(std::move(ib_cut), "IB", lp_values);
2074 continue;
2075 }
2076
2077 // We look for tight implied bounds, and amongst the tightest one, we
2078 // prefer larger coefficient in front of the Boolean.
2079 if (slack_lp_value + 1e-4 < result_slack_lp_value ||
2080 (slack_lp_value < result_slack_lp_value + 1e-4 &&
2081 entry.lower_bound > result.implied_bound)) {
2082 result_slack_lp_value = slack_lp_value;
2083 result.var_lp_value = lp_values[var];
2084 result.bool_lp_value = bool_lp_value;
2085 result.implied_bound = entry.lower_bound;
2086 result.is_positive = entry.is_positive;
2087 result.bool_var = entry.literal_view;
2088 }
2089 }
2090 cache_[var] = result;
2091 return result;
2092}
2093
2094void ImpliedBoundsProcessor::RecomputeCacheAndSeparateSomeImpliedBoundCuts(
2096 cache_.clear();
2097 for (const IntegerVariable var :
2098 implied_bounds_->VariablesWithImpliedBounds()) {
2099 if (!lp_vars_.contains(PositiveVariable(var))) continue;
2100 ComputeBestImpliedBound(var, lp_values);
2101 }
2102}
2103
2104bool ImpliedBoundsProcessor::DecomposeWithImpliedLowerBound(
2105 const CutTerm& term, IntegerValue factor_t, CutTerm& bool_term,
2106 CutTerm& slack_term) {
2107 // We only want to expand non-Boolean and non-slack term!
2108 if (term.bound_diff <= 1) return false;
2109 if (!term.IsSimple()) return false;
2110 DCHECK_EQ(IntTypeAbs(term.expr_coeffs[0]), 1);
2111
2112 // Try lower bounded direction for implied bound.
2113 // This kind should always be beneficial if it exists:
2114 //
2115 // Because X = bound_diff * B + S
2116 // We can replace coeff * X by the expression before applying f:
2117 // = f(coeff * bound_diff) * B + f(coeff) * [X - bound_diff * B]
2118 // = f(coeff) * X + (f(coeff * bound_diff) - f(coeff) * bound_diff] * B
2119 // So we can "lift" B into the cut with a non-negative coefficient.
2120 //
2121 // Note that this lifting is really the same as if we used that implied
2122 // bound before since in f(coeff * bound_diff) * B + f(coeff) * S, if we
2123 // replace S by its value [X - bound_diff * B] we get the same result.
2124 //
2125 // TODO(user): Ignore if bound_diff == 1 ? But we can still merge B with
2126 // another entry if it exists, so it can still be good in this case.
2127 //
2128 // TODO(user): Only do it if coeff_b > 0 ? But again we could still merge
2129 // B with an existing Boolean for a better cut even if coeff_b == 0.
2130 if (term.cached_implied_lb < 0) return false;
2131 const BestImpliedBoundInfo info = cached_data_[term.cached_implied_lb];
2132 const IntegerValue lb = -term.expr_offset;
2133 const IntegerValue bound_diff = info.implied_bound - lb;
2134 if (bound_diff <= 0) return false;
2135 if (ProdOverflow(factor_t, CapProdI(term.coeff, bound_diff))) return false;
2136
2137 // We have X/-X = info.diff * Boolean + slack.
2138 bool_term.coeff = term.coeff * bound_diff;
2139 bool_term.expr_vars[0] = info.bool_var;
2140 bool_term.expr_coeffs[1] = 0;
2141 bool_term.bound_diff = IntegerValue(1);
2142 bool_term.lp_value = info.bool_lp_value;
2143 if (info.is_positive) {
2144 bool_term.expr_coeffs[0] = IntegerValue(1);
2145 bool_term.expr_offset = IntegerValue(0);
2146 } else {
2147 bool_term.expr_coeffs[0] = IntegerValue(-1);
2148 bool_term.expr_offset = IntegerValue(1);
2149 }
2150
2151 // Create slack.
2152 // The expression is term.exp - bound_diff * bool_term
2153 // The variable shouldn't be the same.
2154 DCHECK_NE(term.expr_vars[0], bool_term.expr_vars[0]);
2155 slack_term.expr_vars[0] = term.expr_vars[0];
2156 slack_term.expr_coeffs[0] = term.expr_coeffs[0];
2157 slack_term.expr_vars[1] = bool_term.expr_vars[0];
2158 slack_term.expr_coeffs[1] = -bound_diff * bool_term.expr_coeffs[0];
2159 slack_term.expr_offset =
2160 term.expr_offset - bound_diff * bool_term.expr_offset;
2161
2162 slack_term.lp_value = info.SlackLpValue(lb);
2163 slack_term.coeff = term.coeff;
2164 slack_term.bound_diff = term.bound_diff;
2165
2166 return true;
2167}
2168
2169// We use the fact that calling DecomposeWithImpliedLowerBound() with
2170// term.Complement() give us almost what we want. You have
2171// -complement(X) = -diff.B - slack
2172// - (diff - X) = -diff.(1 -(1- B)) - slack
2173// X = diff.(1 - B) - slack;
2174bool ImpliedBoundsProcessor::DecomposeWithImpliedUpperBound(
2175 const CutTerm& term, IntegerValue factor_t, CutTerm& bool_term,
2176 CutTerm& slack_term) {
2177 absl::int128 unused = 0;
2178 CutTerm complement = term;
2179 complement.Complement(&unused);
2180 if (!DecomposeWithImpliedLowerBound(complement, factor_t, bool_term,
2181 slack_term)) {
2182 return false;
2183 }
2184 // This is required not to have a constant term which might mess up our cut
2185 // heuristics.
2186 if (IntTypeAbs(bool_term.coeff) !=
2188 return false;
2189 }
2190 bool_term.Complement(&unused);
2191 CHECK_EQ(unused, absl::int128(0));
2192 return true;
2193}
2194
// Goes over the cut terms and, where beneficial, replaces a term by an
// implied-bound decomposition before the superadditive function f is applied:
// the lower-bound decomposition is always valid, while the upper-bound one is
// only taken when its estimated score beats the default by a safety margin.
// The decomposed term is overwritten by its slack part and the Boolean part
// is added (or merged) via `builder`. Returns {num_lb_applied, num_ub_applied}.
std::pair<int, int> ImpliedBoundsProcessor::PostprocessWithImpliedBound(
    const std::function<IntegerValue(IntegerValue)>& f, IntegerValue factor_t,
    CutData* cut, CutDataBuilder* builder) {
  int num_applied_lb = 0;
  int num_applied_ub = 0;

  CutTerm bool_term;
  CutTerm slack_term;
  CutTerm ub_bool_term;
  CutTerm ub_slack_term;
  builder->ClearIndices();
  // Iterate over the initial terms only: AddOrMergeTerm() below may append
  // new terms to cut->terms.
  const int initial_size = cut->terms.size();
  for (int i = 0; i < initial_size; ++i) {
    CutTerm& term = cut->terms[i];
    if (term.bound_diff <= 1) continue;
    if (!term.IsSimple()) continue;

    // Score is just the final lp value.
    // Higher is better since it is a <= constraint.
    double base_score;
    bool expand = false;
    if (DecomposeWithImpliedLowerBound(term, factor_t, bool_term, slack_term)) {
      // This side is always good.
      // c.X = c.d.B + c.S
      // applying f to the result we have f(c.d).B + f(c).[X - d.B]
      // which give f(c).X + [f(c.d) - f(c).d].B
      // and the second term is always positive by super-additivity.
      expand = true;
      base_score = AsDouble(f(bool_term.coeff)) * bool_term.lp_value +
                   AsDouble(f(slack_term.coeff)) * slack_term.lp_value;
    } else {
      base_score = AsDouble(f(term.coeff)) * term.lp_value;
    }

    // Test if it is better to use this "bad" side.
    //
    // Use the implied bound on (-X) if it is beneficial to do so.
    // Like complementing, this is not always good.
    //
    // We have comp(X) = diff - X = diff * B + S
    //               X = diff * (1 - B) - S.
    // So if we applies f, we will get:
    //    f(coeff * diff) * (1 - B) + f(-coeff) * S
    // and substituing S = diff * (1 - B) - X, we get:
    //    -f(-coeff) * X + [f(coeff * diff) + f(-coeff) * diff] (1 - B).
    //
    // TODO(user): Note that while the violation might be higher, if the slack
    // becomes large this will result in a less powerfull cut. Shall we do
    // that? It is a bit the same problematic with complementing.
    //
    // TODO(user): If the slack is close to zero, then this transformation
    // will always increase the violation. So we could potentially do it in
    // Before our divisor selection heuristic. But the norm of the final cut
    // will increase too.
    if (DecomposeWithImpliedUpperBound(term, factor_t, ub_bool_term,
                                       ub_slack_term)) {
      const double score =
          AsDouble(f(ub_bool_term.coeff)) * ub_bool_term.lp_value +
          AsDouble(f(ub_slack_term.coeff)) * ub_slack_term.lp_value;
      // Note that because the slack is of the opposite sign, we might
      // loose more, so we prefer to be a bit defensive.
      if (score > base_score + 1e-2) {
        ++num_applied_ub;
        term = ub_slack_term;  // Override first before push_back() !
        builder->AddOrMergeTerm(ub_bool_term, factor_t, cut);
        continue;
      }
    }

    if (expand) {
      ++num_applied_lb;
      term = slack_term;  // Override first before push_back() !
      builder->AddOrMergeTerm(bool_term, factor_t, cut);
    }
  }
  return {num_applied_lb, num_applied_ub};
}
2272
// Important: The cut_builder_ must have been reset.
//
// Expands cut->terms[i] using its cached implied lower bound: the term is
// replaced in place by the slack part while the Boolean part is added (or
// merged) to the cut. When `complement` is true, any part whose lp value is
// in the upper half of its range is complemented first. Returns false when
// the term cannot be decomposed.
bool ImpliedBoundsProcessor::TryToExpandWithLowerImpliedbound(
    IntegerValue factor_t, int i, bool complement, CutData* cut,
    CutDataBuilder* builder) {
  CutTerm& term = cut->terms[i];

  CutTerm bool_term;
  CutTerm slack_term;
  if (!DecomposeWithImpliedLowerBound(term, factor_t, bool_term, slack_term)) {
    return false;
  }

  // It should be good to use IB, but sometime we have things like
  // 7.3 = 2 * bool@1 + 5.3 and the expanded Boolean is at its upper bound.
  // It is always good to complement such variable.
  //
  // Note that here we do more and just complement anything closer to UB.
  //
  // TODO(user): Because of merges, we might have entry with a coefficient of
  // zero than are not useful. Remove them.
  if (complement) {
    if (bool_term.lp_value > 0.5) {
      bool_term.Complement(&cut->rhs);
    }
    if (slack_term.lp_value > 0.5 * AsDouble(slack_term.bound_diff)) {
      slack_term.Complement(&cut->rhs);
    }
  }

  term = slack_term;
  builder->AddOrMergeTerm(bool_term, factor_t, cut);
  return true;
}
2306
// Scans the cut and caches, for each simple non-Boolean term over an actual
// problem variable (i.e. below `first_slack`), the best implied lower- and
// upper-bound info. Indices into cached_data_ are stored on the terms
// themselves (cached_implied_lb / cached_implied_ub). Returns true if at
// least one entry was cached.
bool ImpliedBoundsProcessor::CacheDataForCut(IntegerVariable first_slack,
                                             CutData* cut) {
  base_cut_builder_.ClearIndices();
  cached_data_.clear();

  const int size = cut->terms.size();
  for (int i = 0; i < size; ++i) {
    const CutTerm& term = cut->terms[i];
    if (!term.IsSimple()) continue;
    if (term.IsBoolean()) continue;
    // Slack variables have no implied bounds to look up.
    if (term.expr_vars[0] >= first_slack) continue;

    // Cache the BestImpliedBoundInfo if relevant.
    //
    // The lower bound of the signed expression corresponds to the lower bound
    // of the variable (or its negation when the coefficient is negative).
    const IntegerVariable ib_var = term.expr_coeffs[0] > 0
                                       ? term.expr_vars[0]
                                       : NegationOf(term.expr_vars[0]);
    BestImpliedBoundInfo lb_info = GetCachedImpliedBoundInfo(ib_var);
    if (lb_info.bool_var != kNoIntegerVariable) {
      cut->terms[i].cached_implied_lb = cached_data_.size();
      cached_data_.emplace_back(std::move(lb_info));
    }
    BestImpliedBoundInfo ub_info =
        GetCachedImpliedBoundInfo(NegationOf(ib_var));
    if (ub_info.bool_var != kNoIntegerVariable) {
      cut->terms[i].cached_implied_ub = cached_data_.size();
      cached_data_.emplace_back(std::move(ub_info));
    }
  }

  return !cached_data_.empty();
}
2338
2339void SumOfAllDiffLowerBounder::Clear() {
2340 min_values_.clear();
2341 expr_mins_.clear();
2342}
2343
// Registers one more expression: records its level-zero minimum in
// expr_mins_, and inserts into min_values_ up to `num_exprs` of the smallest
// values the expression can take (a fixed expression contributes exactly one
// value). Iterating the (possibly negated) domain in the direction matching
// the coefficient sign yields expression values from smallest to largest —
// assumes Domain::Values() iterates in increasing order.
void SumOfAllDiffLowerBounder::Add(const AffineExpression& expr, int num_exprs,
                                   const IntegerTrail& integer_trail) {
  expr_mins_.push_back(integer_trail.LevelZeroLowerBound(expr).value());

  if (integer_trail.IsFixed(expr)) {
    min_values_.insert(integer_trail.FixedValue(expr));
  } else {
    if (expr.coeff > 0) {
      // Positive coefficient: increasing domain values give increasing
      // expression values.
      int count = 0;
      for (const IntegerValue value :
           integer_trail.InitialVariableDomain(expr.var).Values()) {
        min_values_.insert(expr.ValueAt(value));
        if (++count >= num_exprs) break;
      }
    } else {
      // Negative coefficient: walk the negated domain so that the first
      // values enumerated are again the smallest expression values.
      int count = 0;
      for (const IntegerValue value :
           integer_trail.InitialVariableDomain(expr.var).Negation().Values()) {
        min_values_.insert(-expr.ValueAt(value));
        if (++count >= num_exprs) break;
      }
    }
  }
}
2368
2369IntegerValue SumOfAllDiffLowerBounder::SumOfMinDomainValues() {
2370 int count = 0;
2371 IntegerValue sum = 0;
2372 for (const IntegerValue value : min_values_) {
2373 sum += value;
2374 if (++count >= expr_mins_.size()) return sum;
2375 }
2376 return sum;
2377}
2378
2379IntegerValue SumOfAllDiffLowerBounder::SumOfDifferentMins() {
2380 std::sort(expr_mins_.begin(), expr_mins_.end());
2381 IntegerValue tmp_value = kMinIntegerValue;
2382 IntegerValue result = 0;
2383 for (const IntegerValue value : expr_mins_) {
2384 // Make sure values are different.
2385 tmp_value = std::max(tmp_value + 1, value);
2386 result += tmp_value;
2387 }
2388 return result;
2389}
2390
2391IntegerValue SumOfAllDiffLowerBounder::GetBestLowerBound(std::string& suffix) {
2392 const IntegerValue domain_bound = SumOfMinDomainValues();
2393 const IntegerValue alldiff_bound = SumOfDifferentMins();
2394 if (domain_bound > alldiff_bound) {
2395 suffix = "d";
2396 return domain_bound;
2397 }
2398 suffix = alldiff_bound > domain_bound ? "a" : "e";
2399 return alldiff_bound;
2400}
2401
2402namespace {
2403
2404void TryToGenerateAllDiffCut(
2405 const std::vector<std::pair<double, AffineExpression>>& sorted_exprs_lp,
2406 const IntegerTrail& integer_trail,
2408 TopNCuts& top_n_cuts, Model* model) {
2409 const int num_exprs = sorted_exprs_lp.size();
2410
2411 std::vector<AffineExpression> current_set_exprs;
2412 SumOfAllDiffLowerBounder diff_mins;
2413 SumOfAllDiffLowerBounder negated_diff_maxes;
2414
2415 double sum = 0.0;
2416
2417 for (const auto& [expr_lp, expr] : sorted_exprs_lp) {
2418 sum += expr_lp;
2419 diff_mins.Add(expr, num_exprs, integer_trail);
2420 negated_diff_maxes.Add(expr.Negated(), num_exprs, integer_trail);
2421 current_set_exprs.push_back(expr);
2422 CHECK_EQ(current_set_exprs.size(), diff_mins.size());
2423 CHECK_EQ(current_set_exprs.size(), negated_diff_maxes.size());
2424 std::string min_suffix;
2425 const IntegerValue required_min_sum =
2426 diff_mins.GetBestLowerBound(min_suffix);
2427 std::string max_suffix;
2428 const IntegerValue required_max_sum =
2429 -negated_diff_maxes.GetBestLowerBound(max_suffix);
2430 if (sum < ToDouble(required_min_sum) - kMinCutViolation ||
2431 sum > ToDouble(required_max_sum) + kMinCutViolation) {
2432 LinearConstraintBuilder cut(model, required_min_sum, required_max_sum);
2433 for (AffineExpression expr : current_set_exprs) {
2434 cut.AddTerm(expr, IntegerValue(1));
2435 }
2436 top_n_cuts.AddCut(cut.Build(),
2437 absl::StrCat("AllDiff_", min_suffix, max_suffix),
2438 lp_values);
2439
2440 // NOTE: We can extend the current set but it is more helpful to generate
2441 // the cut on a different set of variables so we reset the counters.
2442 sum = 0.0;
2443 current_set_exprs.clear();
2444 diff_mins.Clear();
2445 negated_diff_maxes.Clear();
2446 }
2447 }
2448}
2449
2450} // namespace
2451
2453 const std::vector<AffineExpression>& exprs, Model* model) {
2454 CutGenerator result;
2455 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
2456
2457 for (const AffineExpression& expr : exprs) {
2458 if (!integer_trail->IsFixed(expr)) {
2459 result.vars.push_back(expr.var);
2460 }
2461 }
2463
2464 Trail* trail = model->GetOrCreate<Trail>();
2465 result.generate_cuts = [exprs, integer_trail, trail,
2466 model](LinearConstraintManager* manager) {
2467 // These cuts work at all levels but the generator adds too many cuts on
2468 // some instances and degrade the performance so we only use it at level
2469 // 0.
2470 if (trail->CurrentDecisionLevel() > 0) return true;
2471 const auto& lp_values = manager->LpValues();
2472 std::vector<std::pair<double, AffineExpression>> sorted_exprs;
2473 for (const AffineExpression expr : exprs) {
2474 if (integer_trail->LevelZeroLowerBound(expr) ==
2475 integer_trail->LevelZeroUpperBound(expr)) {
2476 continue;
2477 }
2478 sorted_exprs.push_back(std::make_pair(expr.LpValue(lp_values), expr));
2479 }
2480
2481 TopNCuts top_n_cuts(5);
2482 std::sort(sorted_exprs.begin(), sorted_exprs.end(),
2483 [](std::pair<double, AffineExpression>& a,
2484 const std::pair<double, AffineExpression>& b) {
2485 return a.first < b.first;
2486 });
2487 TryToGenerateAllDiffCut(sorted_exprs, *integer_trail, lp_values, top_n_cuts,
2488 model);
2489 // Other direction.
2490 std::reverse(sorted_exprs.begin(), sorted_exprs.end());
2491 TryToGenerateAllDiffCut(sorted_exprs, *integer_trail, lp_values, top_n_cuts,
2492 model);
2493 top_n_cuts.TransferToManager(manager);
2494 return true;
2495 };
2496 VLOG(2) << "Created all_diff cut generator of size: " << exprs.size();
2497 return result;
2498}
2499
2500namespace {
2501// Returns max((w2i - w1i)*Li, (w2i - w1i)*Ui).
2502IntegerValue MaxCornerDifference(const IntegerVariable var,
2503 const IntegerValue w1_i,
2504 const IntegerValue w2_i,
2505 const IntegerTrail& integer_trail) {
2506 const IntegerValue lb = integer_trail.LevelZeroLowerBound(var);
2507 const IntegerValue ub = integer_trail.LevelZeroUpperBound(var);
2508 return std::max((w2_i - w1_i) * lb, (w2_i - w1_i) * ub);
2509}
2510
2511// This is the coefficient of zk in the cut, where k = max_index.
2512// MPlusCoefficient_ki = max((wki - wI(i)i) * Li,
2513// (wki - wI(i)i) * Ui)
2514// = max corner difference for variable i,
2515// target expr I(i), max expr k.
2516// The coefficient of zk is Sum(i=1..n)(MPlusCoefficient_ki) + bk
2517IntegerValue MPlusCoefficient(
2518 const std::vector<IntegerVariable>& x_vars,
2519 const std::vector<LinearExpression>& exprs,
2520 const util_intops::StrongVector<IntegerVariable, int>& variable_partition,
2521 const int max_index, const IntegerTrail& integer_trail) {
2522 IntegerValue coeff = exprs[max_index].offset;
2523 // TODO(user): This algo is quadratic since GetCoefficientOfPositiveVar()
2524 // is linear. This can be optimized (better complexity) if needed.
2525 for (const IntegerVariable var : x_vars) {
2526 const int target_index = variable_partition[var];
2527 if (max_index != target_index) {
2528 coeff += MaxCornerDifference(
2529 var, GetCoefficientOfPositiveVar(var, exprs[target_index]),
2530 GetCoefficientOfPositiveVar(var, exprs[max_index]), integer_trail);
2531 }
2532 }
2533 return coeff;
2534}
2535
2536// Compute the value of
2537// rhs = wI(i)i * xi + Sum(k=1..d)(MPlusCoefficient_ki * zk)
2538// for variable xi for given target index I(i).
2539double ComputeContribution(
2540 const IntegerVariable xi_var, const std::vector<IntegerVariable>& z_vars,
2541 const std::vector<LinearExpression>& exprs,
2543 const IntegerTrail& integer_trail, const int target_index) {
2544 CHECK_GE(target_index, 0);
2545 CHECK_LT(target_index, exprs.size());
2546 const LinearExpression& target_expr = exprs[target_index];
2547 const double xi_value = lp_values[xi_var];
2548 const IntegerValue wt_i = GetCoefficientOfPositiveVar(xi_var, target_expr);
2549 double contrib = ToDouble(wt_i) * xi_value;
2550 for (int expr_index = 0; expr_index < exprs.size(); ++expr_index) {
2551 if (expr_index == target_index) continue;
2552 const LinearExpression& max_expr = exprs[expr_index];
2553 const double z_max_value = lp_values[z_vars[expr_index]];
2554 const IntegerValue corner_value = MaxCornerDifference(
2555 xi_var, wt_i, GetCoefficientOfPositiveVar(xi_var, max_expr),
2556 integer_trail);
2557 contrib += ToDouble(corner_value) * z_max_value;
2558 }
2559 return contrib;
2560}
2561} // namespace
2562
2564 const IntegerVariable target, const std::vector<LinearExpression>& exprs,
2565 const std::vector<IntegerVariable>& z_vars, Model* model) {
2566 CutGenerator result;
2567 std::vector<IntegerVariable> x_vars;
2568 result.vars = {target};
2569 const int num_exprs = exprs.size();
2570 for (int i = 0; i < num_exprs; ++i) {
2571 result.vars.push_back(z_vars[i]);
2572 x_vars.insert(x_vars.end(), exprs[i].vars.begin(), exprs[i].vars.end());
2573 }
2575 // All expressions should only contain positive variables.
2576 DCHECK(std::all_of(x_vars.begin(), x_vars.end(), [](IntegerVariable var) {
2577 return VariableIsPositive(var);
2578 }));
2579 result.vars.insert(result.vars.end(), x_vars.begin(), x_vars.end());
2580
2581 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
2582 result.generate_cuts = [x_vars, z_vars, target, num_exprs, exprs,
2583 integer_trail,
2584 model](LinearConstraintManager* manager) {
2585 const auto& lp_values = manager->LpValues();
2587 lp_values.size(), -1);
2589 variable_partition_contrib(lp_values.size(),
2590 std::numeric_limits<double>::infinity());
2591 for (int expr_index = 0; expr_index < num_exprs; ++expr_index) {
2592 for (const IntegerVariable var : x_vars) {
2593 const double contribution = ComputeContribution(
2594 var, z_vars, exprs, lp_values, *integer_trail, expr_index);
2595 const double prev_contribution = variable_partition_contrib[var];
2596 if (contribution < prev_contribution) {
2597 variable_partition[var] = expr_index;
2598 variable_partition_contrib[var] = contribution;
2599 }
2600 }
2601 }
2602
2603 LinearConstraintBuilder cut(model, /*lb=*/IntegerValue(0),
2604 /*ub=*/kMaxIntegerValue);
2605 double violation = lp_values[target];
2606 cut.AddTerm(target, IntegerValue(-1));
2607
2608 for (const IntegerVariable xi_var : x_vars) {
2609 const int input_index = variable_partition[xi_var];
2610 const LinearExpression& expr = exprs[input_index];
2611 const IntegerValue coeff = GetCoefficientOfPositiveVar(xi_var, expr);
2612 if (coeff != IntegerValue(0)) {
2613 cut.AddTerm(xi_var, coeff);
2614 }
2615 violation -= ToDouble(coeff) * lp_values[xi_var];
2616 }
2617 for (int expr_index = 0; expr_index < num_exprs; ++expr_index) {
2618 const IntegerVariable z_var = z_vars[expr_index];
2619 const IntegerValue z_coeff = MPlusCoefficient(
2620 x_vars, exprs, variable_partition, expr_index, *integer_trail);
2621 if (z_coeff != IntegerValue(0)) {
2622 cut.AddTerm(z_var, z_coeff);
2623 }
2624 violation -= ToDouble(z_coeff) * lp_values[z_var];
2625 }
2626 if (violation > 1e-2) {
2627 manager->AddCut(cut.Build(), "LinMax");
2628 }
2629 return true;
2630 };
2631 return result;
2632}
2633
2634namespace {
2635
2636IntegerValue EvaluateMaxAffine(
2637 const std::vector<std::pair<IntegerValue, IntegerValue>>& affines,
2638 IntegerValue x) {
2639 IntegerValue y = kMinIntegerValue;
2640 for (const auto& p : affines) {
2641 y = std::max(y, x * p.first + p.second);
2642 }
2643 return y;
2644}
2645
2646} // namespace
2647
2649 const LinearExpression& target, IntegerVariable var,
2650 const std::vector<std::pair<IntegerValue, IntegerValue>>& affines,
2651 Model* model, LinearConstraintBuilder* builder) {
2652 auto* integer_trail = model->GetOrCreate<IntegerTrail>();
2653 const IntegerValue x_min = integer_trail->LevelZeroLowerBound(var);
2654 const IntegerValue x_max = integer_trail->LevelZeroUpperBound(var);
2655
2656 const IntegerValue y_at_min = EvaluateMaxAffine(affines, x_min);
2657 const IntegerValue y_at_max = EvaluateMaxAffine(affines, x_max);
2658
2659 const IntegerValue delta_x = x_max - x_min;
2660 const IntegerValue delta_y = y_at_max - y_at_min;
2661
2662 // target <= y_at_min + (delta_y / delta_x) * (var - x_min)
2663 // delta_x * target <= delta_x * y_at_min + delta_y * (var - x_min)
2664 // -delta_y * var + delta_x * target <= delta_x * y_at_min - delta_y * x_min
2665 //
2666 // Checks the rhs for overflows.
2667 if (ProdOverflow(delta_x, y_at_min) || ProdOverflow(delta_x, y_at_max) ||
2668 ProdOverflow(delta_y, x_min) || ProdOverflow(delta_y, x_max)) {
2669 return false;
2670 }
2671
2672 // Checks target * delta_x for overflow.
2673 int64_t abs_magnitude = std::abs(target.offset.value());
2674 for (int i = 0; i < target.vars.size(); ++i) {
2675 const IntegerVariable var = target.vars[i];
2676 const IntegerValue var_min = integer_trail->LevelZeroLowerBound(var);
2677 const IntegerValue var_max = integer_trail->LevelZeroUpperBound(var);
2678 abs_magnitude = CapAdd(
2679 CapProd(std::max(std::abs(var_min.value()), std::abs(var_max.value())),
2680 std::abs(target.coeffs[i].value())),
2681 abs_magnitude);
2682 }
2683 if (AtMinOrMaxInt64(abs_magnitude) ||
2684 AtMinOrMaxInt64(CapProd(abs_magnitude, delta_x.value()))) {
2685 return false;
2686 }
2687
2688 builder->ResetBounds(kMinIntegerValue, delta_x * y_at_min - delta_y * x_min);
2689 builder->AddLinearExpression(target, delta_x);
2690 builder->AddTerm(var, -delta_y);
2691
2692 // Prevent to create constraints that can overflow.
2693 if (!ValidateLinearConstraintForOverflow(builder->Build(), *integer_trail)) {
2694 VLOG(2) << "Linear constraint can cause overflow: " << builder->Build();
2695
2696 return false;
2697 }
2698
2699 return true;
2700}
2701
2703 LinearExpression target, IntegerVariable var,
2704 std::vector<std::pair<IntegerValue, IntegerValue>> affines,
2705 const std::string cut_name, Model* model) {
2706 CutGenerator result;
2707 result.vars = target.vars;
2708 result.vars.push_back(var);
2710
2711 IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
2712 result.generate_cuts = [target, var, affines, cut_name, integer_trail,
2713 model](LinearConstraintManager* manager) {
2714 if (integer_trail->IsFixed(var)) return true;
2716 if (BuildMaxAffineUpConstraint(target, var, affines, model, &builder)) {
2717 manager->AddCut(builder.Build(), cut_name);
2718 }
2719 return true;
2720 };
2721 return result;
2722}
2723
2725 const std::vector<IntegerVariable>& base_variables, Model* model) {
2726 // Filter base_variables to only keep the one with a literal view, and
2727 // do the conversion.
2728 std::vector<IntegerVariable> variables;
2729 std::vector<Literal> literals;
2730 absl::flat_hash_map<LiteralIndex, IntegerVariable> positive_map;
2731 absl::flat_hash_map<LiteralIndex, IntegerVariable> negative_map;
2732 auto* integer_trail = model->GetOrCreate<IntegerTrail>();
2733 auto* encoder = model->GetOrCreate<IntegerEncoder>();
2734 for (const IntegerVariable var : base_variables) {
2735 if (integer_trail->LowerBound(var) != IntegerValue(0)) continue;
2736 if (integer_trail->UpperBound(var) != IntegerValue(1)) continue;
2737 const LiteralIndex literal_index = encoder->GetAssociatedLiteral(
2738 IntegerLiteral::GreaterOrEqual(var, IntegerValue(1)));
2739 if (literal_index != kNoLiteralIndex) {
2740 variables.push_back(var);
2741 literals.push_back(Literal(literal_index));
2742 positive_map[literal_index] = var;
2743 negative_map[Literal(literal_index).NegatedIndex()] = var;
2744 }
2745 }
2746 CutGenerator result;
2747 result.vars = variables;
2748 auto* implication_graph = model->GetOrCreate<BinaryImplicationGraph>();
2749 result.generate_cuts = [variables, literals, implication_graph, positive_map,
2750 negative_map,
2751 model](LinearConstraintManager* manager) {
2752 std::vector<double> packed_values;
2753 const auto& lp_values = manager->LpValues();
2754 for (int i = 0; i < literals.size(); ++i) {
2755 packed_values.push_back(lp_values[variables[i]]);
2756 }
2757 const std::vector<std::vector<Literal>> at_most_ones =
2758 implication_graph->GenerateAtMostOnesWithLargeWeight(literals,
2759 packed_values);
2760
2761 for (const std::vector<Literal>& at_most_one : at_most_ones) {
2762 // We need to express such "at most one" in term of the initial
2763 // variables, so we do not use the
2764 // LinearConstraintBuilder::AddLiteralTerm() here.
2766 model, IntegerValue(std::numeric_limits<int64_t>::min()),
2767 IntegerValue(1));
2768 for (const Literal l : at_most_one) {
2769 if (positive_map.contains(l.Index())) {
2770 builder.AddTerm(positive_map.at(l.Index()), IntegerValue(1));
2771 } else {
2772 // Add 1 - X to the linear constraint.
2773 builder.AddTerm(negative_map.at(l.Index()), IntegerValue(-1));
2774 builder.AddConstant(IntegerValue(1));
2775 }
2776 }
2777
2778 manager->AddCut(builder.Build(), "Clique");
2779 }
2780 return true;
2781 };
2782 return result;
2783}
2784
2785} // namespace sat
2786} // namespace operations_research
IntegerValue y
IntegerValue size
DomainIteratorBeginEnd Values() const &
Stores temporaries used to build or manipulate a CutData.
Definition cuts.h:151
void AddOrMergeTerm(const CutTerm &term, IntegerValue t, CutData *cut)
Definition cuts.cc:294
bool TryToExpandWithLowerImpliedbound(IntegerValue factor_t, int i, bool complement, CutData *cut, CutDataBuilder *builder)
Important: The cut_builder_ must have been reset.
Definition cuts.cc:2274
std::pair< int, int > PostprocessWithImpliedBound(const std::function< IntegerValue(IntegerValue)> &f, IntegerValue factor_t, CutData *cut, CutDataBuilder *builder)
Definition cuts.cc:2195
IntegerValue FixedValue(IntegerVariable i) const
Checks that the variable is fixed and returns its value.
Definition integer.h:1729
bool IsFixed(IntegerVariable i) const
Checks if the variable is fixed.
Definition integer.h:1725
IntegerValue LevelZeroUpperBound(IntegerVariable var) const
Definition integer.h:1817
const Domain & InitialVariableDomain(IntegerVariable var) const
Definition integer.cc:876
IntegerValue LevelZeroLowerBound(IntegerVariable var) const
Returns globally valid lower/upper bound on the given integer variable.
Definition integer.h:1812
void ResetBounds(IntegerValue lb, IntegerValue ub)
Reset the bounds passed at construction time.
void AddTerm(IntegerVariable var, IntegerValue coeff)
void AddLinearExpression(const LinearExpression &expr)
void AddConstant(IntegerValue value)
Adds the corresponding term to the current linear expression.
LiteralIndex NegatedIndex() const
Definition sat_base.h:92
Utility class for the AllDiff cut generator.
Definition cuts.h:715
IntegerValue GetBestLowerBound(std::string &suffix)
Definition cuts.cc:2391
void Add(const AffineExpression &expr, int num_expr, const IntegerTrail &integer_trail)
Definition cuts.cc:2344
void TransferToManager(LinearConstraintManager *manager)
Empty the local pool and add all its content to the manager.
void AddCut(LinearConstraint ct, absl::string_view name, const util_intops::StrongVector< IntegerVariable, double > &lp_solution)
Adds a cut to the local pool.
int64_t b
Definition table.cc:45
int64_t a
Definition table.cc:44
const std::string name
A name for logging purposes.
int64_t value
IntVar * var
GRBmodel * model
int index
void STLSortAndRemoveDuplicates(T *v, const LessFunc &less_func)
Definition stl_util.h:58
IntegerValue FloorRatio(IntegerValue dividend, IntegerValue positive_divisor)
Definition integer.h:94
CutGenerator CreateCliqueCutGenerator(const std::vector< IntegerVariable > &base_variables, Model *model)
Definition cuts.cc:2724
bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue *result)
Computes result += a * b, and return false iff there is an overflow.
Definition integer.h:169
void DivideByGCD(LinearConstraint *constraint)
constexpr IntegerValue kMaxIntegerValue(std::numeric_limits< IntegerValue::ValueType >::max() - 1)
IntegerValue GetFactorT(IntegerValue rhs_remainder, IntegerValue divisor, IntegerValue max_magnitude)
Definition cuts.cc:482
IntType IntTypeAbs(IntType t)
Definition integer.h:81
bool BuildMaxAffineUpConstraint(const LinearExpression &target, IntegerVariable var, const std::vector< std::pair< IntegerValue, IntegerValue > > &affines, Model *model, LinearConstraintBuilder *builder)
Definition cuts.cc:2648
CutGenerator CreatePositiveMultiplicationCutGenerator(AffineExpression z, AffineExpression x, AffineExpression y, int linearization_level, Model *model)
A cut generator for z = x * y (x and y >= 0).
Definition cuts.cc:1859
IntegerValue CeilRatio(IntegerValue dividend, IntegerValue positive_divisor)
Definition integer.h:85
bool ProdOverflow(IntegerValue t, IntegerValue value)
Definition integer.h:117
const LiteralIndex kNoLiteralIndex(-1)
LinearConstraint ComputeHyperplanBelowSquare(AffineExpression x, AffineExpression square, IntegerValue x_value, Model *model)
Definition cuts.cc:1966
std::vector< IntegerVariable > NegationOf(const std::vector< IntegerVariable > &vars)
Returns the vector of the negated variables.
Definition integer.cc:51
CutGenerator CreateLinMaxCutGenerator(const IntegerVariable target, const std::vector< LinearExpression > &exprs, const std::vector< IntegerVariable > &z_vars, Model *model)
Definition cuts.cc:2563
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue.value())
const IntegerVariable kNoIntegerVariable(-1)
void CleanTermsAndFillConstraint(std::vector< std::pair< IntegerVariable, IntegerValue > > *terms, LinearExpression *output)
std::function< IntegerValue(IntegerValue)> GetSuperAdditiveStrengtheningFunction(IntegerValue positive_rhs, IntegerValue min_magnitude)
Definition cuts.cc:581
CutGenerator CreateSquareCutGenerator(AffineExpression y, AffineExpression x, int linearization_level, Model *model)
Definition cuts.cc:1978
IntegerVariable PositiveVariable(IntegerVariable i)
Definition integer.h:193
IntegerValue PositiveRemainder(IntegerValue dividend, IntegerValue positive_divisor)
Definition integer.h:153
CutGenerator CreateMaxAffineCutGenerator(LinearExpression target, IntegerVariable var, std::vector< std::pair< IntegerValue, IntegerValue > > affines, const std::string cut_name, Model *model)
Definition cuts.cc:2702
IntegerValue CapAddI(IntegerValue a, IntegerValue b)
Definition integer.h:113
LinearConstraint ComputeHyperplanAboveSquare(AffineExpression x, AffineExpression square, IntegerValue x_lb, IntegerValue x_ub, Model *model)
Definition cuts.cc:1954
CutGenerator CreateAllDifferentCutGenerator(const std::vector< AffineExpression > &exprs, Model *model)
Definition cuts.cc:2452
IntegerValue GetCoefficientOfPositiveVar(const IntegerVariable var, const LinearExpression &expr)
bool ValidateLinearConstraintForOverflow(const LinearConstraint &constraint, const IntegerTrail &integer_trail)
std::function< IntegerValue(IntegerValue)> ExtendNegativeFunction(std::function< IntegerValue(IntegerValue)> base_f, IntegerValue period)
Definition cuts.h:362
std::function< IntegerValue(IntegerValue)> GetSuperAdditiveStrengtheningMirFunction(IntegerValue positive_rhs, IntegerValue scaling)
Definition cuts.cc:616
bool VariableIsPositive(IntegerVariable i)
Definition integer.h:189
IntegerValue CapSubI(IntegerValue a, IntegerValue b)
Definition integer.h:109
bool AtMinOrMaxInt64I(IntegerValue t)
Definition integer.h:121
IntegerValue CapProdI(IntegerValue a, IntegerValue b)
Overflows and saturated arithmetic.
Definition integer.h:105
double ToDouble(IntegerValue value)
Definition integer.h:73
std::function< IntegerValue(IntegerValue)> GetSuperAdditiveRoundingFunction(IntegerValue rhs_remainder, IntegerValue divisor, IntegerValue t, IntegerValue max_scaling)
Definition cuts.cc:496
In SWIG mode, we don't want anything besides these top-level includes.
bool AtMinOrMaxInt64(int64_t x)
Checks if x is equal to the min or the max value of an int64_t.
int64_t CapAdd(int64_t x, int64_t y)
int64_t FloorRatio(int64_t value, int64_t positive_coeff)
int64_t CeilRatio(int64_t value, int64_t positive_coeff)
int64_t CapProd(int64_t x, int64_t y)
if(!yyg->yy_init)
Definition parser.yy.cc:965
static int input(yyscan_t yyscanner)
const Variable x
Definition qp_tests.cc:127
Fractional ratio
std::vector< double > lower_bounds
Definition lp_utils.cc:746
std::vector< double > upper_bounds
Definition lp_utils.cc:747
double l2_norm
The L2 norm of the finite values.
IntegerValue ValueAt(IntegerValue var_value) const
Returns the value of this affine expression given its variable value.
Definition integer.h:330
double LpValue(const util_intops::StrongVector< IntegerVariable, double > &lp_values) const
Returns the affine expression value under a given LP solution.
Definition integer.h:335
Our cut are always of the form linear_expression <= rhs.
Definition cuts.h:110
void Canonicalize()
This sorts terms and fill both num_relevant_entries and max_magnitude.
Definition cuts.cc:235
bool FillFromLinearConstraint(const LinearConstraint &base_ct, const util_intops::StrongVector< IntegerVariable, double > &lp_values, IntegerTrail *integer_trail)
Definition cuts.cc:170
bool AllCoefficientsArePositive() const
These functions transform the cut by complementation.
Definition cuts.cc:228
std::vector< CutTerm > terms
Definition cuts.h:142
bool FillFromParallelVectors(IntegerValue ub, absl::Span< const IntegerVariable > vars, absl::Span< const IntegerValue > coeffs, absl::Span< const double > lp_values, absl::Span< const IntegerValue > lower_bounds, absl::Span< const IntegerValue > upper_bounds)
Definition cuts.cc:188
double ComputeEfficacy() const
Definition cuts.cc:262
std::string DebugString() const
Definition cuts.cc:72
double ComputeViolation() const
Computes and returns the cut violation.
Definition cuts.cc:254
bool AppendOneTerm(IntegerVariable var, IntegerValue coeff, double lp_value, IntegerValue lb, IntegerValue ub)
Definition cuts.cc:133
std::vector< IntegerVariable > vars
Definition cuts.h:55
std::function< bool(LinearConstraintManager *manager)> generate_cuts
Definition cuts.h:56
IntegerVariable GetUnderlyingLiteralOrNone() const
Definition cuts.cc:113
std::string DebugString() const
Definition cuts.cc:64
int cached_implied_lb
Refer to cached_data_ in ImpliedBoundsProcessor.
Definition cuts.h:105
double LpDistToMaxValue() const
Definition cuts.h:66
void ReplaceExpressionByLiteral(IntegerVariable var)
Definition cuts.cc:99
void Complement(absl::int128 *rhs)
Definition cuts.cc:80
std::array< IntegerVariable, 2 > expr_vars
Definition cuts.h:101
std::array< IntegerValue, 2 > expr_coeffs
Definition cuts.h:102
bool HasRelevantLpValue() const
Definition cuts.h:65
std::unique_ptr< IntegerValue[]> coeffs
std::unique_ptr< IntegerVariable[]> vars