Google OR-Tools v9.14
a fast and portable software suite for combinatorial optimization
linear_constraint_manager.cc
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/linear_constraint_manager.h"

#include <stdint.h>

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/log/vlog_is_on.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "ortools/base/hash.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/model.h"
#include "ortools/sat/util.h"

namespace operations_research {
namespace sat {

LinearConstraintSymmetrizer::~LinearConstraintSymmetrizer() {
  if (!VLOG_IS_ON(1)) return;
  std::vector<std::pair<std::string, int64_t>> stats;
  stats.push_back({"symmetrizer/overflows", num_overflows_});
  shared_stats_->AddStats(stats);
}

void LinearConstraintSymmetrizer::AddSymmetryOrbit(
    IntegerVariable sum_var, absl::Span<const IntegerVariable> orbit) {
  CHECK_GT(orbit.size(), 1);

  // Store the orbit info.
  const int orbit_index = orbits_.size();
  has_symmetry_ = true;
  orbit_sum_vars_.push_back(sum_var);
  orbits_.Add(orbit);

  // And fill var_to_orbit_index_.
  const int new_size = integer_trail_->NumIntegerVariables().value() / 2;
  if (var_to_orbit_index_.size() < new_size) {
    var_to_orbit_index_.resize(new_size, -1);
  }
  DCHECK(VariableIsPositive(sum_var));
  DCHECK_EQ(var_to_orbit_index_[GetPositiveOnlyIndex(sum_var)], -1);
  var_to_orbit_index_[GetPositiveOnlyIndex(sum_var)] = orbit_index;
  for (const IntegerVariable var : orbit) {
    DCHECK(VariableIsPositive(var));
    DCHECK_EQ(var_to_orbit_index_[GetPositiveOnlyIndex(var)], -1);
    var_to_orbit_index_[GetPositiveOnlyIndex(var)] = orbit_index;
  }
}

// What we do here is basically equivalent to adding all the possible
// permutations (under the problem symmetry group) of the constraint together.
// When we do that, all variables in the same orbit will have the same
// coefficient (TODO(user): think how to prove this properly and especially
// that the scaling is in 1/orbit_size) and will each appear once. We then
// substitute each sum by the sum over the orbit, and divide the coefficients
// by their gcd.
//
// Any solution of the original LP can be transformed to a solution of the
// folded LP with the same objective. So the folded LP will give us a tight and
// valid objective lower bound, but with a lot fewer variables! This is an
// adaptation of "LP folding" to a MIP context. Introducing the orbit sums
// allows us to propagate and add cuts, as these sums are still integer for us.
//
// The only issue is the scaling of the constraints: each orbit sum variable
// will appear with a factor 1/orbit_size in the original constraint.
//
// We will remap & scale the constraint as illustrated below.
// If that is not possible, we will drop the constraint for now.
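//
// A small worked example (illustrative, not from the original source):
// suppose x1, x2, x3 form one orbit with orbit sum S = x1 + x2 + x3
// (orbit_size = 3), and we fold 2*x1 + 5*y <= 7. Summing the 3 permutations
// gives 2*(x1 + x2 + x3) + 15*y <= 21, i.e. 2*S + 15*y <= 21. This matches
// the code below: an orbit term keeps coeff * (scaling_factor / orbit_size),
// other terms and both bounds are multiplied by scaling_factor = 3.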
bool LinearConstraintSymmetrizer::FoldLinearConstraint(LinearConstraint* ct,
                                                       bool* folded) {
  if (!has_symmetry_) return true;

  // We assume the constraint has had basic preprocessing, with tight lb/ub for
  // instance. The first pass computes the scaling factor.
  int64_t scaling_factor = 1;
  for (int i = 0; i < ct->num_terms; ++i) {
    const IntegerVariable var = ct->vars[i];
    CHECK(VariableIsPositive(var));

    const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)];
    if (orbit_index == -1 || orbit_sum_vars_[orbit_index] == var) {
      // If we have an orbit of size one, or the variable is its own
      // representative (orbit sum), skip.
      continue;
    }

    // Update the scaling factor.
    const int orbit_size = orbits_[orbit_index].size();
    if (AtMinOrMaxInt64(CapProd(orbit_size, scaling_factor))) {
      ++num_overflows_;
      VLOG(2) << "SYMMETRY skip constraint due to overflow";
      return false;
    }
    scaling_factor = std::lcm(scaling_factor, orbit_size);
  }

  if (scaling_factor == 1) {
    // No symmetric variables.
    return true;
  }

  if (folded != nullptr) *folded = true;

  // We need to multiply each term by scaling_factor / orbit_size.
  //
  // TODO(user): Now that we know the actual coefficient we could scale less.
  // Maybe the coefficient of an orbit_var is already divisible by orbit_size.
  builder_.Clear();
  for (int i = 0; i < ct->num_terms; ++i) {
    const IntegerVariable var = ct->vars[i];
    const IntegerValue coeff = ct->coeffs[i];

    const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)];
    if (orbit_index == -1 || orbit_sum_vars_[orbit_index] == var) {
      const int64_t scaled_coeff = CapProd(coeff.value(), scaling_factor);
      if (AtMinOrMaxInt64(scaled_coeff)) {
        ++num_overflows_;
        VLOG(2) << "SYMMETRY skip constraint due to overflow";
        return false;
      }
      builder_.AddTerm(var, scaled_coeff);
    } else {
      const int64_t orbit_size = orbits_[orbit_index].size();
      const int64_t factor = scaling_factor / orbit_size;
      const int64_t scaled_coeff = CapProd(coeff.value(), factor);
      if (AtMinOrMaxInt64(scaled_coeff)) {
        ++num_overflows_;
        VLOG(2) << "SYMMETRY skip constraint due to overflow";
        return false;
      }
      builder_.AddTerm(orbit_sum_vars_[orbit_index], scaled_coeff);
    }
  }

  if (AtMinOrMaxInt64(CapProd(ct->lb.value(), scaling_factor)) ||
      AtMinOrMaxInt64(CapProd(ct->ub.value(), scaling_factor))) {
    ++num_overflows_;
    VLOG(2) << "SYMMETRY skip constraint due to lb/ub overflow";
    return false;
  }
  if (!builder_.BuildIntoConstraintAndCheckOverflow(
          ct->lb * scaling_factor, ct->ub * scaling_factor, ct)) {
    ++num_overflows_;
    VLOG(2) << "SYMMETRY skip constraint due to overflow";
    return false;
  }

  // Dividing by gcd can help.
  DivideByGCD(ct);
  if (PossibleOverflow(*integer_trail_, *ct)) {
    ++num_overflows_;
    VLOG(2) << "SYMMETRY skip constraint due to overflow factor = "
            << scaling_factor;
    return false;
  }

  // TODO(user): In some cases, this constraint will directly propagate/fix
  // the orbit sum variables; we might want to propagate this in the CP world?
  // This might also remove bad scaling.
  return true;
}

int LinearConstraintSymmetrizer::OrbitIndex(IntegerVariable var) const {
  if (!has_symmetry_) return -1;
  return var_to_orbit_index_[GetPositiveOnlyIndex(var)];
}

bool LinearConstraintSymmetrizer::IsOrbitSumVar(IntegerVariable var) const {
  if (!has_symmetry_) return false;
  const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)];
  return orbit_index >= 0 && orbit_sum_vars_[orbit_index] == var;
}

bool LinearConstraintSymmetrizer::AppearInFoldedProblem(
    IntegerVariable var) const {
  if (!has_symmetry_) return true;
  const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)];
  return orbit_index == -1 || orbit_sum_vars_[orbit_index] == var;
}

namespace {

const LinearConstraintManager::ConstraintIndex kInvalidConstraintIndex(-1);

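// Note (illustrative, not from the original source): this hash deliberately
// covers only the terms, not the bounds, so two constraints with the same
// sorted (var, coeff) terms but different lb/ub map to the same key. Add()
// below relies on this to merge their bounds instead of storing duplicates.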
size_t ComputeHashOfTerms(const LinearConstraint& ct) {
  DCHECK(std::is_sorted(ct.vars.get(), ct.vars.get() + ct.num_terms));
  size_t hash = 0;
  const int num_terms = ct.num_terms;
  for (int i = 0; i < num_terms; ++i) {
    hash = util_hash::Hash(ct.vars[i].value(), hash);
    hash = util_hash::Hash(ct.coeffs[i].value(), hash);
  }
  return hash;
}

}  // namespace

LinearConstraintManager::~LinearConstraintManager() {
  if (!VLOG_IS_ON(1)) return;
  if (model_->Get<SharedStatistics>() == nullptr) return;

  std::vector<std::pair<std::string, int64_t>> cut_stats;
  for (const auto& entry : type_to_num_cuts_) {
    cut_stats.push_back({absl::StrCat("cut/", entry.first), entry.second});
  }
  model_->Mutable<SharedStatistics>()->AddStats(cut_stats);
}

void LinearConstraintManager::RescaleActiveCounts(const double scaling_factor) {
  for (ConstraintIndex i(0); i < constraint_infos_.size(); ++i) {
    constraint_infos_[i].active_count *= scaling_factor;
  }
  constraint_active_count_increase_ *= scaling_factor;
  VLOG(3) << "Rescaled active counts by " << scaling_factor;
}

bool LinearConstraintManager::MaybeRemoveSomeInactiveConstraints(
    glop::BasisState* solution_state) {
  if (solution_state->IsEmpty()) return false;  // Mainly to simplify tests.
  const glop::RowIndex num_rows(lp_constraints_.size());
  const glop::ColIndex num_cols =
      solution_state->statuses.size() - RowToColIndex(num_rows);
  int new_size = 0;
  for (int i = 0; i < num_rows; ++i) {
    const ConstraintIndex constraint_index = lp_constraints_[i];
    if (constraint_infos_[constraint_index].constraint.num_terms == 0) {
      // Remove empty constraint.
      //
      // TODO(user): If the constraint is infeasible we could detect unsat
      // right away, but hopefully this is a case where the propagation part
      // of the solver can detect that too.
      constraint_infos_[constraint_index].is_in_lp = false;
      continue;
    }

    // Constraints that are not tight in the current solution have a basic
    // status. We remove the ones that have been inactive in the most recent
    // solves.
    //
    // TODO(user): More advanced heuristics might perform better, I didn't do
    // a lot of tuning experiments yet.
    const glop::VariableStatus row_status =
        solution_state->statuses[num_cols + glop::ColIndex(i)];
    if (row_status == glop::VariableStatus::BASIC) {
      constraint_infos_[constraint_index].inactive_count++;
      if (constraint_infos_[constraint_index].inactive_count >
          sat_parameters_.max_consecutive_inactive_count()) {
        constraint_infos_[constraint_index].is_in_lp = false;
        continue;  // Remove it.
      }
    } else {
      // Only count consecutive inactivities.
      constraint_infos_[constraint_index].inactive_count = 0;
    }

    lp_constraints_[new_size] = constraint_index;
    solution_state->statuses[num_cols + glop::ColIndex(new_size)] = row_status;
    new_size++;
  }
  const int num_removed_constraints = lp_constraints_.size() - new_size;
  lp_constraints_.resize(new_size);
  solution_state->statuses.resize(num_cols + glop::ColIndex(new_size));
  if (num_removed_constraints > 0) {
    VLOG(3) << "Removed " << num_removed_constraints << " constraints";
  }
  return num_removed_constraints > 0;
}

// Because we sometimes split an == constraint into two inequalities (>= and
// <=), it makes sense to detect duplicate constraints and merge their bounds.
// This is also relevant if we regenerate identical cuts for some reason.
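// For instance (illustrative, not from the original source), adding x + y >= 2
// and later x + y <= 5 leaves a single stored constraint 2 <= x + y <= 5:
// both share the same sorted terms, hence the same ComputeHashOfTerms() key.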
LinearConstraintManager::ConstraintIndex LinearConstraintManager::Add(
    LinearConstraint ct, bool* added, bool* folded) {
  DCHECK_GT(ct.num_terms, 0);
  DCHECK(!PossibleOverflow(integer_trail_, ct)) << ct.DebugString();
  DCHECK(NoDuplicateVariable(ct));
  SimplifyConstraint(&ct);
  DivideByGCD(&ct);
  MakeAllVariablesPositive(&ct);
  DCHECK(DebugCheckConstraint(ct));

  // If configured, store instead the folded version of this constraint.
  // TODO(user): Shall we simplify again?
  if (symmetrizer_->HasSymmetry() &&
      !symmetrizer_->FoldLinearConstraint(&ct, folded)) {
    return kInvalidConstraintIndex;
  }
  CHECK(std::is_sorted(ct.VarsAsSpan().begin(), ct.VarsAsSpan().end()));

  // If an identical constraint exists, only update its bounds.
  const size_t key = ComputeHashOfTerms(ct);
  if (equiv_constraints_.contains(key)) {
    const ConstraintIndex ct_index = equiv_constraints_[key];
    if (constraint_infos_[ct_index].constraint.IsEqualIgnoringBounds(ct)) {
      bool tightened = false;
      if (ct.lb > constraint_infos_[ct_index].constraint.lb) {
        tightened = true;
        if (constraint_infos_[ct_index].is_in_lp) current_lp_is_changed_ = true;
        constraint_infos_[ct_index].constraint.lb = ct.lb;
      }
      if (ct.ub < constraint_infos_[ct_index].constraint.ub) {
        tightened = true;
        if (constraint_infos_[ct_index].is_in_lp) current_lp_is_changed_ = true;
        constraint_infos_[ct_index].constraint.ub = ct.ub;
      }
      if (added != nullptr) *added = tightened;
      if (tightened) {
        ++num_merged_constraints_;
        FillDerivedFields(&constraint_infos_[ct_index]);
      }
      return ct_index;
    }
  }

  if (added != nullptr) *added = true;
  const ConstraintIndex ct_index(constraint_infos_.size());
  ConstraintInfo ct_info;
  ct_info.constraint = std::move(ct);
  ct_info.l2_norm = ComputeL2Norm(ct_info.constraint);
  FillDerivedFields(&ct_info);
  ct_info.hash = key;
  equiv_constraints_[key] = ct_index;
  ct_info.active_count = constraint_active_count_increase_;
  constraint_infos_.push_back(std::move(ct_info));
  return ct_index;
}

bool LinearConstraintManager::UpdateConstraintLb(glop::RowIndex index_in_lp,
                                                 IntegerValue new_lb) {
  const ConstraintIndex index = lp_constraints_[index_in_lp.value()];
  ConstraintInfo& info = constraint_infos_[index];
  if (new_lb <= info.constraint.lb) return false;
  ++num_constraint_updates_;
  current_lp_is_changed_ = true;
  info.constraint.lb = new_lb;
  return true;
}

bool LinearConstraintManager::UpdateConstraintUb(glop::RowIndex index_in_lp,
                                                 IntegerValue new_ub) {
  const ConstraintIndex index = lp_constraints_[index_in_lp.value()];
  ConstraintInfo& info = constraint_infos_[index];
  if (new_ub >= info.constraint.ub) return false;
  ++num_constraint_updates_;
  current_lp_is_changed_ = true;
  info.constraint.ub = new_ub;
  return true;
}

void LinearConstraintManager::ComputeObjectiveParallelism(
    const ConstraintIndex ct_index) {
  CHECK(objective_is_defined_);
  // Lazy computation of the objective norm.
  if (!objective_norm_computed_) {
    objective_l2_norm_ = std::sqrt(sum_of_squared_objective_coeffs_);
    objective_norm_computed_ = true;
  }
  CHECK_GT(objective_l2_norm_, 0.0);

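  // What follows computes
  //   |<objective, ct>| / (||objective||_2 * ||ct||_2),
  // i.e. the absolute cosine of the angle between the constraint row and the
  // objective. For instance (illustrative numbers), objective x + y against
  // constraint x - y <= 0 gives a dot product of 0 and a parallelism of 0.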
  constraint_infos_[ct_index].objective_parallelism_computed = true;
  if (constraint_infos_[ct_index].l2_norm == 0.0) {
    constraint_infos_[ct_index].objective_parallelism = 0.0;
    return;
  }

  const LinearConstraint& lc = constraint_infos_[ct_index].constraint;
  double unscaled_objective_parallelism = 0.0;
  for (int i = 0; i < lc.num_terms; ++i) {
    const IntegerVariable var = lc.vars[i];
    const auto it = objective_map_.find(var);
    if (it == objective_map_.end()) continue;
    unscaled_objective_parallelism += it->second * ToDouble(lc.coeffs[i]);
  }
  const double objective_parallelism =
      unscaled_objective_parallelism /
      (constraint_infos_[ct_index].l2_norm * objective_l2_norm_);
  constraint_infos_[ct_index].objective_parallelism =
      std::abs(objective_parallelism);
}

// Same as Add(), but logs some information about the newly added constraint.
// Cuts are also handled slightly differently from normal constraints.
bool LinearConstraintManager::AddCut(LinearConstraint ct, std::string type_name,
                                     std::string extra_info) {
  ++num_add_cut_calls_;
  if (ct.num_terms == 0) return false;

  const double activity = ComputeActivity(ct, expanded_lp_solution_);
  const double violation =
      std::max(activity - ToDouble(ct.ub), ToDouble(ct.lb) - activity);
  const double l2_norm = ComputeL2Norm(ct);

  // Only add cuts with sufficient efficacy.
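  //
  // Here violation / l2_norm is the cut's efficacy, i.e. the Euclidean
  // distance from the current LP point to the cut hyperplane. For instance
  // (illustrative numbers), the cut x + y <= 2 at the LP point (1.5, 1.5) has
  // violation 1.0, l2_norm sqrt(2), and efficacy ~0.707.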
  if (violation / l2_norm < 1e-4) {
    VLOG(3) << "BAD Cut '" << type_name << "'"
            << " size=" << ct.num_terms
            << " max_magnitude=" << ComputeInfinityNorm(ct)
            << " norm=" << l2_norm << " violation=" << violation
            << " eff=" << violation / l2_norm << " " << extra_info;
    return false;
  }

  // TODO(user): We could prevent overflow by dividing more. Note that this
  // mainly happens with super large variable domains, since we usually
  // restrict the size of the generated coefficients in our cuts. So it
  // shouldn't be that important.
  if (PossibleOverflow(integer_trail_, ct)) return false;

  bool added = false;
  const ConstraintIndex ct_index = Add(std::move(ct), &added);

  // We only mark the constraint as a cut if it is not an update of an already
  // existing one.
  if (!added) return false;

  // TODO(user): Use a better heuristic here for detecting good cuts and mark
  // them undeletable.
  constraint_infos_[ct_index].is_deletable = true;

  VLOG(2) << "Cut '" << type_name << "'"
          << " size=" << constraint_infos_[ct_index].constraint.num_terms
          << " max_magnitude="
          << ComputeInfinityNorm(constraint_infos_[ct_index].constraint)
          << " norm=" << l2_norm << " violation=" << violation
          << " eff=" << violation / l2_norm << " " << extra_info;

  num_cuts_++;
  num_deletable_constraints_++;
  type_to_num_cuts_[type_name]++;
  return true;
}

void LinearConstraintManager::PermanentlyRemoveSomeConstraints() {
  std::vector<double> deletable_constraint_counts;
  for (ConstraintIndex i(0); i < constraint_infos_.size(); ++i) {
    if (constraint_infos_[i].is_deletable && !constraint_infos_[i].is_in_lp) {
      deletable_constraint_counts.push_back(constraint_infos_[i].active_count);
    }
  }
  if (deletable_constraint_counts.empty()) return;
  std::sort(deletable_constraint_counts.begin(),
            deletable_constraint_counts.end());

  // We will delete the oldest (in the order they were added) cleanup-target
  // constraints with a count lower than or equal to this threshold.
  double active_count_threshold = std::numeric_limits<double>::infinity();
  if (sat_parameters_.cut_cleanup_target() <
      deletable_constraint_counts.size()) {
    active_count_threshold =
        deletable_constraint_counts[sat_parameters_.cut_cleanup_target()];
  }
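
  // Illustrative example (not from the original source): with deletable,
  // out-of-LP active counts [0.5, 1.0, 4.0, 7.0] and cut_cleanup_target = 2,
  // the threshold is 4.0, so the loop below removes constraints with a count
  // <= 4.0, in insertion order, up to cut_cleanup_target of them.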

  ConstraintIndex new_size(0);
  equiv_constraints_.clear();
  util_intops::StrongVector<ConstraintIndex, ConstraintIndex> index_mapping(
      constraint_infos_.size());
  int num_deleted_constraints = 0;
  for (ConstraintIndex i(0); i < constraint_infos_.size(); ++i) {
    if (constraint_infos_[i].is_deletable && !constraint_infos_[i].is_in_lp &&
        constraint_infos_[i].active_count <= active_count_threshold &&
        num_deleted_constraints < sat_parameters_.cut_cleanup_target()) {
      ++num_deleted_constraints;
      continue;
    }

    if (i != new_size) {
      constraint_infos_[new_size] = std::move(constraint_infos_[i]);
    }
    index_mapping[i] = new_size;

    // Make sure we recompute the hash_map of identical constraints.
    equiv_constraints_[constraint_infos_[new_size].hash] = new_size;
    new_size++;
  }
  constraint_infos_.resize(new_size.value());

  // Also update lp_constraints_.
  for (int i = 0; i < lp_constraints_.size(); ++i) {
    lp_constraints_[i] = index_mapping[lp_constraints_[i]];
  }

  if (num_deleted_constraints > 0) {
    VLOG(3) << "Constraint manager cleanup: #deleted:"
            << num_deleted_constraints;
  }
  num_deletable_constraints_ -= num_deleted_constraints;
}

void LinearConstraintManager::SetObjectiveCoefficient(IntegerVariable var,
                                                      IntegerValue coeff) {
  if (coeff == IntegerValue(0)) return;
  objective_is_defined_ = true;
  if (!VariableIsPositive(var)) {
    var = NegationOf(var);
    coeff = -coeff;
  }
  const double coeff_as_double = ToDouble(coeff);
  const auto insert = objective_map_.insert({var, coeff_as_double});
  CHECK(insert.second)
      << "SetObjectiveCoefficient() called twice with same variable";
  sum_of_squared_objective_coeffs_ += coeff_as_double * coeff_as_double;
}

// TODO(user): Also consider partial gcd simplification? see presolve.
bool LinearConstraintManager::SimplifyConstraint(LinearConstraint* ct) {
  bool term_changed = false;

  IntegerValue min_sum(0);
  IntegerValue max_sum(0);
  IntegerValue max_magnitude(0);
  IntegerValue min_magnitude = kMaxIntegerValue;
  int new_size = 0;
  const int num_terms = ct->num_terms;
  for (int i = 0; i < num_terms; ++i) {
    const IntegerVariable var = ct->vars[i];
    const IntegerValue coeff = ct->coeffs[i];
    const IntegerValue lb = integer_trail_.LevelZeroLowerBound(var);
    const IntegerValue ub = integer_trail_.LevelZeroUpperBound(var);

    // For now we do not change ct, but just compute its new_size if we were
    // to remove the fixed terms.
    if (lb == ub) continue;
    ++new_size;

    const IntegerValue magnitude = IntTypeAbs(coeff);
    max_magnitude = std::max(max_magnitude, magnitude);
    min_magnitude = std::min(min_magnitude, magnitude);
    if (coeff > 0.0) {
      min_sum += coeff * lb;
      max_sum += coeff * ub;
    } else {
      min_sum += coeff * ub;
      max_sum += coeff * lb;
    }
  }

  // Shorten the constraint if needed.
  IntegerValue fixed_part = 0;
  if (new_size < num_terms) {
    term_changed = true;
    ++num_shortened_constraints_;
    new_size = 0;
    for (int i = 0; i < num_terms; ++i) {
      const IntegerVariable var = ct->vars[i];
      const IntegerValue coeff = ct->coeffs[i];
      const IntegerValue lb = integer_trail_.LevelZeroLowerBound(var);
      const IntegerValue ub = integer_trail_.LevelZeroUpperBound(var);
      if (lb == ub) {
        fixed_part += coeff * lb;
        continue;
      }
      ct->vars[new_size] = var;
      ct->coeffs[new_size] = coeff;
      ++new_size;
    }
    ct->resize(new_size);
  }

  // Clear constraints that are always true.
  // We rely on the deletion code to remove them eventually.
  if (min_sum + fixed_part >= ct->lb && max_sum + fixed_part <= ct->ub) {
    ct->resize(0);
    ct->lb = 0;
    ct->ub = 0;
    return true;
  }

  // Make sure bounds are finite.
  ct->lb = std::max(ct->lb, min_sum + fixed_part);
  ct->ub = std::min(ct->ub, max_sum + fixed_part);
  ct->lb -= fixed_part;
  ct->ub -= fixed_part;

  // The variables can be shifted and complemented, so we have constraints of
  // the form:
  //    ... + |coeff| * X  >= threshold_ub
  //    ... + |coeff| * X' >= threshold_lb
  // In both cases, if coeff is big we can reduce it and update the rhs
  // accordingly.
  const IntegerValue threshold_ub = max_sum - ct->ub;
  const IntegerValue threshold_lb = ct->lb - min_sum;
  const IntegerValue threshold = std::max(threshold_lb, threshold_ub);
  CHECK_GT(threshold, 0);  // Since we aborted for trivial constraints.
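
  // Illustrative example (not from the original source): for 10 * x + y <= 5
  // with x in [0, 1] and y in [0, 3], max_sum = 13 so threshold_ub = 8. The
  // coefficient 10 > 8 gets reduced to 8 and the rhs becomes
  // 5 - (10 - 8) * 1 = 3, i.e. the equivalent constraint 8 * x + y <= 3.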

  // TODO(user): In some cases, we could split the constraint to reduce one of
  // them further. But not sure that is a good thing.
  if (threshold_ub > 0 && threshold_lb > 0 && threshold_lb != threshold_ub) {
    if (max_magnitude > std::min(threshold_lb, threshold_ub)) {
      ++num_split_constraints_;
    }
  }

  // TODO(user): For constraints with both bounds, we could reduce further for
  // coefficients between threshold - min_magnitude and min(t_lb, t_ub).
  const IntegerValue second_threshold = std::max(
      {CeilRatio(threshold, IntegerValue(2)), threshold - min_magnitude,
       std::min(threshold_lb, threshold_ub)});
  if (max_magnitude > second_threshold) {
    const int num_terms = ct->num_terms;
    for (int i = 0; i < num_terms; ++i) {
      // In all cases, we reason on a transformed constraint where the term
      // is max_value - |coeff| * positive_X. If we change coeff, and
      // retransform the constraint, we need to change the rhs by the
      // constant term left.
      const IntegerValue coeff = ct->coeffs[i];
      const IntegerVariable var = ct->vars[i];
      const IntegerValue lb = integer_trail_.LevelZeroLowerBound(var);
      const IntegerValue ub = integer_trail_.LevelZeroUpperBound(var);
      if (coeff > threshold) {
        term_changed = true;
        ++num_coeff_strenghtening_;
        ct->coeffs[i] = threshold;
        ct->ub -= (coeff - threshold) * ub;
        ct->lb -= (coeff - threshold) * lb;
      } else if (coeff > second_threshold && coeff < threshold) {
        term_changed = true;
        ++num_coeff_strenghtening_;
        ct->coeffs[i] = second_threshold;
        ct->ub -= (coeff - second_threshold) * ub;
        ct->lb -= (coeff - second_threshold) * lb;
      } else if (coeff < -threshold) {
        term_changed = true;
        ++num_coeff_strenghtening_;
        ct->coeffs[i] = -threshold;
        ct->ub -= (coeff + threshold) * lb;
        ct->lb -= (coeff + threshold) * ub;
      } else if (coeff < -second_threshold && coeff > -threshold) {
        term_changed = true;
        ++num_coeff_strenghtening_;
        ct->coeffs[i] = -second_threshold;
        ct->ub -= (coeff + second_threshold) * lb;
        ct->lb -= (coeff + second_threshold) * ub;
      }
    }
  }

  return term_changed;
}

void LinearConstraintManager::FillDerivedFields(ConstraintInfo* info) {
  IntegerValue min_sum(0);
  IntegerValue max_sum(0);
  const int num_terms = info->constraint.num_terms;
  for (int i = 0; i < num_terms; ++i) {
    const IntegerVariable var = info->constraint.vars[i];
    const IntegerValue coeff = info->constraint.coeffs[i];
    const IntegerValue lb = integer_trail_.LevelZeroLowerBound(var);
    const IntegerValue ub = integer_trail_.LevelZeroUpperBound(var);
    if (coeff > 0.0) {
      min_sum += coeff * lb;
      max_sum += coeff * ub;
    } else {
      min_sum += coeff * ub;
      max_sum += coeff * lb;
    }
  }
  info->constraint.lb = std::max(min_sum, info->constraint.lb);
  info->constraint.ub = std::min(max_sum, info->constraint.ub);
  CHECK_NE(CapSub(info->constraint.ub.value(), info->constraint.lb.value()),
           std::numeric_limits<int64_t>::max());
  info->lb_is_trivial = min_sum >= info->constraint.lb;
  info->ub_is_trivial = max_sum <= info->constraint.ub;
}

bool LinearConstraintManager::ChangeLp(glop::BasisState* solution_state,
                                       int* num_new_constraints) {
  VLOG(3) << "Enter ChangeLP, scan " << constraint_infos_.size()
          << " constraints";
  const double saved_dtime = dtime_;
  std::vector<std::pair<ConstraintIndex, double>> new_constraints_by_score;
  std::vector<double> new_constraints_efficacies;
  std::vector<double> new_constraints_orthogonalities;

  const bool simplify_constraints =
      integer_trail_.num_level_zero_enqueues() > last_simplification_timestamp_;
  last_simplification_timestamp_ = integer_trail_.num_level_zero_enqueues();

  // We keep any constraint that is already present, and otherwise we add the
  // ones that are currently not satisfied by at least "tolerance" to the set
  // of potential new constraints.
  bool rescale_active_count = false;
  const double tolerance = 1e-6;
  for (ConstraintIndex i(0); i < constraint_infos_.size(); ++i) {
    // Inprocessing of the constraint.
    if (simplify_constraints &&
        SimplifyConstraint(&constraint_infos_[i].constraint)) {
      ++num_simplifications_;

      // Note that the canonicalization shouldn't be needed since the order
      // of the variables is not changed by the simplification, and we only
      // reduce the coefficients at both ends of the spectrum.
      DivideByGCD(&constraint_infos_[i].constraint);
      DCHECK(DebugCheckConstraint(constraint_infos_[i].constraint));

      constraint_infos_[i].objective_parallelism_computed = false;
      constraint_infos_[i].l2_norm =
          ComputeL2Norm(constraint_infos_[i].constraint);
      FillDerivedFields(&constraint_infos_[i]);

      if (constraint_infos_[i].is_in_lp) current_lp_is_changed_ = true;
      equiv_constraints_.erase(constraint_infos_[i].hash);
      constraint_infos_[i].hash =
          ComputeHashOfTerms(constraint_infos_[i].constraint);

      // TODO(user): Because we simplified this constraint, it is possible that
      // it is now a duplicate of another one. Merge them.
      equiv_constraints_[constraint_infos_[i].hash] = i;
    }

    if (constraint_infos_[i].is_in_lp) continue;

    // ComputeActivity() often represents the bulk of the time spent in
    // ChangeLP().
    dtime_ +=
        1.7e-9 * static_cast<double>(constraint_infos_[i].constraint.num_terms);
    const double activity =
        ComputeActivity(constraint_infos_[i].constraint, expanded_lp_solution_);
    const double lb_violation =
        ToDouble(constraint_infos_[i].constraint.lb) - activity;
    const double ub_violation =
        activity - ToDouble(constraint_infos_[i].constraint.ub);
    const double violation = std::max(lb_violation, ub_violation);
    if (violation >= tolerance) {
      constraint_infos_[i].inactive_count = 0;
      if (objective_is_defined_ &&
          !constraint_infos_[i].objective_parallelism_computed) {
        ComputeObjectiveParallelism(i);
      } else if (!objective_is_defined_) {
        constraint_infos_[i].objective_parallelism = 0.0;
      }

      const double score = violation / constraint_infos_[i].l2_norm +
                           constraint_infos_[i].objective_parallelism;
      new_constraints_by_score.push_back({i, score});

      if (constraint_infos_[i].is_deletable) {
        constraint_infos_[i].active_count += constraint_active_count_increase_;
        if (constraint_infos_[i].active_count >
            sat_parameters_.cut_max_active_count_value()) {
          rescale_active_count = true;
        }
      }
    }
  }

  // Bump activities of active constraints in LP.
  if (solution_state != nullptr) {
    const glop::RowIndex num_rows(lp_constraints_.size());
    const glop::ColIndex num_cols =
        solution_state->statuses.size() - RowToColIndex(num_rows);

    for (int i = 0; i < num_rows; ++i) {
      const ConstraintIndex constraint_index = lp_constraints_[i];
      const glop::VariableStatus row_status =
          solution_state->statuses[num_cols + glop::ColIndex(i)];
      if (row_status != glop::VariableStatus::BASIC &&
          constraint_infos_[constraint_index].is_deletable) {
        constraint_infos_[constraint_index].active_count +=
            constraint_active_count_increase_;
        if (constraint_infos_[constraint_index].active_count >
            sat_parameters_.cut_max_active_count_value()) {
          rescale_active_count = true;
        }
      }
    }
  }

  if (rescale_active_count) {
    CHECK_GT(sat_parameters_.cut_max_active_count_value(), 0.0);
    RescaleActiveCounts(1.0 / sat_parameters_.cut_max_active_count_value());
  }

  // Update the increment counter.
  constraint_active_count_increase_ *=
      1.0 / sat_parameters_.cut_active_count_decay();
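
  // Illustrative note (not from the original source): multiplying the
  // increment by 1 / decay on each call is equivalent to decaying every
  // existing active_count by cut_active_count_decay, a trick also used for
  // VSIDS-style activities; RescaleActiveCounts() keeps the values in a safe
  // floating-point range.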

  // Remove constraints from the current LP that have been inactive for a
  // while. We do that after we computed new_constraints so we do not need to
  // iterate over the just-deleted constraints.
  if (MaybeRemoveSomeInactiveConstraints(solution_state)) {
    current_lp_is_changed_ = true;
  }

  // Note that the algo below is in O(limit * new_constraint). In order to
  // avoid spending too much time on this, we first sort all the constraints
  // with an imprecise score (no orthogonality), then limit the size of the
  // vector of constraints to precisely score, and only then do the actual
  // scoring.
  //
  // On problem crossword_opt_grid-19.05_dict-80_sat with linearization_level=2,
  // new_constraint.size() > 1.5M.
  //
  // TODO(user): This blowup factor could be adaptive w.r.t. the constraint
  // limit.
  const int kBlowupFactor = 4;
  const int current_size = static_cast<int>(new_constraints_by_score.size());
  int constraint_limit =
      std::min(sat_parameters_.new_constraints_batch_size(), current_size);
  if (lp_constraints_.empty()) {
    constraint_limit = std::min(1000, current_size);
  }
  VLOG(3) << " - size = " << current_size << ", limit = " << constraint_limit;

  std::stable_sort(new_constraints_by_score.begin(),
                   new_constraints_by_score.end(),
                   [&](std::pair<ConstraintIndex, double> a,
                       std::pair<ConstraintIndex, double> b) {
                     return a.second > b.second;
                   });
  if (new_constraints_by_score.size() > kBlowupFactor * constraint_limit) {
    VLOG(3) << "Resize candidate constraints from "
            << new_constraints_by_score.size() << " down to "
            << kBlowupFactor * constraint_limit;
    new_constraints_by_score.resize(kBlowupFactor * constraint_limit);
  }

  int num_added = 0;
  TimeLimitCheckEveryNCalls time_limit_check(1000, time_limit_);
  ConstraintIndex last_added_candidate = kInvalidConstraintIndex;
  std::vector<double> orthogonality_score(new_constraints_by_score.size(), 1.0);
  for (int i = 0; i < constraint_limit; ++i) {
    // Iterate through all new constraints and select the one with the best
    // score.
    //
    // TODO(user): Find a better algo; this does 1000 * 4000 scalar products!
    double best_score = 0.0;
    ConstraintIndex best_candidate = kInvalidConstraintIndex;
    for (int j = 0; j < new_constraints_by_score.size(); ++j) {
      // Check the time limit, and return whether the LP has already changed.
      if (time_limit_check.LimitReached()) return current_lp_is_changed_;

      const ConstraintIndex new_index = new_constraints_by_score[j].first;
      if (constraint_infos_[new_index].is_in_lp) continue;

      if (last_added_candidate != kInvalidConstraintIndex) {
        const double current_orthogonality =
            1.0 - (std::abs(ScalarProduct(
                       constraint_infos_[last_added_candidate].constraint,
                       constraint_infos_[new_index].constraint)) /
                   (constraint_infos_[last_added_candidate].l2_norm *
                    constraint_infos_[new_index].l2_norm));
        orthogonality_score[j] =
            std::min(orthogonality_score[j], current_orthogonality);
      }
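
      // Illustrative note (not from the original source):
      // current_orthogonality is 1 minus the absolute cosine between the two
      // rows, so it is 0 for parallel constraints and 1 for orthogonal ones;
      // orthogonality_score[j] keeps the worst value seen against the
      // constraints added so far.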

      // NOTE(user): It is safe not to add this constraint: the constraint
      // that is almost parallel to it is either already in the LP, or was
      // inactive for a long time and has been removed from the LP. In either
      // case, this constraint would not add significant value and would only
      // make the LP larger.
      if (orthogonality_score[j] <
          sat_parameters_.min_orthogonality_for_lp_constraints()) {
        continue;
      }

      // TODO(user): Experiment with different weights or different
      // functions for computing the score.
      const double score =
          orthogonality_score[j] + new_constraints_by_score[j].second;
      CHECK_GE(score, 0.0);
      if (score > best_score || best_candidate == kInvalidConstraintIndex) {
        best_score = score;
        best_candidate = new_index;
      }
    }

    if (best_candidate != kInvalidConstraintIndex) {
      // Add the best constraint to the LP.
      constraint_infos_[best_candidate].is_in_lp = true;
      // Note that it is important for LP incremental solving that the old
      // constraints stay at the same position in this list (and thus in the
      // returned GetLp()).
      ++num_added;
      current_lp_is_changed_ = true;
      lp_constraints_.push_back(best_candidate);
      last_added_candidate = best_candidate;
    } else {
      // Abort the addition loop.
      break;
    }
  }

  if (num_new_constraints != nullptr) {
    *num_new_constraints = num_added;
  }
  if (num_added > 0) {
    // We update the solution state to match the new LP size.
    VLOG(3) << "Added " << num_added << " constraints.";
    solution_state->statuses.resize(solution_state->statuses.size() + num_added,
                                    glop::VariableStatus::BASIC);
  }

  // TODO(user): Instead of comparing num_deletable_constraints with the cut
  // limit, compare the number of deletable constraints not in the LP against
  // the limit.
  if (num_deletable_constraints_ > sat_parameters_.max_num_cuts()) {
    PermanentlyRemoveSomeConstraints();
  }

  time_limit_->AdvanceDeterministicTime(dtime_ - saved_dtime);

  // The LP changed only if we added new constraints or if some constraints
  // already inside changed (simplification or tighter bounds).
  if (current_lp_is_changed_) {
    current_lp_is_changed_ = false;
    return true;
  }
  return false;
}

void LinearConstraintManager::AddAllConstraintsToLp() {
  for (ConstraintIndex i(0); i < constraint_infos_.size(); ++i) {
    if (constraint_infos_[i].is_in_lp) continue;
    if (constraint_infos_[i].constraint.num_terms == 0) continue;

    constraint_infos_[i].is_in_lp = true;
    lp_constraints_.push_back(i);
  }
}

bool LinearConstraintManager::DebugCheckConstraint(
    const LinearConstraint& cut) {
  if (model_->Get<DebugSolution>() == nullptr) return true;
  const auto& debug_solution = *(model_->Get<DebugSolution>());

  IntegerValue activity(0);
  for (int i = 0; i < cut.num_terms; ++i) {
    const IntegerVariable var = cut.vars[i];
    const IntegerValue coeff = cut.coeffs[i];
    CHECK(debug_solution.ivar_has_value[var]);
    activity += coeff * debug_solution.ivar_values[var];
  }
  if (activity > cut.ub || activity < cut.lb) {
    LOG(INFO) << cut.DebugString();
    LOG(INFO) << "activity " << activity << " not in [" << cut.lb << ","
              << cut.ub << "]";
    return false;
  }
  return true;
}

void TopNCuts::AddCut(
    LinearConstraint ct, absl::string_view name,
    const util_intops::StrongVector<IntegerVariable, double>& lp_solution) {
  if (ct.num_terms == 0) return;
  const double normalized_violation = ct.NormalizedViolation(lp_solution);
  cuts_.Add({std::string(name), std::move(ct)}, normalized_violation);
}

void TopNCuts::TransferToManager(LinearConstraintManager* manager) {
  for (CutCandidate& candidate : *cuts_.MutableUnorderedElements()) {
    manager->AddCut(std::move(candidate.cut), candidate.name);
  }
  cuts_.Clear();
}

}  // namespace sat
}  // namespace operations_research