Google OR-Tools v9.15
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
linear_programming_constraint.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <array>
18#include <cmath>
19#include <cstddef>
20#include <cstdint>
21#include <functional>
22#include <limits>
23#include <memory>
24#include <numeric>
25#include <optional>
26#include <string>
27#include <utility>
28#include <vector>
29
30#include "absl/algorithm/container.h"
31#include "absl/cleanup/cleanup.h"
32#include "absl/container/flat_hash_map.h"
33#include "absl/log/check.h"
34#include "absl/log/log.h"
35#include "absl/log/vlog_is_on.h"
36#include "absl/numeric/int128.h"
37#include "absl/strings/str_cat.h"
38#include "absl/strings/str_format.h"
39#include "absl/strings/string_view.h"
40#include "absl/types/span.h"
47#include "ortools/glop/status.h"
54#include "ortools/sat/cuts.h"
56#include "ortools/sat/integer.h"
62#include "ortools/sat/model.h"
67#include "ortools/sat/util.h"
69#include "ortools/util/bitset.h"
70#include "ortools/util/rev.h"
74
75namespace operations_research {
76namespace sat {
77
78using glop::ColIndex;
80using glop::RowIndex;
81
83 if (is_sparse_) {
84 for (const glop::ColIndex col : non_zeros_) {
85 dense_vector_[col] = IntegerValue(0);
86 }
87 dense_vector_.resize(size, IntegerValue(0));
88 } else {
89 dense_vector_.assign(size, IntegerValue(0));
90 }
91 for (const glop::ColIndex col : non_zeros_) {
92 is_zeros_[col] = true;
93 }
94 is_zeros_.resize(size, true);
95 non_zeros_.clear();
96 is_sparse_ = true;
97}
98
99bool ScatteredIntegerVector::Add(glop::ColIndex col, IntegerValue value) {
100 const int64_t add = CapAdd(value.value(), dense_vector_[col].value());
101 if (add == std::numeric_limits<int64_t>::min() ||
102 add == std::numeric_limits<int64_t>::max())
103 return false;
104 dense_vector_[col] = IntegerValue(add);
105 if (is_sparse_ && is_zeros_[col]) {
106 is_zeros_[col] = false;
107 non_zeros_.push_back(col);
108 }
109 return true;
110}
111
112template <bool check_overflow>
114 const IntegerValue multiplier, absl::Span<const glop::ColIndex> cols,
115 absl::Span<const IntegerValue> coeffs, IntegerValue max_coeff_magnitude) {
  // Since we have the norm, this avoids checking each product below.
117 if (check_overflow) {
118 const IntegerValue prod = CapProdI(max_coeff_magnitude, multiplier);
119 if (AtMinOrMaxInt64(prod.value())) return false;
120 }
121
122 IntegerValue* data = dense_vector_.data();
123 const double threshold = 0.1 * static_cast<double>(dense_vector_.size());
124 const int num_terms = cols.size();
125 if (is_sparse_ && static_cast<double>(num_terms) < threshold) {
126 for (int i = 0; i < num_terms; ++i) {
127 const glop::ColIndex col = cols[i];
128 if (is_zeros_[col]) {
129 is_zeros_[col] = false;
130 non_zeros_.push_back(col);
131 }
132 const IntegerValue product = multiplier * coeffs[i];
133 if (check_overflow) {
134 if (AddIntoOverflow(product.value(),
135 data[col.value()].mutable_value())) {
136 return false;
137 }
138 } else {
139 data[col.value()] += product;
140 }
141 }
142 if (static_cast<double>(non_zeros_.size()) > threshold) {
143 is_sparse_ = false;
144 }
145 } else {
146 is_sparse_ = false;
147 for (int i = 0; i < num_terms; ++i) {
148 const glop::ColIndex col = cols[i];
149 const IntegerValue product = multiplier * coeffs[i];
150 if (check_overflow) {
151 if (AddIntoOverflow(product.value(),
152 data[col.value()].mutable_value())) {
153 return false;
154 }
155 } else {
156 data[col.value()] += product;
157 }
158 }
159 }
160 return true;
161}
162
// Force instantiations for tests.
165 IntegerValue multiplier, absl::Span<const glop::ColIndex> cols,
166 absl::Span<const IntegerValue> coeffs, IntegerValue max_coeff_magnitude);
167
169 IntegerValue multiplier, absl::Span<const glop::ColIndex> cols,
170 absl::Span<const IntegerValue> coeffs, IntegerValue max_coeff_magnitude);
171
173 absl::Span<const IntegerVariable> integer_variables,
174 IntegerValue upper_bound,
175 std::optional<std::pair<IntegerVariable, IntegerValue>> extra_term) {
176 // We first do one pass to compute the exact size and not overallocate.
177 int final_size = 0;
178 if (is_sparse_) {
179 for (const glop::ColIndex col : non_zeros_) {
180 const IntegerValue coeff = dense_vector_[col];
181 if (coeff == 0) continue;
182 ++final_size;
183 }
184 } else {
185 for (const IntegerValue coeff : dense_vector_) {
186 if (coeff != 0) ++final_size;
187 }
188 }
189 if (extra_term != std::nullopt) ++final_size;
190
191 // Allocate once.
192 LinearConstraint result;
193 result.resize(final_size);
194
195 // Copy terms.
196 int new_size = 0;
197 if (is_sparse_) {
198 std::sort(non_zeros_.begin(), non_zeros_.end());
199 for (const glop::ColIndex col : non_zeros_) {
200 const IntegerValue coeff = dense_vector_[col];
201 if (coeff == 0) continue;
202 result.vars[new_size] = integer_variables[col.value()];
203 result.coeffs[new_size] = coeff;
204 ++new_size;
205 }
206 } else {
207 const int size = dense_vector_.size();
208 for (glop::ColIndex col(0); col < size; ++col) {
209 const IntegerValue coeff = dense_vector_[col];
210 if (coeff == 0) continue;
211 result.vars[new_size] = integer_variables[col.value()];
212 result.coeffs[new_size] = coeff;
213 ++new_size;
214 }
215 }
216
217 result.lb = kMinIntegerValue;
218 result.ub = upper_bound;
219
220 if (extra_term != std::nullopt) {
221 result.vars[new_size] = extra_term->first;
222 result.coeffs[new_size] = extra_term->second;
223 ++new_size;
224 }
225
226 CHECK_EQ(new_size, final_size);
227 DivideByGCD(&result);
228 return result;
229}
230
232 absl::int128 rhs, absl::Span<const IntegerVariable> integer_variables,
233 absl::Span<const double> lp_solution, IntegerTrail* integer_trail,
234 CutData* result) {
235 result->terms.clear();
236 result->rhs = rhs;
237 absl::Span<const IntegerValue> dense_vector = dense_vector_;
238 if (is_sparse_) {
239 std::sort(non_zeros_.begin(), non_zeros_.end());
240 for (const glop::ColIndex col : non_zeros_) {
241 const IntegerValue coeff = dense_vector[col.value()];
242 if (coeff == 0) continue;
243 const IntegerVariable var = integer_variables[col.value()];
244 CHECK(result->AppendOneTerm(var, coeff, lp_solution[col.value()],
245 integer_trail->LevelZeroLowerBound(var),
246 integer_trail->LevelZeroUpperBound(var)));
247 }
248 } else {
249 for (int col(0); col < dense_vector.size(); ++col) {
250 const IntegerValue coeff = dense_vector[col];
251 if (coeff == 0) continue;
252 const IntegerVariable var = integer_variables[col];
253 CHECK(result->AppendOneTerm(var, coeff, lp_solution[col],
254 integer_trail->LevelZeroLowerBound(var),
255 integer_trail->LevelZeroUpperBound(var)));
256 }
257 }
258}
259
260std::vector<std::pair<glop::ColIndex, IntegerValue>>
262 std::vector<std::pair<glop::ColIndex, IntegerValue>> result;
263 if (is_sparse_) {
264 std::sort(non_zeros_.begin(), non_zeros_.end());
265 for (const glop::ColIndex col : non_zeros_) {
266 const IntegerValue coeff = dense_vector_[col];
267 if (coeff != 0) result.push_back({col, coeff});
268 }
269 } else {
270 const int size = dense_vector_.size();
271 for (glop::ColIndex col(0); col < size; ++col) {
272 const IntegerValue coeff = dense_vector_[col];
273 if (coeff != 0) result.push_back({col, coeff});
274 }
275 }
276 return result;
277}
278
279// TODO(user): make SatParameters singleton too, otherwise changing them after
280// a constraint was added will have no effect on this class.
282 Model* model, absl::Span<const IntegerVariable> vars)
283 : constraint_manager_(model),
284 parameters_(*(model->GetOrCreate<SatParameters>())),
285 model_(model),
286 time_limit_(model->GetOrCreate<TimeLimit>()),
287 integer_trail_(model->GetOrCreate<IntegerTrail>()),
288 trail_(model->GetOrCreate<Trail>()),
289 sat_solver_(model->GetOrCreate<SatSolver>()),
290 watcher_(model->GetOrCreate<GenericLiteralWatcher>()),
291 product_detector_(model->GetOrCreate<ProductDetector>()),
292 objective_definition_(model->GetOrCreate<ObjectiveDefinition>()),
293 shared_stats_(model->GetOrCreate<SharedStatistics>()),
294 shared_response_manager_(model->GetOrCreate<SharedResponseManager>()),
295 random_(model->GetOrCreate<ModelRandomGenerator>()),
296 symmetrizer_(model->GetOrCreate<LinearConstraintSymmetrizer>()),
297 linear_propagator_(model->GetOrCreate<LinearPropagator>()),
298 cover_cut_helper_(model),
299 integer_rounding_cut_helper_(model),
300 rlt_cut_helper_(model),
301 implied_bounds_processor_({}, integer_trail_,
302 model->GetOrCreate<ImpliedBounds>()),
303 dispatcher_(model->GetOrCreate<LinearProgrammingDispatcher>()),
304 mirror_lp_variable_(*model->GetOrCreate<ModelLpVariableMapping>()),
305 expanded_lp_solution_(*model->GetOrCreate<ModelLpValues>()),
306 expanded_reduced_costs_(*model->GetOrCreate<ModelReducedCosts>()) {
307 // Tweak the default parameters to make the solve incremental.
308 simplex_params_.set_use_dual_simplex(true);
309 simplex_params_.set_primal_feasibility_tolerance(
310 parameters_.lp_primal_tolerance());
311 simplex_params_.set_dual_feasibility_tolerance(
312 parameters_.lp_dual_tolerance());
313 if (parameters_.use_exact_lp_reason()) {
314 simplex_params_.set_change_status_to_imprecise(false);
315 }
316 simplex_.SetParameters(simplex_params_);
317 // Warning: SetRandom() must be called after SetParameters().
318 simplex_.SetRandom(*random_);
319 if (parameters_.search_branching() == SatParameters::LP_SEARCH) {
320 compute_reduced_cost_averages_ = true;
321 }
322
323 // Register our local rev int repository.
324 integer_trail_->RegisterReversibleClass(&rc_rev_int_repository_);
325 mirror_lp_variable_.resize(integer_trail_->NumIntegerVariables(),
327
328 // Initialize the IntegerVariable -> ColIndex mapping.
329 CHECK(std::is_sorted(vars.begin(), vars.end()));
330
331 // TODO(user): We shouldn't need to add variable from the orbit here in the
332 // presence of symmetry. However they can still appear in cut, so it is a
333 // bit tricky and require some refactoring to be tried.
334 ColIndex col{0};
335 for (const IntegerVariable positive_variable : vars) {
336 CHECK(VariableIsPositive(positive_variable));
337 implied_bounds_processor_.AddLpVariable(positive_variable);
338 (*dispatcher_)[positive_variable] = this;
339
340 if (!symmetrizer_->AppearInFoldedProblem(positive_variable)) continue;
341
342 integer_variables_.push_back(positive_variable);
343 extended_integer_variables_.push_back(positive_variable);
344 DCHECK_EQ(mirror_lp_variable_[positive_variable], glop::kInvalidCol);
345 mirror_lp_variable_[positive_variable] = col;
346 ++col;
347 }
348
349 // Complete the extended variables with the orbit afterwards.
350 if (symmetrizer_->HasSymmetry()) {
351 for (const IntegerVariable var : integer_variables_) {
352 if (!symmetrizer_->IsOrbitSumVar(var)) continue;
353 const int orbit_index = symmetrizer_->OrbitIndex(var);
354 for (const IntegerVariable var : symmetrizer_->Orbit(orbit_index)) {
355 extended_integer_variables_.push_back(var);
356 DCHECK_EQ(mirror_lp_variable_[var], glop::kInvalidCol);
357 mirror_lp_variable_[var] = col;
358 ++col;
359 }
360 }
361 }
362
363 // We use the "extended" vector for lp_solution_/lp_reduced_cost_.
364 CHECK_EQ(vars.size(), extended_integer_variables_.size());
365 lp_solution_.assign(vars.size(), std::numeric_limits<double>::infinity());
366 lp_reduced_cost_.assign(vars.size(), 0.0);
367
368 if (!vars.empty()) {
369 const IntegerVariable max_index = std::max(
370 NegationOf(vars.back()), integer_trail_->NumIntegerVariables() - 1);
371 if (max_index >= expanded_lp_solution_.size()) {
372 expanded_lp_solution_.assign(max_index.value() + 1, 0.0);
373 }
374 if (max_index >= expanded_reduced_costs_.size()) {
375 expanded_reduced_costs_.assign(max_index.value() + 1, 0.0);
376 }
377 }
378}
379
381 if (!VLOG_IS_ON(1)) return;
382 std::vector<std::pair<std::string, int64_t>> stats;
383 stats.push_back({"LinearProgrammingConstraint/num_root_level_skips",
384 num_root_level_skips_});
385 stats.push_back({"LinearProgrammingConstraint/num_root_level_solves",
386 num_root_level_solves_});
387 shared_stats_->AddStats(stats);
388}
389
391 DCHECK(!lp_constraint_is_registered_);
392 bool added = false;
393 bool folded = false;
394 const auto index = constraint_manager_.Add(std::move(ct), &added, &folded);
395
396 // If we added a folded constraints, lets add it to the CP propagators.
397 // This is important as this should tighten the bounds of the orbit sum vars.
398 if (added && folded) {
399 const auto& info = constraint_manager_.AllConstraints()[index];
400 const LinearConstraint& new_ct = info.constraint;
401 const absl::Span<const IntegerVariable> vars = new_ct.VarsAsSpan();
402 if (!info.ub_is_trivial) {
403 if (!linear_propagator_->AddConstraint({}, vars, new_ct.CoeffsAsSpan(),
404 new_ct.ub)) {
405 return false;
406 }
407 }
408 if (!info.lb_is_trivial) {
409 tmp_vars_.assign(vars.begin(), vars.end());
410 for (IntegerVariable& var : tmp_vars_) var = NegationOf(var);
411 if (!linear_propagator_->AddConstraint(
412 {}, tmp_vars_, new_ct.CoeffsAsSpan(), -new_ct.lb)) {
413 return false;
414 }
415 }
416 }
417 return true;
418}
419
421 DCHECK(!lp_constraint_is_registered_);
422 lp_constraint_is_registered_ = true;
423 model->GetOrCreate<LinearProgrammingConstraintCollection>()->push_back(this);
424
425 // Copy objective data to the constraint_manager_.
426 //
427 // Note(user): the sort is not really needed but should lead to better cache
428 // locality.
429 std::sort(integer_objective_.begin(), integer_objective_.end());
430 objective_infinity_norm_ = 0;
431 for (const auto [col, coeff] : integer_objective_) {
432 constraint_manager_.SetObjectiveCoefficient(integer_variables_[col.value()],
433 coeff);
434 objective_infinity_norm_ =
435 std::max(objective_infinity_norm_, IntTypeAbs(coeff));
436 }
437
438 // Set the LP to its initial content.
439 //
440 // Note that we always add LP constraint lazily if we have A LOT of them.
441 // This is because currently on large problem with millions of constraints,
442 // our LP is usually not fast enough anyway.
443 if (!parameters_.add_lp_constraints_lazily() &&
444 constraint_manager_.num_constraints() < 1e6) {
445 constraint_manager_.AddAllConstraintsToLp();
446 }
447 if (!CreateLpFromConstraintManager()) {
448 model->GetOrCreate<SatSolver>()->NotifyThatModelIsUnsat();
449 return;
450 }
451
452 watcher_id_ = watcher_->Register(this);
453 const int num_vars = integer_variables_.size();
454 orbit_indices_.clear();
455 for (int i = 0; i < num_vars; i++) {
456 const IntegerVariable pos_var = integer_variables_[i];
457 DCHECK(symmetrizer_->AppearInFoldedProblem(pos_var));
458 watcher_->WatchIntegerVariable(pos_var, watcher_id_, i);
459 if (symmetrizer_->IsOrbitSumVar(pos_var)) {
460 orbit_indices_.push_back(symmetrizer_->OrbitIndex(pos_var));
461 }
462 }
463 if (objective_is_defined_) {
464 watcher_->WatchUpperBound(objective_cp_, watcher_id_);
465 }
466 watcher_->SetPropagatorPriority(watcher_id_, 2);
467 watcher_->AlwaysCallAtLevelZero(watcher_id_);
468
469 // Registering it with the trail make sure this class is always in sync when
470 // it is used in the decision heuristics.
471 integer_trail_->RegisterReversibleClass(this);
472 watcher_->RegisterReversibleInt(watcher_id_, &rev_optimal_constraints_size_);
473}
474
475glop::ColIndex LinearProgrammingConstraint::GetMirrorVariable(
476 IntegerVariable positive_variable) {
477 DCHECK(VariableIsPositive(positive_variable));
478 return mirror_lp_variable_[positive_variable];
479}
480
482 IntegerValue coeff) {
483 CHECK(!lp_constraint_is_registered_);
484 objective_is_defined_ = true;
485 IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar);
486 if (ivar != pos_var) coeff = -coeff;
487 integer_objective_.push_back({GetMirrorVariable(pos_var), coeff});
488}
489
490// TODO(user): As the search progress, some variables might get fixed. Exploit
491// this to reduce the number of variables in the LP and in the
492// ConstraintManager? We might also detect during the search that two variable
493// are equivalent.
494//
495// TODO(user): On TSP/VRP with a lot of cuts, this can take 20% of the overall
496// running time. We should be able to almost remove most of this from the
497// profile by being more incremental (modulo LP scaling).
498//
499// TODO(user): A longer term idea for LP with a lot of variables is to not
500// add all variables to each LP solve and do some "sifting". That can be useful
501// for TSP for instance where the number of edges is large, but only a small
502// fraction will be used in the optimal solution.
// Rebuilds the internal LP representation (integer_lp_, objective, bounds and
// scaling) from the constraints currently selected by constraint_manager_.
//
// Returns false if a trivially infeasible constraint (lb > ub) is detected,
// in which case the whole model is infeasible.
//
// Order matters below: the scaling factors are computed first, then the LP is
// filled using *level zero* bounds (needed for the second scaling step), and
// only then are the bounds tightened to the current search state.
bool LinearProgrammingConstraint::CreateLpFromConstraintManager() {
  // Fill integer_lp_.
  integer_lp_.clear();
  integer_lp_cols_.clear();
  integer_lp_coeffs_.clear();

  infinity_norms_.clear();
  const auto& all_constraints = constraint_manager_.AllConstraints();
  for (const auto index : constraint_manager_.LpConstraints()) {
    const LinearConstraint& ct = all_constraints[index].constraint;
    if (ct.lb > ct.ub) {
      VLOG(1) << "Trivial infeasible bound in an LP constraint";
      return false;
    }

    integer_lp_.push_back(LinearConstraintInternal());
    LinearConstraintInternal& new_ct = integer_lp_.back();
    new_ct.lb = ct.lb;
    new_ct.ub = ct.ub;
    new_ct.lb_is_trivial = all_constraints[index].lb_is_trivial;
    new_ct.ub_is_trivial = all_constraints[index].ub_is_trivial;

    // The row infinity norm also accounts for the constraint bounds, not just
    // the coefficients.
    IntegerValue infinity_norm = 0;
    infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.lb));
    infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.ub));
    new_ct.start_in_buffer = integer_lp_cols_.size();

    // TODO(user): Make sure we don't have empty constraint!
    // this currently can happen in some corner cases.
    const int size = ct.num_terms;
    new_ct.num_terms = size;
    for (int i = 0; i < size; ++i) {
      // We only use positive variable inside this class.
      const IntegerVariable var = ct.vars[i];
      const IntegerValue coeff = ct.coeffs[i];
      infinity_norm = std::max(infinity_norm, IntTypeAbs(coeff));
      integer_lp_cols_.push_back(GetMirrorVariable(var));
      integer_lp_coeffs_.push_back(coeff);
    }
    infinity_norms_.push_back(infinity_norm);

    // It is important to keep lp_data_ "clean".
    DCHECK(std::is_sorted(
        integer_lp_cols_.data() + new_ct.start_in_buffer,
        integer_lp_cols_.data() + new_ct.start_in_buffer + new_ct.num_terms));
  }

  // We remove fixed variables from the objective. This should help the LP
  // scaling, but also our integer reason computation.
  // Their contribution is folded into integer_objective_offset_ instead, so
  // on a later rebuild the (already removed) entries are not counted twice.
  int new_size = 0;
  objective_infinity_norm_ = 0;
  for (const auto& entry : integer_objective_) {
    const IntegerVariable var = integer_variables_[entry.first.value()];
    if (integer_trail_->IsFixedAtLevelZero(var)) {
      integer_objective_offset_ +=
          entry.second * integer_trail_->LevelZeroLowerBound(var);
      continue;
    }
    objective_infinity_norm_ =
        std::max(objective_infinity_norm_, IntTypeAbs(entry.second));
    integer_objective_[new_size++] = entry;
  }
  integer_objective_.resize(new_size);
  objective_infinity_norm_ =
      std::max(objective_infinity_norm_, IntTypeAbs(integer_objective_offset_));

  // Scale everything.
  // TODO(user): As we have an idea of the LP optimal after the first solves,
  // maybe we can adapt the scaling accordingly.
  ComputeIntegerLpScalingFactors();

  // Tricky: we use level zero bounds here for the second scaling step below.
  FillLpData();

  // Fills the helper.
  scaler_.ConfigureFromFactors(row_factors_, col_factors_);
  scaler_.AverageCostScaling(&obj_with_slack_);
  scaler_.ContainOneBoundScaling(simplex_.MutableLowerBounds(),
                                 simplex_.MutableUpperBounds());

  // Since we used level zero bounds above, fix them.
  UpdateBoundsOfLpVariables();

  // Set the information for the step to polish the LP basis. All our variables
  // are integer, but for now, we just try to minimize the fractionality of the
  // binary variables.
  if (parameters_.polish_lp_solution()) {
    simplex_.ClearIntegralityScales();
    const int num_vars = integer_variables_.size();
    for (int i = 0; i < num_vars; ++i) {
      const IntegerVariable cp_var = integer_variables_[i];
      const IntegerValue lb = integer_trail_->LevelZeroLowerBound(cp_var);
      const IntegerValue ub = integer_trail_->LevelZeroUpperBound(cp_var);
      // Only [0, 1] (Boolean) variables get an integrality scale for now.
      if (lb != 0 || ub != 1) continue;
      simplex_.SetIntegralityScale(
          glop::ColIndex(i),
          1.0 / scaler_.VariableScalingFactor(glop::ColIndex(i)));
    }
  }

  VLOG(3) << "LP relaxation: " << integer_lp_.size() << " x "
          << integer_variables_.size() << ". "
          << constraint_manager_.AllConstraints().size()
          << " Managed constraints.";
  return true;
}
609
610// TODO(user): This is a duplicate of glop scaling code, but it allows to
611// work directly on our representation...
612void LinearProgrammingConstraint::ComputeIntegerLpScalingFactors() {
613 const int num_rows = integer_lp_.size();
614 const int num_cols = integer_variables_.size();
615
616 // Assign vectors.
617 const double infinity = std::numeric_limits<double>::infinity();
618 row_factors_.assign(num_rows, 1.0);
619 col_factors_.assign(num_cols, 1.0);
620
621 // Cache pointers to avoid refetching them.
622 IntegerValue* coeffs = integer_lp_coeffs_.data();
623 glop::ColIndex* cols = integer_lp_cols_.data();
624 double* row_factors = row_factors_.data();
625 double* col_factors = col_factors_.data();
626
627 col_min_.assign(num_cols, infinity);
628 col_max_.assign(num_cols, 0.0);
629 double* col_min = col_min_.data();
630 double* col_max = col_max_.data();
631
632 for (int i = 0; i < 4; ++i) {
633 // Scale row geometrically.
634 for (int row = 0; row < num_rows; ++row) {
635 double min_scaled = +infinity;
636 double max_scaled = 0.0;
637 const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)];
638 for (int i = 0; i < ct.num_terms; ++i) {
639 const int index = ct.start_in_buffer + i;
640 const int col = cols[index].value();
641 const double coeff = static_cast<double>(coeffs[index].value());
642 const double scaled_magnitude = col_factors[col] * std::abs(coeff);
643 min_scaled = std::min(min_scaled, scaled_magnitude);
644 max_scaled = std::max(max_scaled, scaled_magnitude);
645 }
646
647 if (ct.num_terms == 0) continue;
648 const Fractional factor(std::sqrt(max_scaled * min_scaled));
649 row_factors[row] = 1.0 / factor;
650 }
651
652 // Scale columns geometrically.
653 for (int row = 0; row < num_rows; ++row) {
654 const double row_factor = row_factors[row];
655 const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)];
656 for (int i = 0; i < ct.num_terms; ++i) {
657 const int index = ct.start_in_buffer + i;
658 const int col = cols[index].value();
659 const double coeff = static_cast<double>(coeffs[index].value());
660 const double scaled_magnitude = row_factor * std::abs(coeff);
661 col_min[col] = std::min(col_min[col], scaled_magnitude);
662 col_max[col] = std::max(col_max[col], scaled_magnitude);
663 }
664 }
665 for (int col = 0; col < num_cols; ++col) {
666 if (col_min[col] == infinity) continue; // Empty.
667 col_factors[col] = 1.0 / std::sqrt(col_min[col] * col_max[col]);
668
669 // Reset, in case we have many fixed variable, faster than assign again.
670 col_min[col] = infinity;
671 col_max[col] = 0;
672 }
673 }
674
675 // Now we equilibrate (i.e. just divide by the max) the row
676 for (int row = 0; row < num_rows; ++row) {
677 double max_scaled = 0.0;
678 const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)];
679 for (int i = 0; i < ct.num_terms; ++i) {
680 const int index = ct.start_in_buffer + i;
681 const int col = cols[index].value();
682 const double coeff = static_cast<double>(coeffs[index].value());
683 const double scaled_magnitude = col_factors[col] * std::abs(coeff);
684 max_scaled = std::max(max_scaled, scaled_magnitude);
685 }
686 if (ct.num_terms == 0) continue;
687 row_factors[row] = 1.0 / max_scaled;
688 }
689
690 // And finally the columns.
691 for (int row = 0; row < num_rows; ++row) {
692 const double row_factor = row_factors[row];
693 const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)];
694 for (int i = 0; i < ct.num_terms; ++i) {
695 const int index = ct.start_in_buffer + i;
696 const int col = cols[index].value();
697 const double coeff = static_cast<double>(coeffs[index].value());
698 const double scaled_magnitude = row_factor * std::abs(coeff);
699 col_max[col] = std::max(col_max[col], scaled_magnitude);
700 }
701 }
702 for (int col = 0; col < num_cols; ++col) {
703 if (col_max[col] == 0) continue; // Empty.
704 col_factors[col] = 1.0 / col_max[col];
705 }
706}
707
// Copies the scaled LP (matrix, objective and variable/slack bounds) into the
// simplex data structures.
//
// The matrix is handed to the simplex in transposed form with one slack
// variable appended per row. Variable bounds deliberately use the *level
// zero* bounds: CreateLpFromConstraintManager() runs a second scaling step on
// them and then overrides them with the current search bounds.
void LinearProgrammingConstraint::FillLpData() {
  const int num_rows = integer_lp_.size();
  const int num_cols = integer_variables_.size();
  IntegerValue* coeffs = integer_lp_coeffs_.data();
  glop::ColIndex* cols = integer_lp_cols_.data();
  double* row_factors = row_factors_.data();
  double* col_factors = col_factors_.data();

  // Now fill the transposed matrix: each LP row becomes one column here.
  glop::CompactSparseMatrix* data = simplex_.MutableTransposedMatrixWithSlack();
  data->Reset(glop::RowIndex(num_cols + num_rows));
  for (int row = 0; row < num_rows; ++row) {
    const LinearConstraintInternal& ct = integer_lp_[RowIndex(row)];
    const double row_factor = row_factors[row];
    for (int i = 0; i < ct.num_terms; ++i) {
      const int index = ct.start_in_buffer + i;
      const int col = cols[index].value();
      const double coeff = static_cast<double>(coeffs[index].value());
      const double scaled_coeff = row_factor * col_factors[col] * coeff;
      data->AddEntryToCurrentColumn(RowIndex(col), scaled_coeff);
    }

    // Add slack.
    data->AddEntryToCurrentColumn(RowIndex(num_cols + row), 1.0);

    // Close column.
    data->CloseCurrentColumn();
  }
  DCHECK_EQ(data->num_cols(), integer_lp_.size());

  // Fill and scale the objective. Slack columns keep a zero cost.
  const glop::ColIndex num_cols_with_slacks(num_rows + num_cols);
  obj_with_slack_.assign(num_cols_with_slacks, 0.0);
  for (const auto [col, value] : integer_objective_) {
    obj_with_slack_[col] = ToDouble(value) * col_factors[col.value()];
  }

  // Fill and scale the bounds. The slack bounds are the negated, scaled
  // constraint bounds (slack in [-ub, -lb]); trivial bounds become infinite.
  simplex_.MutableLowerBounds()->resize(num_cols_with_slacks);
  simplex_.MutableUpperBounds()->resize(num_cols_with_slacks);
  Fractional* lb_with_slack = simplex_.MutableLowerBounds()->data();
  Fractional* ub_with_slack = simplex_.MutableUpperBounds()->data();
  const double infinity = std::numeric_limits<double>::infinity();
  for (int row = 0; row < integer_lp_.size(); ++row) {
    const LinearConstraintInternal& ct = integer_lp_[glop::RowIndex(row)];

    // TODO(user): Using trivial bound might be good for things like
    // sum bool <= 1 since setting the slack in [0, 1] can lead to bound flip in
    // the simplex. However if the bound is large, maybe it make more sense to
    // use +/- infinity.
    const double factor = row_factors[row];
    lb_with_slack[num_cols + row] =
        ct.ub_is_trivial ? -infinity : ToDouble(-ct.ub) * factor;
    ub_with_slack[num_cols + row] =
        ct.lb_is_trivial ? +infinity : ToDouble(-ct.lb) * factor;
  }

  // We scale the LP using the level zero bounds that we later override
  // with the current ones.
  //
  // TODO(user): As part of the scaling, we may also want to shift the initial
  // variable bounds so that each variable contain the value zero in their
  // domain. Maybe just once and for all at the beginning.
  const int num_vars = integer_variables_.size();
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable cp_var = integer_variables_[i];
    const double factor = col_factors[i];
    lb_with_slack[i] =
        ToDouble(integer_trail_->LevelZeroLowerBound(cp_var)) * factor;
    ub_with_slack[i] =
        ToDouble(integer_trail_->LevelZeroUpperBound(cp_var)) * factor;
  }
}
781
782void LinearProgrammingConstraint::FillReducedCostReasonIn(
783 const glop::DenseRow& reduced_costs,
784 std::vector<IntegerLiteral>* integer_reason) {
785 integer_reason->clear();
786 const int num_vars = integer_variables_.size();
787 for (int i = 0; i < num_vars; i++) {
788 const double rc = reduced_costs[glop::ColIndex(i)];
789 if (rc > kLpEpsilon) {
790 integer_reason->push_back(
791 integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
792 } else if (rc < -kLpEpsilon) {
793 integer_reason->push_back(
794 integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
795 }
796 }
797
798 integer_trail_->RemoveLevelZeroBounds(integer_reason);
799}
800
802 // Get rid of all optimal constraint each time we go back to level zero.
803 if (level == 0) rev_optimal_constraints_size_ = 0;
804 optimal_constraints_.resize(rev_optimal_constraints_size_);
805 cumulative_optimal_constraint_sizes_.resize(rev_optimal_constraints_size_);
806 if (lp_solution_is_set_ && level < lp_solution_level_) {
807 lp_solution_is_set_ = false;
808 }
809
810 if (level < previous_level_) {
811 lp_at_optimal_ = false;
812 lp_objective_lower_bound_ = -std::numeric_limits<double>::infinity();
813 }
814 previous_level_ = level;
815
816 // Special case for level zero, we "reload" any previously known optimal
817 // solution from that level.
818 //
819 // TODO(user): Keep all optimal solution in the current branch?
820 // TODO(user): Still try to add cuts/constraints though!
821 // TODO(user): Reload the basis? This might cause issue with the basis
822 // saving/loading code in lb_tree_search.
823 if (level == 0 && !lp_solution_is_set_ && !level_zero_lp_solution_.empty()) {
824 lp_solution_is_set_ = true;
825 lp_solution_ = level_zero_lp_solution_;
826 lp_solution_level_ = 0;
827 // Add the fixed variables. They might have been skipped when we did the
828 // linear relaxation of the model, but cut generators expect all variables
829 // to have an LP value.
830 if (expanded_lp_solution_.size() < integer_trail_->NumIntegerVariables()) {
831 expanded_lp_solution_.resize(integer_trail_->NumIntegerVariables());
832 }
833 for (IntegerVariable i(0); i < integer_trail_->NumIntegerVariables(); ++i) {
834 if (integer_trail_->IsFixed(i)) {
835 expanded_lp_solution_[i] = ToDouble(integer_trail_->LowerBound(i));
836 }
837 }
838 for (int i = 0; i < lp_solution_.size(); i++) {
839 const IntegerVariable var = extended_integer_variables_[i];
840 expanded_lp_solution_[var] = lp_solution_[i];
841 expanded_lp_solution_[NegationOf(var)] = -lp_solution_[i];
842 }
843 }
844}
845
847 cut_generators_.push_back(std::move(generator));
848}
849
// NOTE(review): the doxygen listing dropped source line 850, which should
// contain the start of this function's signature — presumably
// `bool LinearProgrammingConstraint::IncrementalPropagate(` — confirm against
// the original repository file.
//
// Decides whether the watched bound changes invalidate the cached LP solution;
// only re-runs Propagate() (and thus the LP) when they do.
851 const std::vector<int>& watch_indices) {
852 if (!enabled_) return true;
853
854 // If we have a really deep branch, with a lot of LP explanation constraint,
855 // we could take a quadratic amount of memory: O(num_var) per number of
856 // propagation in that branch. To avoid that, once the memory starts to be
857 // over a few GB, we only propagate from time to time. This way we do not need
858 // to keep that many constraints around.
859 //
860 // Note that 100 Millions int32_t variables, with the int128 coefficients and
861 // extra propagation vector is already a few GB.
862 if (!cumulative_optimal_constraint_sizes_.empty()) {
863 const double current_size =
864 static_cast<double>(cumulative_optimal_constraint_sizes_.back());
865 const double low_limit = 1e7;
866 if (current_size > low_limit) {
867 // We only propagate if we use less than 100 times the number of current
868 // integer literals enqueued.
869 const double num_enqueues = static_cast<double>(integer_trail_->Index());
870 if ((current_size - low_limit) > 100 * num_enqueues) return true;
871 }
872 }
873
874 if (!lp_solution_is_set_ || num_force_lp_call_on_next_propagate_ > 0) {
875 return Propagate();
876 }
877
878 // At level zero, if there is still a chance to add cuts or lazy constraints,
879 // we re-run the LP.
880 //
881 // TODO(user): I am not sure this does anything anymore because of
882 // AlwaysCallAtLevelZero() that might bypass IncrementalPropagate() completely
883 // if no variables changed.
884 if (trail_->CurrentDecisionLevel() == 0 && !lp_at_level_zero_is_final_) {
885 return Propagate();
886 }
887
888 // Check whether the change breaks the current LP solution. If it does, call
889 // Propagate() on the current LP.
890 for (const int index : watch_indices) {
891 const double lb =
892 ToDouble(integer_trail_->LowerBound(integer_variables_[index]));
893 const double ub =
894 ToDouble(integer_trail_->UpperBound(integer_variables_[index]));
895 const double value = lp_solution_[index];
896 if (value < lb - kCpEpsilon || value > ub + kCpEpsilon) return Propagate();
897 }
898
899 // TODO(user): The saved lp solution is still valid given the current variable
900 // bounds, so the LP optimal didn't change. However we might still want to add
901 // new cuts or new lazy constraints?
902 //
903 // TODO(user): Propagate the last optimal_constraint? Note that we need
904 // to be careful since the reversible int in IntegerSumLE are not registered.
905 // However, because we delete "optimal constraints" on backtrack, we might not
906 // care.
907 //
908 // Remark: Note that if we do the sequence SetBasis() / Propagate() /
909 // GetAndSaveBasis() and we are in the case where the solution is still
910 // optimal, we should get the basis from when the lp solution was set which
911 // should be what we want. Even if we set garbage during SetBasis() it should
912 // be ignored. TODO(user): We might still have problem at level zero.
913 return true;
914}
915
916glop::Fractional LinearProgrammingConstraint::GetVariableValueAtCpScale(
917 glop::ColIndex var) {
918 return scaler_.UnscaleVariableValue(var, simplex_.GetVariableValue(var));
919}
920
// NOTE(review): the doxygen listing dropped source line 921, which should
// contain the start of this accessor's signature — presumably
// `double LinearProgrammingConstraint::GetSolutionValue(` — confirm against
// the original repository file. It reads the cached LP solution value of the
// LP column mirroring the given integer variable.
922 IntegerVariable variable) const {
923 return lp_solution_[mirror_lp_variable_[variable].value()];
924}
925
926void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
927 const int num_vars = integer_variables_.size();
928 Fractional* lb_with_slack = simplex_.MutableLowerBounds()->data();
929 Fractional* ub_with_slack = simplex_.MutableUpperBounds()->data();
930 for (int i = 0; i < num_vars; i++) {
931 const IntegerVariable cp_var = integer_variables_[i];
932 const double lb =
933 static_cast<double>(integer_trail_->LowerBound(cp_var).value());
934 const double ub =
935 static_cast<double>(integer_trail_->UpperBound(cp_var).value());
936 const double factor = scaler_.VariableScalingFactor(glop::ColIndex(i));
937 lb_with_slack[i] = lb * factor;
938 ub_with_slack[i] = ub * factor;
939 }
940}
941
// Runs the simplex on the current LP (re-solving from scratch if the solver
// reports ABNORMAL), records statistics, and on OPTIMAL (or when
// stop_after_root_propagation() is set) caches the solution and reduced costs
// in both the compact and the expanded (per IntegerVariable) vectors.
// Returns false only when the underlying solver reports an error.
942bool LinearProgrammingConstraint::SolveLp() {
943 const int level = trail_->CurrentDecisionLevel();
944 if (level == 0) {
945 lp_at_level_zero_is_final_ = false;
946 }
947
948 const double unscaling_factor = 1.0 / scaler_.ObjectiveScalingFactor();
949 const double offset_before_unscaling =
950 ToDouble(integer_objective_offset_) * scaler_.ObjectiveScalingFactor();
951 auto status = simplex_.MinimizeFromTransposedMatrixWithSlack(
952 obj_with_slack_, unscaling_factor, offset_before_unscaling, time_limit_);
953 DCHECK_EQ(simplex_.GetProblemNumRows(), integer_lp_.size());
954
955 // Let's re-solve from scratch if we encounter this status.
956 if (simplex_.GetProblemStatus() == glop::ProblemStatus::ABNORMAL) {
957 VLOG(2) << "The LP solver returned abnormal, resolving from scratch";
958 simplex_.ClearStateForNextSolve();
959 status = simplex_.MinimizeFromTransposedMatrixWithSlack(
960 obj_with_slack_, unscaling_factor, offset_before_unscaling,
961 time_limit_);
962 }
963
964 state_ = simplex_.GetState();
965 total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
966 if (!status.ok()) {
967 VLOG(2) << "The LP solver encountered an error: " << status.error_message();
968 simplex_.ClearStateForNextSolve();
969 return false;
970 }
971 average_degeneracy_.AddData(CalculateDegeneracy());
972 if (average_degeneracy_.CurrentAverage() >= 1000.0) {
973 VLOG(2) << "High average degeneracy: "
974 << average_degeneracy_.CurrentAverage();
975 }
976
977 const int status_as_int = static_cast<int>(simplex_.GetProblemStatus());
978 if (status_as_int >= num_solves_by_status_.size()) {
979 num_solves_by_status_.resize(status_as_int + 1);
980 }
981 num_solves_++;
982 num_solves_by_status_[status_as_int]++;
983 VLOG(2) << DimensionString() << " lvl:" << trail_->CurrentDecisionLevel()
984 << " " << simplex_.GetProblemStatus()
985 << " iter:" << simplex_.GetNumberOfIterations()
986 << " obj:" << simplex_.GetObjectiveValue() << " scaled:"
987 << objective_definition_->ScaleObjective(
988 simplex_.GetObjectiveValue());
989
990 if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL ||
991 simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE) {
992 lp_objective_lower_bound_ = simplex_.GetObjectiveValue();
993 }
994 lp_at_optimal_ = simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL;
995
996 // If stop_after_root_propagation() is true, we still copy whatever we have as
997 // these values will be used for the local-branching lns heuristic.
998 if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL ||
999 parameters_.stop_after_root_propagation()) {
1000 lp_solution_is_set_ = true;
1001 lp_solution_level_ = trail_->CurrentDecisionLevel();
1002 const int num_vars = integer_variables_.size();
1003 const auto reduced_costs = simplex_.GetReducedCosts().const_view();
1004 for (int i = 0; i < num_vars; i++) {
1005 const glop::ColIndex col(i);
1006 const IntegerVariable var = integer_variables_[i];
1007
1008 const glop::Fractional value = GetVariableValueAtCpScale(col);
1009 lp_solution_[i] = value;
1010 expanded_lp_solution_[var] = value;
1011 expanded_lp_solution_[NegationOf(var)] = -value;
1012
1013 const glop::Fractional rc =
1014 scaler_.UnscaleReducedCost(col, reduced_costs[col]);
1015 lp_reduced_cost_[i] = rc;
1016 expanded_reduced_costs_[var] = rc;
1017 expanded_reduced_costs_[NegationOf(var)] = -rc;
1018 }
1019
1020 // Let's fix the result in case of symmetry since the variables in symmetry
1021 // orbits are actually not part of the LP, they will just be at their bounds.
1022 for (const int orbit_index : orbit_indices_) {
1023 const IntegerVariable sum_var = symmetrizer_->OrbitSumVar(orbit_index);
1024 const double lp_value = expanded_lp_solution_[sum_var];
1025 const absl::Span<const IntegerVariable> orbit =
1026 symmetrizer_->Orbit(orbit_index);
1027
1028 // At level zero, we want to focus on the cuts, and we split the value
1029 // evenly across all the orbit. However at positive level we don't do
1030 // cuts and we want values that make more sense to follow heuristically.
1031 // In particular, if the sum_var is at its lower/upper bounds, we want
1032 // the same for all variables from the orbit. This partially addresses the
1033 // TODO below.
1034 bool to_lower = false;
1035 bool to_upper = false;
1036 if (trail_->CurrentDecisionLevel() > 0) {
1037 const double lb =
1038 static_cast<double>(integer_trail_->LowerBound(sum_var).value());
1039 const double ub =
1040 static_cast<double>(integer_trail_->UpperBound(sum_var).value());
1041 if (std::abs(lp_value - lb) < 1e-2) {
1042 to_lower = true;
1043 } else if (std::abs(lp_value - ub) < 1e-2) {
1044 to_upper = true;
1045 }
1046 }
1047
1048 // We assign sum / orbit_size to each variable.
1049 // This is still an LP optimal, but not necessarily a good heuristic.
1050 //
1051 // TODO(user): using sum / orbit_size is good for the cut generation that
1052 // might still use these variables, any violated cuts on the original
1053 // problem where all variables in the orbit have the same value will
1054 // result in a violated cut for the folded problem. However it is probably
1055 // not so good for the heuristics that use the LP values. In particular
1056 // it might result in LP value not even within the bounds of the
1057 // individual variable since as we branch, we don't have an identical
1058 // domain for all variables in an orbit. Maybe we can generate two
1059 // solutions vectors, one for the cuts and one for the heuristics, or we
1060 // can add custom code to the cuts so that they don't depend on this.
1061 const double even_split = lp_value / static_cast<double>(orbit.size());
1062
1063 // For the reduced costs, they are the same. There should be no
1064 // complication there.
1065 const double new_rc = expanded_reduced_costs_[sum_var];
1066
1067 for (const IntegerVariable var : orbit) {
1068 const glop::ColIndex col = GetMirrorVariable(var);
1069
1070 double new_value = even_split;
1071 if (to_lower) {
1072 new_value =
1073 static_cast<double>(integer_trail_->LowerBound(var).value());
1074 } else if (to_upper) {
1075 new_value =
1076 static_cast<double>(integer_trail_->UpperBound(var).value());
1077 }
1078
1079 lp_solution_[col.value()] = new_value;
1080 expanded_lp_solution_[var] = new_value;
1081 expanded_lp_solution_[NegationOf(var)] = -new_value;
1082
1083 lp_reduced_cost_[col.value()] = new_rc;
1084 expanded_reduced_costs_[var] = new_rc;
1085 expanded_reduced_costs_[NegationOf(var)] = -new_rc;
1086 }
1087 }
1088
1089 // Compute integrality.
1090 lp_solution_is_integer_ = true;
1091 for (const double value : lp_solution_) {
1092 if (std::abs(value - std::round(value)) > kCpEpsilon) {
1093 lp_solution_is_integer_ = false;
1094 break;
1095 }
1096 }
1097
1098 if (lp_solution_level_ == 0) {
1099 level_zero_lp_solution_ = lp_solution_;
1100 }
1101 }
1102
1103 return true;
1104}
1105
// Exploits the last simplex result: reports a conflict on DUAL_UNBOUNDED
// (infeasible LP), and when an objective is defined propagates a new objective
// lower bound plus reduced-cost strengthening deductions (exactly or
// approximately depending on use_exact_lp_reason()). Returns false when a
// conflict was detected.
1106bool LinearProgrammingConstraint::AnalyzeLp() {
1107 // A dual-unbounded problem is infeasible. We use the dual ray reason.
1108 if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_UNBOUNDED) {
1109 if (parameters_.use_exact_lp_reason()) {
1110 return PropagateExactDualRay();
1111 }
1112 FillReducedCostReasonIn(simplex_.GetDualRayRowCombination(),
1113 &integer_reason_);
1114 return integer_trail_->ReportConflict(integer_reason_);
1115 }
1116
1117 // TODO(user): Update limits for DUAL_UNBOUNDED status as well.
1118 UpdateSimplexIterationLimit(/*min_iter=*/10, /*max_iter=*/1000);
1119
1120 // Optimality deductions if problem has an objective.
1121 // If there is no objective, then all duals will just be zero.
1122 if (objective_is_defined_ &&
1123 (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL ||
1124 simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE)) {
1125 // TODO(user): Maybe do a bit less computation when we cannot propagate
1126 // anything.
1127 if (parameters_.use_exact_lp_reason()) {
1128 if (!PropagateExactLpReason()) return false;
1129
1130 // Display when the inexact bound would have propagated more.
1131 if (VLOG_IS_ON(2)) {
1132 const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
1133 const IntegerValue approximate_new_lb(static_cast<int64_t>(
1134 std::ceil(relaxed_optimal_objective - kCpEpsilon)));
1135 const IntegerValue propagated_lb =
1136 integer_trail_->LowerBound(objective_cp_);
1137 if (approximate_new_lb > propagated_lb) {
1138 VLOG(2) << "LP objective [ " << ToDouble(propagated_lb) << ", "
1139 << ToDouble(integer_trail_->UpperBound(objective_cp_))
1140 << " ] approx_lb += "
1141 << ToDouble(approximate_new_lb - propagated_lb) << " gap: "
1142 << integer_trail_->UpperBound(objective_cp_) - propagated_lb;
1143 }
1144 }
1145 } else {
1146 // Try to filter optimal objective value. Note that GetObjectiveValue()
1147 // already takes care of the scaling so that it returns an objective in the
1148 // CP world.
1149 FillReducedCostReasonIn(simplex_.GetReducedCosts(), &integer_reason_);
1150 const double objective_cp_ub =
1151 ToDouble(integer_trail_->UpperBound(objective_cp_));
1152 const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
1153 ReducedCostStrengtheningDeductions(objective_cp_ub -
1154 relaxed_optimal_objective);
1155 if (!deductions_.empty()) {
1156 deductions_reason_ = integer_reason_;
1157 deductions_reason_.push_back(
1158 integer_trail_->UpperBoundAsLiteral(objective_cp_));
1159 }
1160
1161 // Push new objective lb.
1162 const IntegerValue approximate_new_lb(static_cast<int64_t>(
1163 std::ceil(relaxed_optimal_objective - kCpEpsilon)));
1164 if (approximate_new_lb > integer_trail_->LowerBound(objective_cp_)) {
1165 const IntegerLiteral deduction =
1166 IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_new_lb);
1167 if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
1168 return false;
1169 }
1170 }
1171
1172 // Push reduced cost strengthening bounds.
1173 if (!deductions_.empty()) {
1174 const int trail_index_with_same_reason = integer_trail_->Index();
1175 for (const IntegerLiteral deduction : deductions_) {
1176 if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
1177 trail_index_with_same_reason)) {
1178 return false;
1179 }
1180 }
1181 }
1182 }
1183 }
1184
1185 // Copy more info about the current solution.
1186 if (compute_reduced_cost_averages_ &&
1187 simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
1188 CHECK(lp_solution_is_set_);
1189 UpdateAverageReducedCosts();
1190 }
1191
1192 // On some problems, LP solves and cut rounds can be slow, so we report
1193 // the current possible objective improvement in the middle of the
1194 // propagation, not just at the end.
1195 //
1196 // Note that we currently only do that if the LP is "full" and its objective
1197 // is the same as the one of the whole problem.
1198 if (objective_is_defined_ &&
1199 objective_definition_->objective_var == objective_cp_ &&
1200 trail_->CurrentDecisionLevel() == 0) {
1201 shared_response_manager_->UpdateInnerObjectiveBounds(
1202 model_->Name(), integer_trail_->LowerBound(objective_cp_),
1203 integer_trail_->UpperBound(objective_cp_));
1204 }
1205
1206 return true;
1207}
1208
1209// Note that since we call this on the constraint with slack, we actually have
1210// linear expression == rhs, we can use this to propagate more!
1211//
1212// TODO(user): Also propagate on -cut ? in practice we already do that in many
1213// places were we try to generate the cut on -cut... But we could do it sooner
1214// and more cleanly here.
// Preprocesses a candidate cut (which, with its slack terms, is an equality):
// complements terms so all coefficients are positive, shrinks each term's
// bound_diff using a reachable-values DP, propagates any resulting tighter
// variable/constraint bounds, and removes fixed terms. Returns true iff some
// term still has a fractional LP value (otherwise nothing can be cut). May
// set problem_proven_infeasible_by_cuts_.
1215bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack,
1216 CutData* cut) {
1217 // Because of complement, all coeffs and all terms are positive after this.
1218 cut->ComplementForPositiveCoefficients();
1219 if (cut->rhs < 0) {
1220 problem_proven_infeasible_by_cuts_ = true;
1221 return false;
1222 }
1223
1224 // Limited DP to compute first few reachable values.
1225 // Note that all coeff are positive.
1226 reachable_.Reset();
1227 for (const CutTerm& term : cut->terms) {
1228 reachable_.Add(term.coeff.value());
1229 }
1230
1231 // Extra propag since we know it is actually an equality constraint.
1232 if (cut->rhs < absl::int128(reachable_.LastValue()) &&
1233 !reachable_.MightBeReachable(static_cast<int64_t>(cut->rhs))) {
1234 problem_proven_infeasible_by_cuts_ = true;
1235 return false;
1236 }
1237
1238 bool some_fixed_terms = false;
1239 bool some_fractional_positions = false;
1240 for (CutTerm& term : cut->terms) {
1241 const absl::int128 magnitude128 = term.coeff.value();
1242 const absl::int128 range =
1243 absl::int128(term.bound_diff.value()) * magnitude128;
1244
1245 IntegerValue new_diff = term.bound_diff;
1246 if (range > cut->rhs) {
1247 new_diff = static_cast<int64_t>(cut->rhs / magnitude128);
1248 }
1249 {
1250 // Extra propag since we know it is actually an equality constraint.
1251 absl::int128 rest128 =
1252 cut->rhs - absl::int128(new_diff.value()) * magnitude128;
1253 while (rest128 < absl::int128(reachable_.LastValue()) &&
1254 !reachable_.MightBeReachable(static_cast<int64_t>(rest128))) {
1255 ++total_num_eq_propagations_;
1256 CHECK_GT(new_diff, 0);
1257 --new_diff;
1258 rest128 += magnitude128;
1259 }
1260 }
1261
1262 if (new_diff < term.bound_diff) {
1263 term.bound_diff = new_diff;
1264
1265 const IntegerVariable var = term.expr_vars[0];
1266 if (var < first_slack) {
1267 // Normal variable.
1268 ++total_num_cut_propagations_;
1269
1270 // Note that at this stage we only have X - lb or ub - X.
1271 if (term.expr_coeffs[0] == 1) {
1272 // X + offset <= bound_diff
1273 if (!integer_trail_->Enqueue(
// NOTE(review): the doxygen listing dropped source line 1274 here. Given
// the comment above, it is presumably `IntegerLiteral::LowerOrEqual(` —
// confirm against the original repository file.
1275 var, term.bound_diff - term.expr_offset),
1276 {}, {})) {
1277 problem_proven_infeasible_by_cuts_ = true;
1278 return false;
1279 }
1280 } else {
1281 CHECK_EQ(term.expr_coeffs[0], -1);
1282 // offset - X <= bound_diff
1283 if (!integer_trail_->Enqueue(
// NOTE(review): the doxygen listing dropped source line 1284 here. Given
// the comment above, it is presumably `IntegerLiteral::GreaterOrEqual(` —
// confirm against the original repository file.
1285 var, term.expr_offset - term.bound_diff),
1286 {}, {})) {
1287 problem_proven_infeasible_by_cuts_ = true;
1288 return false;
1289 }
1290 }
1291 } else {
1292 // This is a tighter bound on one of the constraint! like a cut. Note
1293 // that in some corner case, new cut can be merged and update the bounds
1294 // of the constraint before this code.
1295 const int slack_index = (var.value() - first_slack.value()) / 2;
1296 const glop::RowIndex row = tmp_slack_rows_[slack_index];
1297 if (term.expr_coeffs[0] == 1) {
1298 // slack = ct + offset <= bound_diff;
1299 const IntegerValue new_ub = term.bound_diff - term.expr_offset;
1300 if (constraint_manager_.UpdateConstraintUb(row, new_ub)) {
1301 integer_lp_[row].ub = new_ub;
1302 }
1303 } else {
1304 // slack = offset - ct <= bound_diff;
1305 CHECK_EQ(term.expr_coeffs[0], -1);
1306 const IntegerValue new_lb = term.expr_offset - term.bound_diff;
1307 if (constraint_manager_.UpdateConstraintLb(row, new_lb)) {
1308 integer_lp_[row].lb = new_lb;
1309 }
1310 }
1311 }
1312 }
1313
1314 if (term.bound_diff == 0) {
1315 some_fixed_terms = true;
1316 } else {
1317 if (term.IsFractional()) {
1318 some_fractional_positions = true;
1319 }
1320 }
1321 }
1322
1323 // Remove fixed terms if any.
1324 if (some_fixed_terms) {
1325 int new_size = 0;
1326 for (const CutTerm& term : cut->terms) {
1327 if (term.bound_diff == 0) continue;
1328 cut->terms[new_size++] = term;
1329 }
1330 cut->terms.resize(new_size);
1331 }
1332 return some_fractional_positions;
1333}
1334
// Builds a tight "base" constraint (with explicit slack terms) from the given
// integer combination of LP rows, preprocesses it, then runs the RLT, cover
// and integer-rounding cut heuristics on it. Returns true if at least one cut
// was added to the constraint manager.
1335bool LinearProgrammingConstraint::AddCutFromConstraints(
1336 absl::string_view name,
1337 absl::Span<const std::pair<RowIndex, IntegerValue>> integer_multipliers) {
1338 // This is initialized to a valid linear constraint (by taking linear
1339 // combination of the LP rows) and will be transformed into a cut if
1340 // possible.
1341 //
1342 // TODO(user): For CG cuts, Ideally this linear combination should have only
1343 // one fractional variable (basis_col). But because of imprecision, we can get
1344 // a bunch of fractional entry with small coefficient (relative to the one of
1345 // basis_col). We try to handle that in IntegerRoundingCut(), but it might be
1346 // better to add small multiple of the involved rows to get rid of them.
1347 IntegerValue cut_ub;
1348 if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_scattered_vector_,
1349 &cut_ub)) {
1350 ++num_cut_overflows_;
1351 VLOG(1) << "Issue, overflow!";
1352 return false;
1353 }
1354
1355 // Important: because we use integer_multipliers below to create the slack,
1356 // we cannot try to adjust this linear combination easily.
1357 //
1358 // TODO(user): the conversion col_index -> IntegerVariable is slow and could
1359 // in principle be removed. Easy for cuts, but not so much for
1360 // implied_bounds_processor_. Note that in theory this could allow us to
1361 // use Literal directly without the need to have an IntegerVariable for them.
1362 tmp_scattered_vector_.ConvertToCutData(
1363 cut_ub.value(), extended_integer_variables_, lp_solution_, integer_trail_,
1364 &base_ct_);
1365
1366 // If there are no integers (all Booleans), no need to try implied bounds
1367 // heuristics. By setting this to nullptr, we are a bit faster.
1368 ImpliedBoundsProcessor* ib_processor = nullptr;
1369 {
1370 bool some_ints = false;
1371 bool some_fractional_positions = false;
1372 for (const CutTerm& term : base_ct_.terms) {
1373 if (term.bound_diff > 1) some_ints = true;
1374 if (term.IsFractional()) {
1375 some_fractional_positions = true;
1376 }
1377 }
1378
1379 // If all values are integer, we will not be able to cut anything.
1380 if (!some_fractional_positions) return false;
1381 if (some_ints) ib_processor = &implied_bounds_processor_;
1382 }
1383
1384 // Add constraint slack.
1385 // This is important for correctness here.
1386 const IntegerVariable first_slack(expanded_lp_solution_.size());
1387 CHECK_EQ(first_slack.value() % 2, 0);
1388 tmp_slack_rows_.clear();
1389 for (const auto [row, coeff] : integer_multipliers) {
1390 if (integer_lp_[row].lb == integer_lp_[row].ub) continue;
1391
1392 CutTerm entry;
1393 entry.coeff = coeff > 0 ? coeff : -coeff;
1394 entry.lp_value = 0.0;
1395 entry.bound_diff = integer_lp_[row].ub - integer_lp_[row].lb;
1396 entry.expr_vars[0] =
1397 first_slack + 2 * IntegerVariable(tmp_slack_rows_.size());
1398 entry.expr_coeffs[1] = 0;
1399 const double activity = scaler_.UnscaleConstraintActivity(
1400 row, simplex_.GetConstraintActivity(row));
1401 if (coeff > 0) {
1402 // Slack = ub - constraint;
1403 entry.lp_value = ToDouble(integer_lp_[row].ub) - activity;
1404 entry.expr_coeffs[0] = IntegerValue(-1);
1405 entry.expr_offset = integer_lp_[row].ub;
1406 } else {
1407 // Slack = constraint - lb;
1408 entry.lp_value = activity - ToDouble(integer_lp_[row].lb);
1409 entry.expr_coeffs[0] = IntegerValue(1);
1410 entry.expr_offset = -integer_lp_[row].lb;
1411 }
1412
1413 base_ct_.terms.push_back(entry);
1414 tmp_slack_rows_.push_back(row);
1415 }
1416
1417 // This also makes all coefficients positive.
1418 if (!PreprocessCut(first_slack, &base_ct_)) return false;
1419
1420 // We cannot cut sum Bool <= 1.
1421 if (base_ct_.rhs == 1) return false;
1422
1423 // Constraint with slack should be tight.
1424 if (DEBUG_MODE) {
1425 double activity = 0.0;
1426 double norm = 0.0;
1427 for (const CutTerm& term : base_ct_.terms) {
1428 const double coeff = ToDouble(term.coeff);
1429 activity += term.lp_value * coeff;
1430 norm += coeff * coeff;
1431 }
1432 if (std::abs(activity - static_cast<double>(base_ct_.rhs)) / norm > 1e-4) {
1433 VLOG(1) << "Cut not tight " << activity
1434 << " <= " << static_cast<double>(base_ct_.rhs);
1435 return false;
1436 }
1437 }
1438
1439 bool at_least_one_added = false;
1440 DCHECK(base_ct_.AllCoefficientsArePositive());
1441
1442 // Try RLT cuts.
1443 //
1444 // TODO(user): try this for more than just "base" constraints?
1445 if (integer_multipliers.size() == 1 && parameters_.add_rlt_cuts()) {
1446 if (rlt_cut_helper_.TrySimpleSeparation(base_ct_)) {
1447 at_least_one_added |= PostprocessAndAddCut(
1448 absl::StrCat(name, "_RLT"), rlt_cut_helper_.Info(), first_slack,
1449 rlt_cut_helper_.cut());
1450 }
1451 }
1452
1453 // Note that the indexing will survive ComplementForSmallerLpValues() below.
1454 if (ib_processor != nullptr) {
1455 if (!ib_processor->CacheDataForCut(first_slack, &base_ct_)) {
1456 ib_processor = nullptr;
1457 }
1458 }
1459
1460 // Try cover approach to find cut.
1461 // TODO(user): Share common computation between kinds.
1462 {
1463 cover_cut_helper_.ClearCache();
1464
1465 if (cover_cut_helper_.TrySingleNodeFlow(base_ct_, ib_processor)) {
1466 at_least_one_added |= PostprocessAndAddCut(
1467 absl::StrCat(name, "_FF"), cover_cut_helper_.Info(), first_slack,
1468 cover_cut_helper_.cut());
1469 }
1470 if (cover_cut_helper_.TrySimpleKnapsack(base_ct_, ib_processor)) {
1471 at_least_one_added |= PostprocessAndAddCut(
1472 absl::StrCat(name, "_K"), cover_cut_helper_.Info(), first_slack,
1473 cover_cut_helper_.cut());
1474 }
1475
1476 // This one needs to be called after TrySimpleKnapsack() in order to reuse
1477 // some cached data if possible.
1478 if (cover_cut_helper_.TryWithLetchfordSouliLifting(base_ct_,
1479 ib_processor)) {
1480 at_least_one_added |= PostprocessAndAddCut(
1481 absl::StrCat(name, "_KL"), cover_cut_helper_.Info(), first_slack,
1482 cover_cut_helper_.cut());
1483 }
1484 }
1485
1486 // Try integer rounding heuristic to find cut.
1487 {
1488 base_ct_.ComplementForSmallerLpValues();
1489
1490 RoundingOptions options;
1491 options.max_scaling = parameters_.max_integer_rounding_scaling();
1492 options.use_ib_before_heuristic = false;
1493 if (integer_rounding_cut_helper_.ComputeCut(options, base_ct_,
1494 ib_processor)) {
1495 at_least_one_added |= PostprocessAndAddCut(
1496 absl::StrCat(name, "_R"), integer_rounding_cut_helper_.Info(),
1497 first_slack, integer_rounding_cut_helper_.cut());
1498 }
1499
1500 options.use_ib_before_heuristic = true;
1501 options.prefer_positive_ib = false;
1502 if (ib_processor != nullptr && integer_rounding_cut_helper_.ComputeCut(
1503 options, base_ct_, ib_processor)) {
1504 at_least_one_added |= PostprocessAndAddCut(
1505 absl::StrCat(name, "_RB"), integer_rounding_cut_helper_.Info(),
1506 first_slack, integer_rounding_cut_helper_.cut());
1507 }
1508
1509 options.use_ib_before_heuristic = true;
1510 options.prefer_positive_ib = true;
1511 if (ib_processor != nullptr && integer_rounding_cut_helper_.ComputeCut(
1512 options, base_ct_, ib_processor)) {
1513 at_least_one_added |= PostprocessAndAddCut(
1514 absl::StrCat(name, "_RBP"), integer_rounding_cut_helper_.Info(),
1515 first_slack, integer_rounding_cut_helper_.cut());
1516 }
1517 }
1518
1519 return at_least_one_added;
1520}
1521
// Converts a generated CutData back into a LinearConstraint over the LP
// variables — substituting each slack term by its defining LP row — and hands
// the result to the constraint manager. Returns false on arithmetic overflow
// or when the cut's violation is too small to be worth adding.
1522bool LinearProgrammingConstraint::PostprocessAndAddCut(
1523 const std::string& name, const std::string& info,
1524 IntegerVariable first_slack, const CutData& cut) {
1525 if (cut.rhs > absl::int128(std::numeric_limits<int64_t>::max()) ||
1526 cut.rhs < absl::int128(std::numeric_limits<int64_t>::min())) {
1527 VLOG(2) << "RHS overflow " << name << " " << info;
1528 ++num_cut_overflows_;
1529 return false;
1530 }
1531
1532 // We test this here to avoid doing the costly conversion below.
1533 //
1534 // TODO(user): Ideally we should detect this even earlier during the cut
1535 // generation.
1536 if (cut.ComputeViolation() < 1e-4) {
1537 VLOG(3) << "Bad cut " << name << " " << info;
1538 ++num_bad_cuts_;
1539 return false;
1540 }
1541
1542 // Substitute any slack left.
1543 tmp_scattered_vector_.ClearAndResize(extended_integer_variables_.size());
1544 IntegerValue cut_ub = static_cast<int64_t>(cut.rhs);
1545 for (const CutTerm& term : cut.terms) {
1546 if (term.coeff == 0) continue;
1547 if (!AddProductTo(-term.coeff, term.expr_offset, &cut_ub)) {
1548 VLOG(2) << "Overflow in conversion";
1549 ++num_cut_overflows_;
1550 return false;
1551 }
1552 for (int i = 0; i < 2; ++i) {
1553 if (term.expr_coeffs[i] == 0) continue;
1554 const IntegerVariable var = term.expr_vars[i];
1555 const IntegerValue coeff =
1556 CapProd(term.coeff.value(), term.expr_coeffs[i].value());
1557 if (AtMinOrMaxInt64(coeff.value())) {
1558 VLOG(2) << "Overflow in conversion";
1559 ++num_cut_overflows_;
1560 return false;
1561 }
1562
1563 // Simple copy for non-slack variables.
1564 if (var < first_slack) {
1565 const glop::ColIndex col = mirror_lp_variable_[PositiveVariable(var)];
1566 if (VariableIsPositive(var)) {
1567 tmp_scattered_vector_.Add(col, coeff);
1568 } else {
1569 tmp_scattered_vector_.Add(col, -coeff);
1570 }
1571 continue;
1572 } else {
1573 // Replace slack from LP constraints.
1574 const int slack_index = (var.value() - first_slack.value()) / 2;
1575 const glop::RowIndex row = tmp_slack_rows_[slack_index];
1576 if (!tmp_scattered_vector_.AddLinearExpressionMultiple(
1577 coeff, IntegerLpRowCols(row), IntegerLpRowCoeffs(row),
1578 infinity_norms_[row])) {
1579 VLOG(2) << "Overflow in slack removal";
1580 ++num_cut_overflows_;
1581 return false;
1582 }
1583 }
1584 }
1585 }
1586
1587 // TODO(user): avoid allocating memory if it turns out this is a duplicate of
1588 // something we already added. This tends to happen if the duplicate was
1589 // already a generated cut which is currently not part of the LP.
1590 LinearConstraint converted_cut =
1591 tmp_scattered_vector_.ConvertToLinearConstraint(
1592 extended_integer_variables_, cut_ub);
1593
1594 // TODO(user): We probably generate too many cuts, keep best one only?
1595 // Note that we do need duplicate removal and maybe some orthogonality here?
1596 if (/*DISABLES CODE*/ (false)) {
1597 top_n_cuts_.AddCut(std::move(converted_cut), name, expanded_lp_solution_);
1598 return true;
1599 }
1600 return constraint_manager_.AddCut(std::move(converted_cut), name, info);
1601}
1602
1603// TODO(user): This can still be too slow on some problems like
1604// 30_70_45_05_100.mps.gz. Not this function itself, but the set of
1605// computations it triggers. We should add heuristics to abort earlier if a
1606// cut is not promising. Or only test a few positions and not all rows.
1607void LinearProgrammingConstraint::AddCGCuts() {
1608 std::vector<std::pair<RowIndex, double>> sorted_columns;
1609 const RowIndex num_rows(integer_lp_.size());
1610 glop::DenseColumn::ConstView norms = simplex_.GetDualSquaredNorms();
1611 for (RowIndex index(0); index < num_rows; ++index) {
1612 const ColIndex basis_col = simplex_.GetBasis(index);
1613
1614 // We used to skip slack and also not to do "classical" gomory and instead
1615 // call IgnoreTrivialConstraintMultipliers() heuristic. It is usually faster
1616 // but on some problem like neos*creuse or neos-888544, this do not find
1617 // good cut though.
1618 //
1619 // TODO(user): Tune this. It seems better but we need to handle nicely the
1620 // extra amount of cuts this produces.
1621 if (basis_col >= integer_variables_.size()) continue;
1622
1623 // Get he variable value at cp-scale. Similar to GetVariableValueAtCpScale()
1624 // but this works for slack variable too.
1625 const Fractional lp_value =
1626 simplex_.GetVariableValue(basis_col) /
1627 scaler_.VariableScalingFactorWithSlack(basis_col);
1628
1629 // Only consider fractional basis element. We ignore element that are close
1630 // to an integer to reduce the amount of positions we try.
1631 //
1632 // TODO(user): We could just look at the diff with std::floor() in the hope
1633 // that when we are just under an integer, the exact computation below will
1634 // also be just under it.
1635 const double fractionality = std::abs(lp_value - std::round(lp_value));
1636 if (fractionality < 0.01) continue;
1637
1638 const double score = fractionality * (1.0 - fractionality) / norms[index];
1639 sorted_columns.push_back({index, score});
1640 }
1641 absl::c_sort(sorted_columns, [](const std::pair<RowIndex, double>& a,
1642 const std::pair<RowIndex, double>& b) {
1643 return a.second > b.second;
1644 });
1645
1646 int num_added = 0;
1647 for (const auto [index, _] : sorted_columns) {
1648 if (time_limit_->LimitReached()) return;
1649 // We multiply by row_factors_ directly, which might be slightly more
1650 // precise than dividing by 1/factor like UnscaleLeftSolveValue() does.
1651 //
1652 // TODO(user): Avoid code duplication between the sparse/dense path.
1653 tmp_lp_multipliers_.clear();
1654 const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(index);
1655 if (lambda.non_zeros.empty()) {
1656 for (RowIndex row(0); row < num_rows; ++row) {
1657 const double value = lambda.values[glop::RowToColIndex(row)];
1658 if (std::abs(value) < kZeroTolerance) continue;
1659 tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value});
1660 }
1661 } else {
1662 for (const ColIndex col : lambda.non_zeros) {
1663 const RowIndex row = glop::ColToRowIndex(col);
1664 const double value = lambda.values[col];
1665 if (std::abs(value) < kZeroTolerance) continue;
1666 tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value});
1667 }
1668 }
1669
1670 // Size 1 is already done with MIR.
1671 if (tmp_lp_multipliers_.size() <= 1) continue;
1672
1673 IntegerValue scaling;
1674 for (int i = 0; i < 2; ++i) {
1675 tmp_cg_multipliers_ = tmp_lp_multipliers_;
1676 if (i == 1) {
1677 // Try other sign.
1678 //
1679 // TODO(user): Maybe add an heuristic to know beforehand which sign to
1680 // use?
1681 for (std::pair<RowIndex, double>& p : tmp_cg_multipliers_) {
1682 p.second = -p.second;
1683 }
1684 }
1685
1686 // Remove constraints that shouldn't be helpful.
1687 //
1688 // In practice, because we can complement the slack, it might still be
1689 // useful to have some constraint with a trivial upper bound. Also
 1690 // removing this seems to generate a lot more cuts, so we need to be more
1691 // efficient in dealing with them.
1692 if (true) {
1693 IgnoreTrivialConstraintMultipliers(&tmp_cg_multipliers_);
1694 if (tmp_cg_multipliers_.size() <= 1) continue;
1695 }
1696 ScaleMultipliers(tmp_cg_multipliers_,
1697 /*take_objective_into_account=*/false, &scaling,
1698 &tmp_integer_multipliers_);
1699 if (scaling != 0) {
1700 if (AddCutFromConstraints("CG", tmp_integer_multipliers_)) {
1701 ++num_added;
1702 }
1703 }
1704 }
1705
1706 // Stop if we already added more than 10 cuts this round.
1707 if (num_added > 10) break;
1708 }
1709}
1710
// Because we know the objective is integer, the constraint objective >= lb can
// sometimes cut the current lp optimal, and it can make a big difference to
// add it. Or at least use it when constructing more advanced cuts. See
// 'multisetcover_batch_0_case_115_instance_0_small_subset_elements_3_sumreqs
// _1295_candidates_41.fzn'
//
// TODO(user): It might be better to just integrate this with the MIR code so
// that we not only consider MIR1 involving the objective but we also consider
// combining it with other constraints.
void LinearProgrammingConstraint::AddObjectiveCut() {
  // A single-term objective cannot yield an interesting multi-term cut.
  if (integer_objective_.size() <= 1) return;

  // We only try to add such cut if the LB objective is "far" from the current
  // objective lower bound. Note that this is in term of the "internal" integer
  // objective.
  const double obj_lp_value = simplex_.GetObjectiveValue();
  const IntegerValue obj_lower_bound =
      integer_trail_->LevelZeroLowerBound(objective_cp_);
  if (obj_lp_value + 1.0 >= ToDouble(obj_lower_bound)) return;

  // We negate everything to have a <= base constraint.
  LinearConstraint objective_ct;
  objective_ct.lb = kMinIntegerValue;
  objective_ct.ub = integer_objective_offset_ -
                    integer_trail_->LevelZeroLowerBound(objective_cp_);
  IntegerValue obj_coeff_magnitude(0);
  objective_ct.resize(integer_objective_.size());
  int i = 0;
  for (const auto& [col, coeff] : integer_objective_) {
    const IntegerVariable var = integer_variables_[col.value()];
    objective_ct.vars[i] = var;
    objective_ct.coeffs[i] = -coeff;
    obj_coeff_magnitude = std::max(obj_coeff_magnitude, IntTypeAbs(coeff));
    ++i;
  }

  // Cache the cut data before objective_ct is possibly moved below.
  if (!base_ct_.FillFromLinearConstraint(objective_ct, expanded_lp_solution_,
                                         integer_trail_)) {
    return;
  }

  // If the magnitude is small enough, just try to add the full objective. Other
  // cuts will be derived in subsequent passes. Otherwise, try normal cut
  // heuristic that should result in a cut with reasonable coefficients.
  if (obj_coeff_magnitude < 1e9 &&
      constraint_manager_.AddCut(std::move(objective_ct), "Objective")) {
    return;
  }

  // If there are no integer (all Booleans), no need to try implied bounds
  // heuristics. By setting this to nullptr, we are a bit faster.
  ImpliedBoundsProcessor* ib_processor = nullptr;
  {
    bool some_ints = false;
    bool some_relevant_positions = false;
    for (const CutTerm& term : base_ct_.terms) {
      if (term.bound_diff > 1) some_ints = true;
      if (term.HasRelevantLpValue()) some_relevant_positions = true;
    }

    // If all values are integer, we will not be able to cut anything.
    if (!some_relevant_positions) return;
    if (some_ints) ib_processor = &implied_bounds_processor_;
  }

  // Note that the indexing will survive the complement of terms below.
  const IntegerVariable first_slack(
      std::numeric_limits<IntegerVariable::ValueType>::max());
  if (ib_processor != nullptr) {
    if (!ib_processor->CacheDataForCut(first_slack, &base_ct_)) {
      ib_processor = nullptr;
    }
  }

  // Try knapsack.
  base_ct_.ComplementForPositiveCoefficients();
  cover_cut_helper_.ClearCache();
  if (cover_cut_helper_.TrySimpleKnapsack(base_ct_, ib_processor)) {
    PostprocessAndAddCut("Objective_K", cover_cut_helper_.Info(), first_slack,
                         cover_cut_helper_.cut());
  }

  // Try rounding.
  RoundingOptions options;
  options.max_scaling = parameters_.max_integer_rounding_scaling();
  base_ct_.ComplementForSmallerLpValues();
  if (integer_rounding_cut_helper_.ComputeCut(options, base_ct_,
                                              ib_processor)) {
    PostprocessAndAddCut("Objective_R", integer_rounding_cut_helper_.Info(),
                         first_slack, integer_rounding_cut_helper_.cut());
  }
}
1803
1804void LinearProgrammingConstraint::AddMirCuts() {
1805 // Heuristic to generate MIR_n cuts by combining a small number of rows. This
1806 // works greedily and follow more or less the MIR cut description in the
1807 // literature. We have a current cut, and we add one more row to it while
1808 // eliminating a variable of the current cut whose LP value is far from its
1809 // bound.
1810 //
1811 // A notable difference is that we randomize the variable we eliminate and
1812 // the row we use to do so. We still have weights to indicate our preferred
1813 // choices. This allows to generate different cuts when called again and
1814 // again.
1815 //
1816 // TODO(user): We could combine n rows to make sure we eliminate n variables
1817 // far away from their bounds by solving exactly in integer small linear
1818 // system.
1819 util_intops::StrongVector<ColIndex, IntegerValue> dense_cut(
1820 integer_variables_.size(), IntegerValue(0));
1821 SparseBitset<ColIndex> non_zeros_(ColIndex(integer_variables_.size()));
1822
1823 // We compute all the rows that are tight, these will be used as the base row
1824 // for the MIR_n procedure below.
1825 const int num_cols = integer_variables_.size();
1826 const int num_rows = integer_lp_.size();
1827 std::vector<std::pair<RowIndex, IntegerValue>> base_rows;
1828 util_intops::StrongVector<RowIndex, double> row_weights(num_rows, 0.0);
1829 Fractional* lb_with_slack = simplex_.MutableLowerBounds()->data();
1830 Fractional* ub_with_slack = simplex_.MutableUpperBounds()->data();
1831 for (RowIndex row(0); row < num_rows; ++row) {
1832 // We only consider tight rows.
1833 // We use both the status and activity to have as much options as possible.
1834 //
1835 // TODO(user): shall we consider rows that are not tight?
1836 // TODO(user): Ignore trivial rows? Note that we do that for MIR1 since it
1837 // cannot be good.
1838 const auto status = simplex_.GetConstraintStatus(row);
1839 const double activity = simplex_.GetConstraintActivity(row);
1840 const double ct_lb = -ub_with_slack[num_cols + row.value()];
1841 const double ct_ub = -lb_with_slack[num_cols + row.value()];
1842 if (activity > ct_ub - 1e-4 ||
1845 base_rows.push_back({row, IntegerValue(1)});
1846 }
1847 if (activity < ct_lb + 1e-4 ||
1850 base_rows.push_back({row, IntegerValue(-1)});
1851 }
1852
1853 // For now, we use the dual values for the row "weights".
1854 //
1855 // Note that we use the dual at LP scale so that it make more sense when we
1856 // compare different rows since the LP has been scaled.
1857 //
1858 // TODO(user): In Kati Wolter PhD "Implementation of Cutting Plane
1859 // Separators for Mixed Integer Programs" which describe SCIP's MIR cuts
1860 // implementation (or at least an early version of it), a more complex score
1861 // is used.
1862 //
1863 // Note(user): Because we only consider tight rows under the current lp
1864 // solution (i.e. non-basic rows), most should have a non-zero dual values.
1865 // But there is some degenerate problem where these rows have a really low
1866 // weight (or even zero), and having only weight of exactly zero in
1867 // std::discrete_distribution will result in a crash.
1868 row_weights[row] =
1869 std::max(Fractional(1e-8), std::abs(simplex_.GetDualValue(row)));
1870 }
1871
1872 // The code here can be really slow, so we put a limit on the number of
1873 // entries we process. We randomize the base_rows so that on the next calls
1874 // we do not do exactly the same if we can't process many base row.
1875 int64_t dtime_num_entries = 0;
1876 std::shuffle(base_rows.begin(), base_rows.end(), *random_);
1877
1878 std::vector<double> weights;
1879 util_intops::StrongVector<RowIndex, bool> used_rows;
1880 std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
1881 const auto matrix = simplex_.MatrixWithSlack().view();
1882 for (const std::pair<RowIndex, IntegerValue>& entry : base_rows) {
1883 if (time_limit_->LimitReached()) break;
1884 if (dtime_num_entries > 1e7) break;
1885
1886 const glop::RowIndex base_row = entry.first;
1887 const IntegerValue multiplier = entry.second;
1888
1889 // First try to generate a cut directly from this base row (MIR1).
1890 //
1891 // Note(user): We abort on success like it seems to be done in the
1892 // literature. Note that we don't succeed that often in generating an
1893 // efficient cut, so I am not sure aborting will make a big difference
1894 // speedwise. We might generate similar cuts though, but hopefully the cut
1895 // management can deal with that.
1896 integer_multipliers = {entry};
1897 if ((multiplier > 0 && !integer_lp_[base_row].ub_is_trivial) ||
1898 (multiplier < 0 && !integer_lp_[base_row].lb_is_trivial)) {
1899 if (AddCutFromConstraints("MIR_1", integer_multipliers)) continue;
1900 }
1901
1902 // Cleanup.
1903 for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1904 dense_cut[col] = IntegerValue(0);
1905 }
1906 non_zeros_.ResetAllToFalse();
1907
1908 // Copy cut.
1909 const LinearConstraintInternal& ct = integer_lp_[entry.first];
1910 for (int i = 0; i < ct.num_terms; ++i) {
1911 const int index = ct.start_in_buffer + i;
1912 const ColIndex col = integer_lp_cols_[index];
1913 const IntegerValue coeff = integer_lp_coeffs_[index];
1914 non_zeros_.Set(col);
1915 dense_cut[col] += coeff * multiplier;
1916 }
1917
1918 used_rows.assign(num_rows, false);
1919 used_rows[entry.first] = true;
1920
1921 // We will aggregate at most kMaxAggregation more rows.
1922 //
1923 // TODO(user): optim + tune.
1924 const int kMaxAggregation = 5;
1925 for (int i = 0; i < kMaxAggregation; ++i) {
1926 // First pick a variable to eliminate. We currently pick a random one with
1927 // a weight that depend on how far it is from its closest bound.
1928 IntegerValue max_magnitude(0);
1929 weights.clear();
1930 std::vector<ColIndex> col_candidates;
1931 for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1932 if (dense_cut[col] == 0) continue;
1933
1934 max_magnitude = std::max(max_magnitude, IntTypeAbs(dense_cut[col]));
1935 const int col_degree = matrix.ColumnNumEntries(col).value();
1936 if (col_degree <= 1) continue;
1937 if (simplex_.GetVariableStatus(col) != glop::VariableStatus::BASIC) {
1938 continue;
1939 }
1940
1941 const IntegerVariable var = integer_variables_[col.value()];
1942 const double lp_value = expanded_lp_solution_[var];
1943 const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(var));
1944 const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(var));
1945 const double bound_distance = std::min(ub - lp_value, lp_value - lb);
1946 if (bound_distance > 1e-2) {
1947 weights.push_back(bound_distance);
1948 col_candidates.push_back(col);
1949 }
1950 }
1951 if (col_candidates.empty()) break;
1952
1953 const ColIndex var_to_eliminate =
1954 col_candidates[WeightedPick(weights, *random_)];
1955
1956 // What rows can we add to eliminate var_to_eliminate?
1957 std::vector<RowIndex> possible_rows;
1958 weights.clear();
1959 for (const auto entry_index : matrix.Column(var_to_eliminate)) {
1960 const RowIndex row = matrix.EntryRow(entry_index);
1961 const glop::Fractional coeff = matrix.EntryCoefficient(entry_index);
1962
1963 // We disallow all the rows that contain a variable that we already
1964 // eliminated (or are about to). This mean that we choose rows that
1965 // form a "triangular" matrix on the position we choose to eliminate.
1966 if (used_rows[row]) continue;
1967 used_rows[row] = true;
1968
1969 // Note that we consider all rows here, not only tight one. This makes a
1970 // big difference on problem like blp-ic98.pb.gz. We can also use the
1971 // integrality of the slack when adding a non-tight row to derive good
1972 // cuts. Also, non-tight row will have a low weight, so they should
1973 // still be chosen after the tight-one in most situation.
1974 bool add_row = false;
1975 if (!integer_lp_[row].ub_is_trivial) {
1976 if (coeff > 0.0) {
1977 if (dense_cut[var_to_eliminate] < 0) add_row = true;
1978 } else {
1979 if (dense_cut[var_to_eliminate] > 0) add_row = true;
1980 }
1981 }
1982 if (!integer_lp_[row].lb_is_trivial) {
1983 if (coeff > 0.0) {
1984 if (dense_cut[var_to_eliminate] > 0) add_row = true;
1985 } else {
1986 if (dense_cut[var_to_eliminate] < 0) add_row = true;
1987 }
1988 }
1989 if (add_row) {
1990 possible_rows.push_back(row);
1991 weights.push_back(row_weights[row]);
1992 }
1993 }
1994 if (possible_rows.empty()) break;
1995
1996 const RowIndex row_to_combine =
1997 possible_rows[WeightedPick(weights, *random_)];
1998
1999 // Find the coefficient of the variable to eliminate.
2000 IntegerValue to_combine_coeff = 0;
2001 const LinearConstraintInternal& ct_to_combine =
2002 integer_lp_[row_to_combine];
2003 for (int i = 0; i < ct_to_combine.num_terms; ++i) {
2004 const int index = ct_to_combine.start_in_buffer + i;
2005 if (integer_lp_cols_[index] == var_to_eliminate) {
2006 to_combine_coeff = integer_lp_coeffs_[index];
2007 break;
2008 }
2009 }
2010 CHECK_NE(to_combine_coeff, 0);
2011
2012 IntegerValue mult1 = -to_combine_coeff;
2013 IntegerValue mult2 = dense_cut[var_to_eliminate];
2014 CHECK_NE(mult2, 0);
2015 if (mult1 < 0) {
2016 mult1 = -mult1;
2017 mult2 = -mult2;
2018 }
2019
2020 const IntegerValue gcd = IntegerValue(
2021 MathUtil::GCD64(std::abs(mult1.value()), std::abs(mult2.value())));
2022 CHECK_NE(gcd, 0);
2023 mult1 /= gcd;
2024 mult2 /= gcd;
2025
2026 // Overflow detection.
2027 //
2028 // TODO(user): do that in the possible_rows selection? only problem is
2029 // that we do not have the integer coefficient there...
2030 for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
2031 max_magnitude = std::max(max_magnitude, IntTypeAbs(entry.second));
2032 }
2033 if (CapAdd(CapProd(max_magnitude.value(), std::abs(mult1.value())),
2034 CapProd(infinity_norms_[row_to_combine].value(),
2035 std::abs(mult2.value()))) ==
2036 std::numeric_limits<int64_t>::max()) {
2037 break;
2038 }
2039
2040 for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
2041 dtime_num_entries += integer_lp_[entry.first].num_terms;
2042 entry.second *= mult1;
2043 }
2044 dtime_num_entries += integer_lp_[row_to_combine].num_terms;
2045 integer_multipliers.push_back({row_to_combine, mult2});
2046
2047 // TODO(user): Not supper efficient to recombine the rows.
2048 if (AddCutFromConstraints(absl::StrCat("MIR_", i + 2),
2049 integer_multipliers)) {
2050 break;
2051 }
2052
2053 // Minor optim: the computation below is only needed if we do one more
2054 // iteration.
2055 if (i + 1 == kMaxAggregation) break;
2056
2057 for (ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
2058 dense_cut[col] *= mult1;
2059 }
2060 for (int i = 0; i < ct_to_combine.num_terms; ++i) {
2061 const int index = ct_to_combine.start_in_buffer + i;
2062 const ColIndex col = integer_lp_cols_[index];
2063 const IntegerValue coeff = integer_lp_coeffs_[index];
2064 non_zeros_.Set(col);
2065 dense_cut[col] += coeff * mult2;
2066 }
2067 }
2068 }
2069}
2070
2071void LinearProgrammingConstraint::AddZeroHalfCuts() {
2072 if (time_limit_->LimitReached()) return;
2073
2074 tmp_lp_values_.clear();
2075 tmp_var_lbs_.clear();
2076 tmp_var_ubs_.clear();
2077 for (const IntegerVariable var : integer_variables_) {
2078 tmp_lp_values_.push_back(expanded_lp_solution_[var]);
2079 tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
2080 tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
2081 }
2082
2083 // TODO(user): See if it make sense to try to use implied bounds there.
2084 zero_half_cut_helper_.ProcessVariables(tmp_lp_values_, tmp_var_lbs_,
2085 tmp_var_ubs_);
2086 for (glop::RowIndex row(0); row < integer_lp_.size(); ++row) {
2087 // Even though we could use non-tight row, for now we prefer to use tight
2088 // ones.
2089 const auto status = simplex_.GetConstraintStatus(row);
2090 if (status == glop::ConstraintStatus::BASIC) continue;
2091 if (status == glop::ConstraintStatus::FREE) continue;
2092
2093 zero_half_cut_helper_.AddOneConstraint(
2094 row, IntegerLpRowCols(row), IntegerLpRowCoeffs(row),
2095 integer_lp_[row].lb, integer_lp_[row].ub);
2096 }
2097 for (const std::vector<std::pair<RowIndex, IntegerValue>>& multipliers :
2098 zero_half_cut_helper_.InterestingCandidates(random_)) {
2099 if (time_limit_->LimitReached()) break;
2100
2101 // TODO(user): Make sure that if the resulting linear coefficients are not
2102 // too high, we do try a "divisor" of two and thus try a true zero-half cut
2103 // instead of just using our best MIR heuristic (which might still be better
2104 // though).
2105 AddCutFromConstraints("ZERO_HALF", multipliers);
2106 }
2107}
2108
2109void LinearProgrammingConstraint::UpdateSimplexIterationLimit(
2110 const int64_t min_iter, const int64_t max_iter) {
2111 if (parameters_.linearization_level() < 2) return;
2112 const int64_t num_degenerate_columns = CalculateDegeneracy();
2113 const int64_t num_cols = simplex_.GetProblemNumCols().value();
2114 if (num_cols <= 0) {
2115 return;
2116 }
2117 CHECK_GT(num_cols, 0);
2118 const int64_t decrease_factor = (10 * num_degenerate_columns) / num_cols;
2119 if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE) {
2120 // We reached here probably because we predicted wrong. We use this as a
2121 // signal to increase the iterations or punish less for degeneracy compare
2122 // to the other part.
2123 if (is_degenerate_) {
2124 next_simplex_iter_ /= std::max(int64_t{1}, decrease_factor);
2125 } else {
2126 next_simplex_iter_ *= 2;
2127 }
2128 } else if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
2129 if (is_degenerate_) {
2130 next_simplex_iter_ /= std::max(int64_t{1}, 2 * decrease_factor);
2131 } else {
2132 // This is the most common case. We use the size of the problem to
2133 // determine the limit and ignore the previous limit.
2134 next_simplex_iter_ = num_cols / 40;
2135 }
2136 }
2137 next_simplex_iter_ =
2138 std::max(min_iter, std::min(max_iter, next_simplex_iter_));
2139}
2140
  CHECK(!sat_solver_->ModelIsUnsat());

  // Number of consecutive "forced" re-runs of this propagation (see
  // CallAgainDuringThisPropagation below). Reset here; restored if we decide
  // to loop once more.
  const int old_num_force = num_force_lp_call_on_next_propagate_;
  num_force_lp_call_on_next_propagate_ = 0;
  if (!enabled_) return true;
  if (time_limit_->LimitReached()) return true;
  const int64_t timestamp_at_function_start = integer_trail_->num_enqueues();

  // Because of AlwaysCallAtLevelZero(), this function might be called many time
  // at level zero in case of frequent restart, or if we just call Propagate()
  // many time in a row. This could happen because most of the code assumes
  // Propagate() is O(1) at fixed point.
  //
  // To alleviate any issue, we have some throttling in place, where we abort
  // if the deterministic time spent after the last level zero "solve" is lower
  // than the effort spent on that last solve.
  //
  // TODO(user): also use the logic of lp_at_level_zero_is_final_. If we don't
  // have new info, there is no reason to rerun it. harder to make sure we
  // don't miss anything though.
  const double dtime_at_function_start =
      time_limit_->GetElapsedDeterministicTime();
  if (trail_->CurrentDecisionLevel() == 0 && old_num_force == 0) {
    const double interval =
        dtime_at_function_start - last_root_level_deterministic_time_;
    if (last_root_level_deterministic_duration_ > interval) {
      ++num_root_level_skips_;
      return true;
    }
    ++num_root_level_solves_;
  }
  // On exit, record how much deterministic time this root-level call used so
  // the throttling above can compare against it next time.
  const auto cleanup = ::absl::MakeCleanup([dtime_at_function_start, this]() {
    if (trail_->CurrentDecisionLevel() == 0) {
      const double dtime_at_exit = time_limit_->GetElapsedDeterministicTime();
      last_root_level_deterministic_time_ = dtime_at_exit;
      last_root_level_deterministic_duration_ =
          dtime_at_exit - dtime_at_function_start;
    }
  });

  UpdateBoundsOfLpVariables();

  // TODO(user): It seems the time we loose by not stopping early might be worth
  // it because we end up with a better explanation at optimality.
  if (/* DISABLES CODE */ (false) && objective_is_defined_) {
    // We put a limit on the dual objective since there is no point increasing
    // it past our current objective upper-bound (we will already fail as soon
    // as we pass it). Note that this limit is properly transformed using the
    // objective scaling factor and offset stored in lp_data_.
    //
    // Note that we use a bigger epsilon here to be sure that if we abort
    // because of this, we will report a conflict.
    simplex_params_.set_objective_upper_limit(
        static_cast<double>(integer_trail_->UpperBound(objective_cp_).value() +
                            100.0 * kCpEpsilon));
  }

  // Put an iteration limit on the work we do in the simplex for this call. Note
  // that because we are "incremental", even if we don't solve it this time we
  // will make progress towards a solve in the lower node of the tree search.
  if (trail_->CurrentDecisionLevel() == 0) {
    simplex_params_.set_max_number_of_iterations(
        parameters_.root_lp_iterations());
  } else {
    simplex_params_.set_max_number_of_iterations(next_simplex_iter_);
  }

  simplex_.SetParameters(simplex_params_);
  simplex_.SetRandom(*random_);
  if (!SolveLp()) return true;
  if (!AnalyzeLp()) return false;

  // Add new constraints to the LP and resolve?
  const int max_cuts_rounds = trail_->CurrentDecisionLevel() == 0
                                  ? parameters_.max_cut_rounds_at_level_zero()
                                  : 1;
  int cuts_round = 0;
  while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL &&
         cuts_round < max_cuts_rounds) {
    // We are about to spend some effort finding cuts or changing the LP.
    // If one of the LP solve done by this function propagated something, it
    // seems better to reach the propagation fix point first before doing that.
    //
    // Heuristic: if we seem to be in a propagation loop, we might need more
    // cut/lazy-constraints in order to just get out of it, so we loop only if
    // num_force_lp_call_on_next_propagate_ is small.
    if (integer_trail_->num_enqueues() > timestamp_at_function_start &&
        old_num_force < 3) {
      num_force_lp_call_on_next_propagate_ = old_num_force + 1;
      watcher_->CallAgainDuringThisPropagation();
      return true;
    }

    // We wait for the first batch of problem constraints to be added before we
    // begin to generate cuts. Note that we rely on num_solves_ since on some
    // problems there is no other constraints than the cuts.
    cuts_round++;
    if (parameters_.cut_level() > 0 &&
        (num_solves_ > 1 || !parameters_.add_lp_constraints_lazily())) {
      // This must be called first.
      implied_bounds_processor_.RecomputeCacheAndSeparateSomeImpliedBoundCuts(
          expanded_lp_solution_);
      if (parameters_.add_rlt_cuts()) {
        rlt_cut_helper_.Initialize(extended_integer_variables_);
      }

      // The "generic" cuts are currently part of this class as they are using
      // data from the current LP.
      //
      // TODO(user): Refactor so that they are just normal cut generators?
      const int level = trail_->CurrentDecisionLevel();
      if (trail_->CurrentDecisionLevel() == 0) {
        problem_proven_infeasible_by_cuts_ = false;
        if (parameters_.add_objective_cut()) AddObjectiveCut();
        if (parameters_.add_mir_cuts()) AddMirCuts();
        if (parameters_.add_cg_cuts()) AddCGCuts();
        if (parameters_.add_zero_half_cuts()) AddZeroHalfCuts();
        if (problem_proven_infeasible_by_cuts_) {
          return integer_trail_->ReportConflict({});
        }
        top_n_cuts_.TransferToManager(&constraint_manager_);
      }

      // Try to add cuts.
      if (level == 0 || !parameters_.only_add_cuts_at_level_zero()) {
        for (CutGenerator& generator : cut_generators_) {
          if (level > 0 && generator.only_run_at_level_zero) continue;
          if (!generator.generate_cuts(&constraint_manager_)) return false;
        }
      }

      implied_bounds_processor_.IbCutPool().TransferToManager(
          &constraint_manager_);
    }

    int num_added = 0;
    if (constraint_manager_.ChangeLp(&state_, &num_added)) {
      ++num_lp_changes_;
      simplex_.LoadStateForNextSolve(state_);
      if (!CreateLpFromConstraintManager()) {
        sat_solver_->NotifyThatModelIsUnsat();
        return integer_trail_->ReportConflict({});
      }

      // If we didn't add any new constraint, we delay the next Solve() since
      // likely the optimal didn't change.
      if (num_added == 0) {
        break;
      }

      const double old_obj = simplex_.GetObjectiveValue();
      if (!SolveLp()) return true;
      if (!AnalyzeLp()) return false;

      if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
        VLOG(3) << "Relaxation improvement " << old_obj << " -> "
                << simplex_.GetObjectiveValue()
                << " diff: " << simplex_.GetObjectiveValue() - old_obj
                << " level: " << trail_->CurrentDecisionLevel();
      }
    } else {
      // The LP did not change; at level zero the current relaxation is final.
      if (trail_->CurrentDecisionLevel() == 0) {
        lp_at_level_zero_is_final_ = true;
      }
      break;
    }
  }

  return true;
}
2312
2313absl::int128 LinearProgrammingConstraint::GetImpliedLowerBound(
2314 const LinearConstraint& terms) const {
2315 absl::int128 lower_bound(0);
2316 const int size = terms.num_terms;
2317 for (int i = 0; i < size; ++i) {
2318 const IntegerVariable var = terms.vars[i];
2319 const IntegerValue coeff = terms.coeffs[i];
2320 const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
2321 : integer_trail_->UpperBound(var);
2322 lower_bound += absl::int128(bound.value()) * absl::int128(coeff.value());
2323 }
2324 return lower_bound;
2325}
2326
2327bool LinearProgrammingConstraint::ScalingCanOverflow(
2328 int power, bool take_objective_into_account,
2329 absl::Span<const std::pair<glop::RowIndex, double>> multipliers,
2330 int64_t overflow_cap) const {
2331 int64_t bound = 0;
2332 const int64_t factor = int64_t{1} << power;
2333 const double factor_as_double = static_cast<double>(factor);
2334 if (take_objective_into_account) {
2335 bound = CapAdd(bound, CapProd(factor, objective_infinity_norm_.value()));
2336 if (bound >= overflow_cap) return true;
2337 }
2338 for (const auto [row, double_coeff] : multipliers) {
2339 const double magnitude =
2340 std::abs(std::round(double_coeff * factor_as_double));
2341 if (std::isnan(magnitude)) return true;
2342 if (magnitude >= static_cast<double>(std::numeric_limits<int64_t>::max())) {
2343 return true;
2344 }
2345 bound = CapAdd(bound, CapProd(static_cast<int64_t>(magnitude),
2346 infinity_norms_[row].value()));
2347 if (bound >= overflow_cap) return true;
2348 }
2349 return bound >= overflow_cap;
2350}
2351
2352void LinearProgrammingConstraint::IgnoreTrivialConstraintMultipliers(
2353 std::vector<std::pair<RowIndex, double>>* lp_multipliers) {
2354 int new_size = 0;
2355 for (const std::pair<RowIndex, double>& p : *lp_multipliers) {
2356 const RowIndex row = p.first;
2357 const Fractional lp_multi = p.second;
2358 if (lp_multi > 0.0 && integer_lp_[row].ub_is_trivial) continue;
2359 if (lp_multi < 0.0 && integer_lp_[row].lb_is_trivial) continue;
2360 (*lp_multipliers)[new_size++] = p;
2361 }
2362 lp_multipliers->resize(new_size);
2363}
2364
2365void LinearProgrammingConstraint::ScaleMultipliers(
2366 absl::Span<const std::pair<RowIndex, double>> lp_multipliers,
2367 bool take_objective_into_account, IntegerValue* scaling,
2368 std::vector<std::pair<RowIndex, IntegerValue>>* output) const {
2369 *scaling = 0;
2370
2371 output->clear();
2372 if (lp_multipliers.empty()) {
2373 // Empty linear combinaison.
2374 return;
2375 }
2376
2377 // TODO(user): we currently do not support scaling down, so we just abort
2378 // if with a scaling of 1, we reach the overflow_cap.
2379 const int64_t overflow_cap = std::numeric_limits<int64_t>::max();
2380 if (ScalingCanOverflow(/*power=*/0, take_objective_into_account,
2381 lp_multipliers, overflow_cap)) {
2382 ++num_scaling_issues_;
2383 return;
2384 }
2385
2386 // Note that we don't try to scale by more than 63 since in practice the
2387 // constraint multipliers should not be all super small.
2388 //
2389 // TODO(user): We could be faster here, but trying to compute the exact power
2390 // in one go with floating points seems tricky. So we just do around 6 passes
2391 // plus the one above for zero.
2392 const int power = BinarySearch<int>(0, 63, [&](int candidate) {
2393 // Because BinarySearch() wants f(upper_bound) to fail, we bypass the test
2394 // here as when the set of floating points are really small, we could pass
2395 // with a large power.
2396 if (candidate >= 63) return false;
2397
2398 return !ScalingCanOverflow(candidate, take_objective_into_account,
2399 lp_multipliers, overflow_cap);
2400 });
2401 *scaling = int64_t{1} << power;
2402
2403 // Scale the multipliers by *scaling.
2404 // Note that we use the exact same formula as in ScalingCanOverflow().
2405 int64_t gcd = scaling->value();
2406 const double scaling_as_double = static_cast<double>(scaling->value());
2407 for (const auto [row, double_coeff] : lp_multipliers) {
2408 const IntegerValue coeff(std::round(double_coeff * scaling_as_double));
2409 if (coeff != 0) {
2410 gcd = std::gcd(gcd, std::abs(coeff.value()));
2411 output->push_back({row, coeff});
2412 }
2413 }
2414 if (gcd > 1) {
2415 *scaling /= gcd;
2416 for (auto& entry : *output) {
2417 entry.second /= gcd;
2418 }
2419 }
2420}
2421
2422template <bool check_overflow>
2423bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
2424 absl::Span<const std::pair<RowIndex, IntegerValue>> integer_multipliers,
2425 ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
2426 // Initialize the new constraint.
2427 *upper_bound = 0;
2428 scattered_vector->ClearAndResize(extended_integer_variables_.size());
2429
2430 // Compute the new constraint by taking the linear combination given by
2431 // integer_multipliers of the integer constraints in integer_lp_.
2432 for (const std::pair<RowIndex, IntegerValue>& term : integer_multipliers) {
2433 const RowIndex row = term.first;
2434 const IntegerValue multiplier = term.second;
2435 DCHECK_LT(row, integer_lp_.size());
2436
2437 // Update the constraint.
2438 if (!scattered_vector->AddLinearExpressionMultiple<check_overflow>(
2439 multiplier, IntegerLpRowCols(row), IntegerLpRowCoeffs(row),
2440 infinity_norms_[row])) {
2441 return false;
2442 }
2443
2444 // Update the upper bound.
2445 const IntegerValue bound =
2446 multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
2447 if (!AddProductTo(multiplier, bound, upper_bound)) return false;
2448 }
2449
2450 return true;
2451}
2452
// Greedily tweaks each integer multiplier (multiplier += to_add) when doing
// so tightens |upper_bound - ImpliedLB(scattered_vector)|. The candidate
// "diff" values are estimated in floating point, but the actual updates of
// scattered_vector and upper_bound stay exact integer arithmetic, so the
// resulting constraint remains valid.
//
// TODO(user): no need to update the multipliers.
void LinearProgrammingConstraint::AdjustNewLinearConstraint(
    std::vector<std::pair<glop::RowIndex, IntegerValue>>* integer_multipliers,
    ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
  const IntegerValue kMaxWantedCoeff(1e18);
  bool adjusted = false;
  for (std::pair<RowIndex, IntegerValue>& term : *integer_multipliers) {
    const RowIndex row = term.first;
    const IntegerValue multiplier = term.second;
    if (multiplier == 0) continue;

    // We will only allow change of the form "multiplier += to_add" with to_add
    // in [-negative_limit, positive_limit].
    //
    // We do not want to_add * row to overflow.
    IntegerValue negative_limit =
        FloorRatio(kMaxWantedCoeff, infinity_norms_[row]);
    IntegerValue positive_limit = negative_limit;

    // Make sure we never change the sign of the multiplier, except if the
    // row is an equality in which case we don't care.
    if (integer_lp_[row].ub != integer_lp_[row].lb) {
      if (multiplier > 0) {
        negative_limit = std::min(negative_limit, multiplier);
      } else {
        positive_limit = std::min(positive_limit, -multiplier);
      }
    }

    // Make sure upper_bound + to_add * row_bound never overflow.
    const IntegerValue row_bound =
        multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
    if (row_bound != 0) {
      // limit1 caps the growth of |upper_bound|, limit2 caps the magnitude of
      // the to_add * row_bound product itself.
      const IntegerValue limit1 = FloorRatio(
          std::max(IntegerValue(0), kMaxWantedCoeff - IntTypeAbs(*upper_bound)),
          IntTypeAbs(row_bound));
      const IntegerValue limit2 =
          FloorRatio(kMaxWantedCoeff, IntTypeAbs(row_bound));
      if ((*upper_bound > 0) == (row_bound > 0)) {  // Same sign.
        positive_limit = std::min(positive_limit, limit1);
        negative_limit = std::min(negative_limit, limit2);
      } else {
        negative_limit = std::min(negative_limit, limit1);
        positive_limit = std::min(positive_limit, limit2);
      }
    }

    // If we add the row to the scattered_vector, diff will indicate by how much
    // |upper_bound - ImpliedLB(scattered_vector)| will change. That corresponds
    // to increasing the multiplier by 1.
    //
    // At this stage, we are not sure computing sum coeff * bound will not
    // overflow, so we use floating point numbers. It is fine to do so since
    // this is not directly involved in the actual exact constraint generation:
    // these variables are just used in a heuristic.
    double common_diff = ToDouble(row_bound);
    double positive_diff = 0.0;
    double negative_diff = 0.0;

    // TODO(user): we could relax a bit some of the condition and allow a sign
    // change. It is just trickier to compute the diff when we allow such
    // changes.
    const LinearConstraintInternal& ct = integer_lp_[row];
    for (int i = 0; i < ct.num_terms; ++i) {
      const int index = ct.start_in_buffer + i;
      const ColIndex col = integer_lp_cols_[index];
      const IntegerValue coeff = integer_lp_coeffs_[index];
      DCHECK_NE(coeff, 0);

      // Moving a variable away from zero seems to improve the bound even
      // if it reduces the number of non-zero. Note that it is because of
      // this that positive_diff and negative_diff are not the same.
      const IntegerValue current = (*scattered_vector)[col];
      if (current == 0) {
        const IntegerVariable var = integer_variables_[col.value()];
        const IntegerValue lb = integer_trail_->LowerBound(var);
        const IntegerValue ub = integer_trail_->UpperBound(var);
        if (coeff > 0) {
          positive_diff -= ToDouble(coeff) * ToDouble(lb);
          negative_diff -= ToDouble(coeff) * ToDouble(ub);
        } else {
          positive_diff -= ToDouble(coeff) * ToDouble(ub);
          negative_diff -= ToDouble(coeff) * ToDouble(lb);
        }
        continue;
      }

      // We don't want to change the sign of current (except if the variable is
      // fixed, but in practice we should have removed any fixed variable, so we
      // don't care here) or to have an overflow.
      //
      // Corner case:
      // - IntTypeAbs(current) can be larger than kMaxWantedCoeff!
      // - The code assumes that 2 * kMaxWantedCoeff do not overflow.
      //
      // Note that we know that limit * abs_coeff will never overflow
      // because we used infinity_norms_[row] above.
      const IntegerValue abs_coeff = IntTypeAbs(coeff);
      const IntegerValue current_magnitude = IntTypeAbs(current);
      const IntegerValue overflow_threshold =
          std::max(IntegerValue(0), kMaxWantedCoeff - current_magnitude);
      if ((current > 0) == (coeff > 0)) {  // Same sign.
        if (negative_limit * abs_coeff > current_magnitude) {
          negative_limit = current_magnitude / abs_coeff;
          if (positive_limit == 0 && negative_limit == 0) break;
        }
        if (positive_limit * abs_coeff > overflow_threshold) {
          positive_limit = overflow_threshold / abs_coeff;
          if (positive_limit == 0 && negative_limit == 0) break;
        }
      } else {
        if (negative_limit * abs_coeff > overflow_threshold) {
          negative_limit = overflow_threshold / abs_coeff;
          if (positive_limit == 0 && negative_limit == 0) break;
        }
        if (positive_limit * abs_coeff > current_magnitude) {
          positive_limit = current_magnitude / abs_coeff;
          if (positive_limit == 0 && negative_limit == 0) break;
        }
      }

      // This is how diff changes: the implied bound of a non-zero entry moves
      // by coeff * implied for each unit added to the multiplier.
      const IntegerVariable var = integer_variables_[col.value()];
      const IntegerValue implied = current > 0
                                       ? integer_trail_->LowerBound(var)
                                       : integer_trail_->UpperBound(var);
      if (implied != 0) {
        common_diff -= ToDouble(coeff) * ToDouble(implied);
      }
    }

    positive_diff += common_diff;
    negative_diff += common_diff;

    // Only add a multiple of this row if it tightens the final constraint.
    // The positive_diff/negative_diff are supposed to be integer modulo the
    // double precision, so we only add a multiple if they seem far away from
    // zero.
    IntegerValue to_add(0);
    if (positive_diff <= -1.0 && positive_limit > 0) {
      to_add = positive_limit;
    }
    if (negative_diff >= 1.0 && negative_limit > 0) {
      // Pick this if it is better than the positive sign.
      if (to_add == 0 ||
          std::abs(ToDouble(negative_limit) * negative_diff) >
              std::abs(ToDouble(positive_limit) * positive_diff)) {
        to_add = -negative_limit;
      }
    }
    if (to_add != 0) {
      term.second += to_add;
      *upper_bound += to_add * row_bound;
      adjusted = true;
      // check_overflow=false is safe here: the limits above were chosen so
      // that neither the coefficients nor the bound can overflow.
      CHECK(scattered_vector
                ->AddLinearExpressionMultiple</*check_overflow=*/false>(
                    to_add, IntegerLpRowCols(row), IntegerLpRowCoeffs(row),
                    infinity_norms_[row]));
    }
  }
  if (adjusted) ++num_adjusts_;
}
2615
2616bool LinearProgrammingConstraint::PropagateLpConstraint(LinearConstraint ct) {
2617 DCHECK(constraint_manager_.DebugCheckConstraint(ct));
2618
2619 // We need to cache this before we std::move() the constraint!
2620 const int num_terms = ct.num_terms;
2621 if (num_terms == 0) {
2622 if (ct.ub >= 0) return true;
2623 return integer_trail_->ReportConflict({}); // Unsat.
2624 }
2625
2626 std::unique_ptr<IntegerSumLE128> cp_constraint(
2627 new IntegerSumLE128(std::move(ct), model_));
2628
2629 // We always propagate level zero bounds first.
2630 // If we are at level zero, there is nothing else to do.
2631 if (!cp_constraint->PropagateAtLevelZero()) return false;
2632 if (trail_->CurrentDecisionLevel() == 0) return true;
2633
2634 // To optimize the memory usage, if the constraint didn't propagate anything,
2635 // we don't need to keep it around.
2636 const int64_t stamp = integer_trail_->num_enqueues();
2637 const bool no_conflict = cp_constraint->Propagate();
2638 if (no_conflict && integer_trail_->num_enqueues() == stamp) return true;
2639
2640 const int64_t current_size =
2641 cumulative_optimal_constraint_sizes_.empty()
2642 ? 0
2643 : cumulative_optimal_constraint_sizes_.back();
2644 optimal_constraints_.push_back(std::move(cp_constraint));
2645 cumulative_optimal_constraint_sizes_.push_back(current_size + num_terms);
2646 rev_optimal_constraints_size_ = optimal_constraints_.size();
2647 return no_conflict;
2648}
2649
// The "exact" computation goes as follows:
//
// Given any INTEGER linear combination of the LP constraints, we can create a
// new integer constraint that is valid (its computation must not overflow
// though). Lets call this "linear_combination <= ub". We can then always add to
// it the inequality "objective_terms <= objective_var", so we get:
// ImpliedLB(objective_terms + linear_combination) - ub <= objective_var.
// where ImpliedLB() is computed from the variable current bounds.
//
// Now, if we use for the linear combination an approximation of the optimal
// negated dual LP values (by scaling them and rounding them to integer), we
// will get an EXACT objective lower bound that is more or less the same as the
// inexact bound given by the LP relaxation. This allows to derive exact reasons
// for any propagation done by this constraint.
bool LinearProgrammingConstraint::PropagateExactLpReason() {
  // Clear old reason and deductions.
  integer_reason_.clear();
  deductions_.clear();
  deductions_reason_.clear();

  // The row multipliers will be the negation of the LP duals.
  //
  // TODO(user): Provide and use a sparse API in Glop to get the duals.
  const RowIndex num_rows = simplex_.GetProblemNumRows();
  tmp_lp_multipliers_.clear();
  for (RowIndex row(0); row < num_rows; ++row) {
    const double value = -simplex_.GetDualValue(row);
    // Near-zero duals do not contribute to the reason; skip them.
    if (std::abs(value) < kZeroTolerance) continue;
    tmp_lp_multipliers_.push_back({row, scaler_.UnscaleDualValue(row, value)});
  }

  // In this case, the LP lower bound matches the basic objective "constraint"
  // propagation. That is, there is an LP solution with all objective variables
  // at their current best bound. There is no need to do more work here.
  if (tmp_lp_multipliers_.empty()) return true;

  // For the corner case of an objective of size 1, we do not want or need
  // to take it into account.
  bool take_objective_into_account = true;
  if (objective_cp_is_part_of_lp_) {
    // The objective is part of the lp.
    // This should only happen for objective with a single term.
    CHECK_EQ(integer_objective_.size(), 1);
    CHECK_EQ(integer_objective_[0].first, mirror_lp_variable_[objective_cp_]);
    CHECK_EQ(integer_objective_[0].second, IntegerValue(1));

    take_objective_into_account = false;
  }

  // Convert the floating point multipliers into scaled integer ones. A zero
  // scaling signals a failure (e.g. potential overflow), in which case we
  // conservatively abort without propagating.
  IntegerValue scaling = 0;
  IgnoreTrivialConstraintMultipliers(&tmp_lp_multipliers_);
  ScaleMultipliers(tmp_lp_multipliers_, take_objective_into_account, &scaling,
                   &tmp_integer_multipliers_);
  if (scaling == 0) {
    VLOG(1) << simplex_.GetProblemStatus();
    VLOG(1) << "Issue while computing the exact LP reason. Aborting.";
    return true;
  }

  // Take the exact integer linear combination of the rows. This cannot
  // overflow since ScaleMultipliers() already checked for that.
  IntegerValue rc_ub;
  CHECK(ComputeNewLinearConstraint</*check_overflow=*/false>(
      tmp_integer_multipliers_, &tmp_scattered_vector_, &rc_ub));

  std::optional<std::pair<IntegerVariable, IntegerValue>> extra_term =
      std::nullopt;
  if (take_objective_into_account) {
    // The "objective constraint" behaves like if the unscaled cp multiplier
    // was 1.0, so we will multiply it by this number and add it to
    // reduced_costs.
    const IntegerValue obj_scale = scaling;

    // TODO(user): Maybe avoid this conversion.
    tmp_cols_.clear();
    tmp_coeffs_.clear();
    for (const auto [col, coeff] : integer_objective_) {
      tmp_cols_.push_back(col);
      tmp_coeffs_.push_back(coeff);
    }
    CHECK(tmp_scattered_vector_
              .AddLinearExpressionMultiple</*check_overflow=*/false>(
                  obj_scale, tmp_cols_, tmp_coeffs_, objective_infinity_norm_));
    CHECK(AddProductTo(-obj_scale, integer_objective_offset_, &rc_ub));

    extra_term = {objective_cp_, -obj_scale};
  }

  // TODO(user): It seems when the LP has a single variable and the equation is
  // obj >= lower_bound, this removes it? For now we disable this if the
  // objective variable is part of the LP (i.e. single objective).
  if (take_objective_into_account) {
    AdjustNewLinearConstraint(&tmp_integer_multipliers_, &tmp_scattered_vector_,
                              &rc_ub);
  }

  // Create the IntegerSumLE that will allow to propagate the objective and more
  // generally do the reduced cost fixing.
  LinearConstraint explanation =
      tmp_scattered_vector_.ConvertToLinearConstraint(
          extended_integer_variables_, rc_ub, extra_term);

  // Corner case where prevent overflow removed all terms.
  if (explanation.num_terms == 0) {
    trail_->MutableConflict()->clear();
    return explanation.ub >= 0;
  }

  constraint_manager_.SetReducedCostsAsLinearConstraint(explanation);
  return PropagateLpConstraint(std::move(explanation));
}
2758
2759bool LinearProgrammingConstraint::PropagateExactDualRay() {
2760 IntegerValue scaling;
2761 const glop::DenseColumn ray = simplex_.GetDualRay();
2762 DCHECK_EQ(ray.size(), integer_lp_.size());
2763 tmp_lp_multipliers_.clear();
2764 for (RowIndex row(0); row < ray.size(); ++row) {
2765 const double value = ray[row];
2766 if (std::abs(value) < kZeroTolerance) continue;
2767
2768 // This is the same as UnscaleLeftSolveValue(). Note that we don't need to
2769 // scale by the objective factor here like we do in UnscaleDualValue().
2770 tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value});
2771 }
2772 IgnoreTrivialConstraintMultipliers(&tmp_lp_multipliers_);
2773 ScaleMultipliers(tmp_lp_multipliers_, /*take_objective_into_account=*/false,
2774 &scaling, &tmp_integer_multipliers_);
2775 if (scaling == 0) {
2776 VLOG(1) << "Isse while computing the exact dual ray reason. Aborting.";
2777 return true;
2778 }
2779
2780 IntegerValue new_constraint_ub;
2781 CHECK(ComputeNewLinearConstraint</*check_overflow=*/false>(
2782 tmp_integer_multipliers_, &tmp_scattered_vector_, &new_constraint_ub))
2783 << scaling;
2784
2785 AdjustNewLinearConstraint(&tmp_integer_multipliers_, &tmp_scattered_vector_,
2786 &new_constraint_ub);
2787
2788 LinearConstraint explanation =
2789 tmp_scattered_vector_.ConvertToLinearConstraint(
2790 extended_integer_variables_, new_constraint_ub);
2791
2792 std::string message;
2793 if (VLOG_IS_ON(1)) {
2794 // Unfortunately, we need to set this up before we std::move() it.
2795 message = absl::StrCat("LP exact dual ray not infeasible,",
2796 " implied_lb: ", GetImpliedLowerBound(explanation),
2797 " ub: ", explanation.ub.value());
2798 }
2799
2800 // This should result in a conflict if the precision is good enough.
2801 if (!PropagateLpConstraint(std::move(explanation))) return false;
2802
2803 VLOG(1) << message;
2804 return true;
2805}
2806
2807int64_t LinearProgrammingConstraint::CalculateDegeneracy() {
2808 int num_non_basic_with_zero_rc = 0;
2809 const auto reduced_costs = simplex_.GetReducedCosts().const_view();
2810 for (const glop::ColIndex i : simplex_.GetNotBasicBitRow()) {
2811 if (reduced_costs[i] == 0.0) {
2812 num_non_basic_with_zero_rc++;
2813 }
2814 }
2815 const int64_t num_cols = simplex_.GetProblemNumCols().value();
2816 is_degenerate_ = num_non_basic_with_zero_rc >= 0.3 * num_cols;
2817 return num_non_basic_with_zero_rc;
2818}
2819
2820void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions(
2821 double cp_objective_delta) {
2822 deductions_.clear();
2823
2824 const double lp_objective_delta =
2825 cp_objective_delta / scaler_.ObjectiveScalingFactor();
2826 const int num_vars = integer_variables_.size();
2827 for (int i = 0; i < num_vars; i++) {
2828 const IntegerVariable cp_var = integer_variables_[i];
2829 const glop::ColIndex lp_var = glop::ColIndex(i);
2830 const double rc = simplex_.GetReducedCost(lp_var);
2831 const double value = simplex_.GetVariableValue(lp_var);
2832
2833 if (rc == 0.0) continue;
2834 const double lp_other_bound = value + lp_objective_delta / rc;
2835 const double cp_other_bound =
2836 scaler_.UnscaleVariableValue(lp_var, lp_other_bound);
2837
2838 if (rc > kLpEpsilon) {
2839 const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
2840 const double new_ub = std::floor(cp_other_bound + kCpEpsilon);
2841 if (new_ub < ub) {
2842 // TODO(user): Because rc > kLpEpsilon, the lower_bound of cp_var
2843 // will be part of the reason returned by FillReducedCostsReason(), but
2844 // we actually do not need it here. Same below.
2845 const IntegerValue new_ub_int(static_cast<IntegerValue>(new_ub));
2846 deductions_.push_back(IntegerLiteral::LowerOrEqual(cp_var, new_ub_int));
2847 }
2848 } else if (rc < -kLpEpsilon) {
2849 const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
2850 const double new_lb = std::ceil(cp_other_bound - kCpEpsilon);
2851 if (new_lb > lb) {
2852 const IntegerValue new_lb_int(static_cast<IntegerValue>(new_lb));
2853 deductions_.push_back(
2854 IntegerLiteral::GreaterOrEqual(cp_var, new_lb_int));
2855 }
2856 }
2857 }
2858}
2859
// Folds the latest LP reduced costs into per-variable running averages (with
// periodic halving so recent data dominates) and recomputes the cached
// branching scores sorted by decreasing value.
void LinearProgrammingConstraint::UpdateAverageReducedCosts() {
  const int num_vars = integer_variables_.size();
  // Lazily grow the per-variable statistics if the number of variables
  // increased (or on first call).
  if (sum_cost_down_.size() < num_vars) {
    sum_cost_down_.resize(num_vars, 0.0);
    num_cost_down_.resize(num_vars, 0);
    sum_cost_up_.resize(num_vars, 0.0);
    num_cost_up_.resize(num_vars, 0);
    rc_scores_.resize(num_vars, 0.0);
  }

  // Decay averages: every 10000 calls, halve both sums and counts.
  num_calls_since_reduced_cost_averages_reset_++;
  if (num_calls_since_reduced_cost_averages_reset_ == 10000) {
    for (int i = 0; i < num_vars; i++) {
      sum_cost_up_[i] /= 2;
      num_cost_up_[i] /= 2;
      sum_cost_down_[i] /= 2;
      num_cost_down_[i] /= 2;
    }
    num_calls_since_reduced_cost_averages_reset_ = 0;
  }

  // Accumulate reduced costs of all unassigned variables.
  for (int i = 0; i < num_vars; i++) {
    const IntegerVariable var = integer_variables_[i];

    // Skip fixed variables.
    if (integer_trail_->IsFixed(var)) continue;

    // Skip reduced costs that are zero or close.
    const double rc = lp_reduced_cost_[i];
    if (std::abs(rc) < kCpEpsilon) continue;

    // A negative reduced cost counts toward the "down" direction, a positive
    // one toward "up"; both sums accumulate magnitudes.
    if (rc < 0.0) {
      sum_cost_down_[i] -= rc;
      num_cost_down_[i]++;
    } else {
      sum_cost_up_[i] += rc;
      num_cost_up_[i]++;
    }
  }

  // Tricky, we artificially reset the rc_rev_int_repository_ to level zero
  // so that the rev_rc_start_ is zero.
  rc_rev_int_repository_.SetLevel(0);
  rc_rev_int_repository_.SetLevel(trail_->CurrentDecisionLevel());
  rev_rc_start_ = 0;

  // Cache the new score (higher is better) using the average reduced costs
  // as a signal.
  positions_by_decreasing_rc_score_.clear();
  for (int i = 0; i < num_vars; i++) {
    // If only one direction exists, we take its value divided by 2, so that
    // such a variable should have a smaller score than the min of the two
    // sides, except if one direction has a really high reduced cost.
    const double a_up =
        num_cost_up_[i] > 0 ? sum_cost_up_[i] / num_cost_up_[i] : 0.0;
    const double a_down =
        num_cost_down_[i] > 0 ? sum_cost_down_[i] / num_cost_down_[i] : 0.0;
    if (num_cost_down_[i] > 0 && num_cost_up_[i] > 0) {
      rc_scores_[i] = std::min(a_up, a_down);
    } else {
      rc_scores_[i] = 0.5 * (a_down + a_up);
    }

    // We ignore scores of zero (i.e. no data) and will follow the default
    // search heuristic if all variables are like this.
    if (rc_scores_[i] > 0.0) {
      positions_by_decreasing_rc_score_.push_back({-rc_scores_[i], i});
    }
  }
  std::sort(positions_by_decreasing_rc_score_.begin(),
            positions_by_decreasing_rc_score_.end());
}
2934
2935std::function<IntegerLiteral()>
2937 return [this]() { return this->LPReducedCostAverageDecision(); };
2938}
2939
2940IntegerLiteral LinearProgrammingConstraint::LPReducedCostAverageDecision() {
2941 // Select noninstantiated variable with highest positive average reduced cost.
2942 int selected_index = -1;
2943 const int size = positions_by_decreasing_rc_score_.size();
2944 rc_rev_int_repository_.SaveState(&rev_rc_start_);
2945 for (int i = rev_rc_start_; i < size; ++i) {
2946 const int index = positions_by_decreasing_rc_score_[i].second;
2947 const IntegerVariable var = integer_variables_[index];
2948 if (integer_trail_->IsFixed(var)) continue;
2949 selected_index = index;
2950 rev_rc_start_ = i;
2951 break;
2952 }
2953
2954 if (selected_index == -1) return IntegerLiteral();
2955 const IntegerVariable var = integer_variables_[selected_index];
2956
2957 // If ceil(value) is current upper bound, try var == upper bound first.
2958 // Guarding with >= prevents numerical problems.
2959 // With 0/1 variables, this will tend to try setting to 1 first,
2960 // which produces more shallow trees.
2961 const IntegerValue ub = integer_trail_->UpperBound(var);
2962 const IntegerValue value_ceil(
2963 std::ceil(this->GetSolutionValue(var) - kCpEpsilon));
2964 if (value_ceil >= ub) {
2965 return IntegerLiteral::GreaterOrEqual(var, ub);
2966 }
2967
2968 // If floor(value) is current lower bound, try var == lower bound first.
2969 // Guarding with <= prevents numerical problems.
2970 const IntegerValue lb = integer_trail_->LowerBound(var);
2971 const IntegerValue value_floor(
2972 std::floor(this->GetSolutionValue(var) + kCpEpsilon));
2973 if (value_floor <= lb) {
2974 return IntegerLiteral::LowerOrEqual(var, lb);
2975 }
2976
2977 // Here lb < value_floor <= value_ceil < ub.
2978 // Try the most promising split between var <= floor or var >= ceil.
2979 const double a_up =
2980 num_cost_up_[selected_index] > 0
2981 ? sum_cost_up_[selected_index] / num_cost_up_[selected_index]
2982 : 0.0;
2983 const double a_down =
2984 num_cost_down_[selected_index] > 0
2985 ? sum_cost_down_[selected_index] / num_cost_down_[selected_index]
2986 : 0.0;
2987 if (a_down < a_up) {
2988 return IntegerLiteral::LowerOrEqual(var, value_floor);
2989 } else {
2990 return IntegerLiteral::GreaterOrEqual(var, value_ceil);
2991 }
2992}
2993
2994absl::Span<const glop::ColIndex> LinearProgrammingConstraint::IntegerLpRowCols(
2995 glop::RowIndex row) const {
2996 const int start = integer_lp_[row].start_in_buffer;
2997 const size_t num_terms = static_cast<size_t>(integer_lp_[row].num_terms);
2998 return {integer_lp_cols_.data() + start, num_terms};
2999}
3000
3001absl::Span<const IntegerValue> LinearProgrammingConstraint::IntegerLpRowCoeffs(
3002 glop::RowIndex row) const {
3003 const int start = integer_lp_[row].start_in_buffer;
3004 const size_t num_terms = static_cast<size_t>(integer_lp_[row].num_terms);
3005 return {integer_lp_coeffs_.data() + start, num_terms};
3006}
3007
3009 return absl::StrFormat("%d rows, %d columns, %d entries", integer_lp_.size(),
3010 integer_variables_.size(), integer_lp_coeffs_.size());
3011}
3012
3013} // namespace sat
3014} // namespace operations_research
StrictITISpan< RowIndex, const Fractional > ConstView
Definition lp_types.h:291
static int64_t GCD64(int64_t x, int64_t y)
Definition mathutil.h:107
void SaveState(T *object)
Definition rev.h:62
Fractional VariableScalingFactor(ColIndex col) const
Fractional UnscaleVariableValue(ColIndex col, Fractional value) const
Fractional GetVariableValue(ColIndex col) const
IntegerValue LowerBound(IntegerVariable i) const
Definition integer.h:1537
IntegerValue UpperBound(IntegerVariable i) const
Definition integer.h:1541
bool IsFixed(IntegerVariable i) const
Definition integer.h:1545
IntegerValue LevelZeroUpperBound(IntegerVariable var) const
Definition integer.h:1657
IntegerValue LevelZeroLowerBound(IntegerVariable var) const
Definition integer.h:1650
const std::vector< ConstraintIndex > & LpConstraints() const
const util_intops::StrongVector< ConstraintIndex, ConstraintInfo > & AllConstraints() const
void SetObjectiveCoefficient(IntegerVariable ivar, IntegerValue coeff)
bool IncrementalPropagate(const std::vector< int > &watch_indices) override
LinearProgrammingConstraint(Model *model, absl::Span< const IntegerVariable > vars)
static constexpr SearchBranching LP_SEARCH
bool Add(glop::ColIndex col, IntegerValue value)
LinearConstraint ConvertToLinearConstraint(absl::Span< const IntegerVariable > integer_variables, IntegerValue upper_bound, std::optional< std::pair< IntegerVariable, IntegerValue > > extra_term=std::nullopt)
void ConvertToCutData(absl::int128 rhs, absl::Span< const IntegerVariable > integer_variables, absl::Span< const double > lp_solution, IntegerTrail *integer_trail, CutData *result)
std::vector< std::pair< glop::ColIndex, IntegerValue > > GetTerms()
bool AddLinearExpressionMultiple(IntegerValue multiplier, absl::Span< const glop::ColIndex > cols, absl::Span< const IntegerValue > coeffs, IntegerValue max_coeff_magnitude)
void assign(size_type n, const value_type &val)
void push_back(const value_type &val)
constexpr ColIndex kInvalidCol(-1)
ColIndex RowToColIndex(RowIndex row)
Definition lp_types.h:54
RowIndex ColToRowIndex(ColIndex col)
Definition lp_types.h:57
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition lp_types.h:380
StrictITIVector< ColIndex, Fractional > DenseRow
Definition lp_types.h:351
IntegerValue FloorRatio(IntegerValue dividend, IntegerValue positive_divisor)
bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue *result)
void DivideByGCD(LinearConstraint *constraint)
IntType IntTypeAbs(IntType t)
std::vector< IntegerVariable > NegationOf(absl::Span< const IntegerVariable > vars)
Definition integer.cc:52
constexpr IntegerValue kMinIntegerValue(-kMaxIntegerValue.value())
IntegerVariable PositiveVariable(IntegerVariable i)
int WeightedPick(absl::Span< const double > input, absl::BitGenRef random)
Definition util.cc:415
bool VariableIsPositive(IntegerVariable i)
LinearConstraintPropagator< true > IntegerSumLE128
IntegerValue CapProdI(IntegerValue a, IntegerValue b)
double ToDouble(IntegerValue value)
OR-Tools root namespace.
bool AtMinOrMaxInt64(int64_t x)
int64_t CapAdd(int64_t x, int64_t y)
bool AddIntoOverflow(int64_t x, int64_t *y)
const bool DEBUG_MODE
Definition radix_sort.h:266
Point BinarySearch(Point x_true, Point x_false, std::function< bool(Point)> f)
int64_t CapProd(int64_t x, int64_t y)
std::vector< CutTerm > terms
Definition cuts.h:153
bool AppendOneTerm(IntegerVariable var, IntegerValue coeff, double lp_value, IntegerValue lb, IntegerValue ub)
Definition cuts.cc:137
static IntegerLiteral GreaterOrEqual(IntegerVariable i, IntegerValue bound)
static IntegerLiteral LowerOrEqual(IntegerVariable i, IntegerValue bound)
std::unique_ptr< IntegerValue[]> coeffs
std::unique_ptr< IntegerVariable[]> vars
absl::Span< const IntegerValue > CoeffsAsSpan() const
absl::Span< const IntegerVariable > VarsAsSpan() const