Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
linear_programming_constraint.cc
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/linear_programming_constraint.h"
15
16#include <algorithm>
17#include <array>
18#include <cmath>
19#include <cstddef>
20#include <cstdint>
21#include <functional>
22#include <limits>
23#include <memory>
24#include <numeric>
25#include <optional>
26#include <string>
27#include <utility>
28#include <vector>
29
30#include "absl/container/flat_hash_map.h"
31#include "absl/log/check.h"
32#include "absl/numeric/int128.h"
33#include "absl/strings/str_cat.h"
34#include "absl/strings/string_view.h"
35#include "absl/types/span.h"
40#include "ortools/glop/parameters.pb.h"
42#include "ortools/glop/status.h"
50#include "ortools/sat/cuts.h"
52#include "ortools/sat/integer.h"
56#include "ortools/sat/model.h"
58#include "ortools/sat/sat_parameters.pb.h"
61#include "ortools/sat/util.h"
63#include "ortools/util/bitset.h"
64#include "ortools/util/rev.h"
68
69namespace operations_research {
70namespace sat {
71
72using glop::ColIndex;
74using glop::RowIndex;
75
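// Clears the vector and resizes it to `size` columns. In the sparse case only
// the previously touched positions are reset, which keeps this cheap.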
76void ScatteredIntegerVector::ClearAndResize(int size) {
77  if (is_sparse_) {
78 for (const glop::ColIndex col : non_zeros_) {
79 dense_vector_[col] = IntegerValue(0);
80 }
81 dense_vector_.resize(size, IntegerValue(0));
82 } else {
83 dense_vector_.assign(size, IntegerValue(0));
84 }
85 for (const glop::ColIndex col : non_zeros_) {
86 is_zeros_[col] = true;
87 }
88 is_zeros_.resize(size, true);
89 non_zeros_.clear();
90 is_sparse_ = true;
91}
92
93bool ScatteredIntegerVector::Add(glop::ColIndex col, IntegerValue value) {
94 const int64_t add = CapAdd(value.value(), dense_vector_[col].value());
95 if (add == std::numeric_limits<int64_t>::min() ||
96 add == std::numeric_limits<int64_t>::max())
97 return false;
98 dense_vector_[col] = IntegerValue(add);
99 if (is_sparse_ && is_zeros_[col]) {
100 is_zeros_[col] = false;
101 non_zeros_.push_back(col);
102 }
103 return true;
104}
105
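// Adds multiplier * sum(coeffs[i] * cols[i]) to the vector. The update stays
// sparse as long as the number of non-zeros remains below ~10% of the size;
// when check_overflow is true, it returns false on int64 overflow instead of
// wrapping.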
106template <bool check_overflow>
107bool ScatteredIntegerVector::AddLinearExpressionMultiple(
108    const IntegerValue multiplier, absl::Span<const glop::ColIndex> cols,
109 absl::Span<const IntegerValue> coeffs) {
110 const double threshold = 0.1 * static_cast<double>(dense_vector_.size());
111 const int num_terms = cols.size();
112 if (is_sparse_ && static_cast<double>(num_terms) < threshold) {
113 for (int i = 0; i < num_terms; ++i) {
114 if (is_zeros_[cols[i]]) {
115 is_zeros_[cols[i]] = false;
116 non_zeros_.push_back(cols[i]);
117 }
118 if (check_overflow) {
119 if (!AddProductTo(multiplier, coeffs[i], &dense_vector_[cols[i]])) {
120 return false;
121 }
122 } else {
123 dense_vector_[cols[i]] += multiplier * coeffs[i];
124 }
125 }
126 if (static_cast<double>(non_zeros_.size()) > threshold) {
127 is_sparse_ = false;
128 }
129 } else {
130 is_sparse_ = false;
131 for (int i = 0; i < num_terms; ++i) {
132 if (check_overflow) {
133 if (!AddProductTo(multiplier, coeffs[i], &dense_vector_[cols[i]])) {
134 return false;
135 }
136 } else {
137 dense_vector_[cols[i]] += multiplier * coeffs[i];
138 }
139 }
140 }
141 return true;
142}
143
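// Converts the accumulated terms into `sum coeff * var <= upper_bound`,
// skipping zero coefficients, optionally appending `extra_term`, and dividing
// the result by its GCD at the end.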
144LinearConstraint ScatteredIntegerVector::ConvertToLinearConstraint(
145    absl::Span<const IntegerVariable> integer_variables,
146 IntegerValue upper_bound,
147 std::optional<std::pair<IntegerVariable, IntegerValue>> extra_term) {
148 // We first do one pass to compute the exact size and not overallocate.
149 int final_size = 0;
150 if (is_sparse_) {
151 for (const glop::ColIndex col : non_zeros_) {
152 const IntegerValue coeff = dense_vector_[col];
153 if (coeff == 0) continue;
154 ++final_size;
155 }
156 } else {
157 for (const IntegerValue coeff : dense_vector_) {
158 if (coeff != 0) ++final_size;
159 }
160 }
161 if (extra_term != std::nullopt) ++final_size;
162
163 // Allocate once.
164 LinearConstraint result;
165 result.resize(final_size);
166
167 // Copy terms.
168 int new_size = 0;
169 if (is_sparse_) {
170 std::sort(non_zeros_.begin(), non_zeros_.end());
171 for (const glop::ColIndex col : non_zeros_) {
172 const IntegerValue coeff = dense_vector_[col];
173 if (coeff == 0) continue;
174 result.vars[new_size] = integer_variables[col.value()];
175 result.coeffs[new_size] = coeff;
176 ++new_size;
177 }
178 } else {
179 const int size = dense_vector_.size();
180 for (glop::ColIndex col(0); col < size; ++col) {
181 const IntegerValue coeff = dense_vector_[col];
182 if (coeff == 0) continue;
183 result.vars[new_size] = integer_variables[col.value()];
184 result.coeffs[new_size] = coeff;
185 ++new_size;
186 }
187 }
188
189 result.lb = kMinIntegerValue;
190 result.ub = upper_bound;
191
192 if (extra_term != std::nullopt) {
193    result.vars[new_size] = extra_term->first;
194    result.coeffs[new_size] = extra_term->second;
195 ++new_size;
196 }
197
198 CHECK_EQ(new_size, final_size);
199 DivideByGCD(&result);
200 return result;
201}
202
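// Same conversion, but into the CutData representation used by the cut
// helpers: each term records its LP value and its level-zero bounds.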
203void ScatteredIntegerVector::ConvertToCutData(
204    absl::int128 rhs, absl::Span<const IntegerVariable> integer_variables,
205 absl::Span<const double> lp_solution, IntegerTrail* integer_trail,
206 CutData* result) {
207 result->terms.clear();
208 result->rhs = rhs;
209 if (is_sparse_) {
210 std::sort(non_zeros_.begin(), non_zeros_.end());
211 for (const glop::ColIndex col : non_zeros_) {
212 const IntegerValue coeff = dense_vector_[col];
213 if (coeff == 0) continue;
214 const IntegerVariable var = integer_variables[col.value()];
215 CHECK(result->AppendOneTerm(var, coeff, lp_solution[col.value()],
216 integer_trail->LevelZeroLowerBound(var),
217 integer_trail->LevelZeroUpperBound(var)));
218 }
219 } else {
220 const int size = dense_vector_.size();
221 for (glop::ColIndex col(0); col < size; ++col) {
222 const IntegerValue coeff = dense_vector_[col];
223 if (coeff == 0) continue;
224 const IntegerVariable var = integer_variables[col.value()];
225 CHECK(result->AppendOneTerm(var, coeff, lp_solution[col.value()],
226 integer_trail->LevelZeroLowerBound(var),
227 integer_trail->LevelZeroUpperBound(var)));
228 }
229 }
230}
231
232std::vector<std::pair<glop::ColIndex, IntegerValue>>
234 std::vector<std::pair<glop::ColIndex, IntegerValue>> result;
235 if (is_sparse_) {
236 std::sort(non_zeros_.begin(), non_zeros_.end());
237 for (const glop::ColIndex col : non_zeros_) {
238 const IntegerValue coeff = dense_vector_[col];
239 if (coeff != 0) result.push_back({col, coeff});
240 }
241 } else {
242 const int size = dense_vector_.size();
243 for (glop::ColIndex col(0); col < size; ++col) {
244 const IntegerValue coeff = dense_vector_[col];
245 if (coeff != 0) result.push_back({col, coeff});
246 }
247 }
248 return result;
249}
250
251// TODO(user): make SatParameters singleton too, otherwise changing them after
252// a constraint was added will have no effect on this class.
253LinearProgrammingConstraint::LinearProgrammingConstraint(
254    Model* model, absl::Span<const IntegerVariable> vars)
255 : constraint_manager_(model),
256 parameters_(*(model->GetOrCreate<SatParameters>())),
257 model_(model),
258 time_limit_(model->GetOrCreate<TimeLimit>()),
259 integer_trail_(model->GetOrCreate<IntegerTrail>()),
260 trail_(model->GetOrCreate<Trail>()),
261 watcher_(model->GetOrCreate<GenericLiteralWatcher>()),
262 integer_encoder_(model->GetOrCreate<IntegerEncoder>()),
263 product_detector_(model->GetOrCreate<ProductDetector>()),
264 objective_definition_(model->GetOrCreate<ObjectiveDefinition>()),
265 shared_stats_(model->GetOrCreate<SharedStatistics>()),
266 shared_response_manager_(model->GetOrCreate<SharedResponseManager>()),
267 random_(model->GetOrCreate<ModelRandomGenerator>()),
268 rlt_cut_helper_(model),
269 implied_bounds_processor_({}, integer_trail_,
270 model->GetOrCreate<ImpliedBounds>()),
271 dispatcher_(model->GetOrCreate<LinearProgrammingDispatcher>()),
272 expanded_lp_solution_(*model->GetOrCreate<ModelLpValues>()) {
273 // Tweak the default parameters to make the solve incremental.
274 simplex_params_.set_use_dual_simplex(true);
275 simplex_params_.set_cost_scaling(glop::GlopParameters::MEAN_COST_SCALING);
276 simplex_params_.set_primal_feasibility_tolerance(
277 parameters_.lp_primal_tolerance());
278 simplex_params_.set_dual_feasibility_tolerance(
279 parameters_.lp_dual_tolerance());
280 if (parameters_.use_exact_lp_reason()) {
281 simplex_params_.set_change_status_to_imprecise(false);
282 }
283 simplex_.SetParameters(simplex_params_);
284 if (parameters_.search_branching() == SatParameters::LP_SEARCH) {
285 compute_reduced_cost_averages_ = true;
286 }
287
288 // Register our local rev int repository.
289 integer_trail_->RegisterReversibleClass(&rc_rev_int_repository_);
290
291 // Register SharedStatistics with the cut helpers.
292 auto* stats = model->GetOrCreate<SharedStatistics>();
293 integer_rounding_cut_helper_.SetSharedStatistics(stats);
294 cover_cut_helper_.SetSharedStatistics(stats);
295
296 // Initialize the IntegerVariable -> ColIndex mapping.
297 CHECK(std::is_sorted(vars.begin(), vars.end()));
298
299 integer_variables_.assign(vars.begin(), vars.end());
300 ColIndex col{0};
301 for (const IntegerVariable positive_variable : vars) {
302 CHECK(VariableIsPositive(positive_variable));
303 implied_bounds_processor_.AddLpVariable(positive_variable);
304 (*dispatcher_)[positive_variable] = this;
305 mirror_lp_variable_[positive_variable] = col;
306
307 ++col;
308 }
309 lp_solution_.assign(vars.size(), std::numeric_limits<double>::infinity());
310 lp_reduced_cost_.assign(vars.size(), 0.0);
311
312 if (!vars.empty()) {
313 const int max_index = NegationOf(vars.back()).value();
314 if (max_index >= expanded_lp_solution_.size()) {
315 expanded_lp_solution_.assign(max_index + 1, 0.0);
316 }
317 }
318}
319
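// Adds a constraint to the underlying LinearConstraintManager. This must
// happen before the constraint is registered with the model.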
320void LinearProgrammingConstraint::AddLinearConstraint(LinearConstraint ct) {
321  DCHECK(!lp_constraint_is_registered_);
322 constraint_manager_.Add(std::move(ct));
323}
324
325glop::ColIndex LinearProgrammingConstraint::GetMirrorVariable(
326 IntegerVariable positive_variable) {
327 DCHECK(VariableIsPositive(positive_variable));
328 return mirror_lp_variable_.at(positive_variable);
329}
330
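// Declares an objective term coeff * ivar. Internally everything is stored on
// the positive variable, so the coefficient is negated for negated variables.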
331void LinearProgrammingConstraint::SetObjectiveCoefficient(IntegerVariable ivar,
332                                                          IntegerValue coeff) {
333 CHECK(!lp_constraint_is_registered_);
334 objective_is_defined_ = true;
335 IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar);
336 if (ivar != pos_var) coeff = -coeff;
337
338 constraint_manager_.SetObjectiveCoefficient(pos_var, coeff);
339 const glop::ColIndex col = GetMirrorVariable(pos_var);
340 integer_objective_.push_back({col, coeff});
341 objective_infinity_norm_ =
342 std::max(objective_infinity_norm_, IntTypeAbs(coeff));
343}
344
345// TODO(user): As the search progresses, some variables might get fixed. Exploit
346// this to reduce the number of variables in the LP and in the
347// ConstraintManager? We might also detect during the search that two variables
348// are equivalent.
349//
350// TODO(user): On TSP/VRP with a lot of cuts, this can take 20% of the overall
351// running time. We should be able to almost remove most of this from the
352// profile by being more incremental (modulo LP scaling).
353//
354// TODO(user): A longer term idea for LP with a lot of variables is to not
355// add all variables to each LP solve and do some "sifting". That can be useful
356// for TSP for instance where the number of edges is large, but only a small
357// fraction will be used in the optimal solution.
358bool LinearProgrammingConstraint::CreateLpFromConstraintManager() {
360
361 // Fill integer_lp_.
362 integer_lp_.clear();
363 integer_lp_cols_.clear();
364 integer_lp_coeffs_.clear();
365
366 infinity_norms_.clear();
367 const auto& all_constraints = constraint_manager_.AllConstraints();
368 for (const auto index : constraint_manager_.LpConstraints()) {
369 const LinearConstraint& ct = all_constraints[index].constraint;
370
371 integer_lp_.push_back(LinearConstraintInternal());
372 LinearConstraintInternal& new_ct = integer_lp_.back();
373 new_ct.lb = ct.lb;
374 new_ct.ub = ct.ub;
375 new_ct.lb_is_trivial = all_constraints[index].lb_is_trivial;
376 new_ct.ub_is_trivial = all_constraints[index].ub_is_trivial;
377 const int size = ct.num_terms;
378 if (ct.lb > ct.ub) {
379 VLOG(1) << "Trivial infeasible bound in an LP constraint";
380 return false;
381 }
382
383 IntegerValue infinity_norm = 0;
384 infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.lb));
385 infinity_norm = std::max(infinity_norm, IntTypeAbs(ct.ub));
386 new_ct.start_in_buffer = integer_lp_cols_.size();
387 new_ct.num_terms = size;
388 for (int i = 0; i < size; ++i) {
389 // We only use positive variable inside this class.
390 const IntegerVariable var = ct.vars[i];
391 const IntegerValue coeff = ct.coeffs[i];
392 infinity_norm = std::max(infinity_norm, IntTypeAbs(coeff));
393 integer_lp_cols_.push_back(GetMirrorVariable(var));
394 integer_lp_coeffs_.push_back(coeff);
395 }
396 infinity_norms_.push_back(infinity_norm);
397
398 // It is important to keep lp_data_ "clean".
399 DCHECK(std::is_sorted(
400 integer_lp_cols_.data() + new_ct.start_in_buffer,
401 integer_lp_cols_.data() + new_ct.start_in_buffer + new_ct.num_terms));
402 }
403
404 // Copy the integer_lp_ into lp_data_.
405 lp_data_.Clear();
406 for (int i = 0; i < integer_variables_.size(); ++i) {
407 CHECK_EQ(glop::ColIndex(i), lp_data_.CreateNewVariable());
408 }
409
410 // We remove fixed variables from the objective. This should help the LP
411 // scaling, but also our integer reason computation.
412 int new_size = 0;
413 objective_infinity_norm_ = 0;
414 for (const auto& entry : integer_objective_) {
415 const IntegerVariable var = integer_variables_[entry.first.value()];
416 if (integer_trail_->IsFixedAtLevelZero(var)) {
417 integer_objective_offset_ +=
418 entry.second * integer_trail_->LevelZeroLowerBound(var);
419 continue;
420 }
421 objective_infinity_norm_ =
422 std::max(objective_infinity_norm_, IntTypeAbs(entry.second));
423 integer_objective_[new_size++] = entry;
424 lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second));
425 }
426 objective_infinity_norm_ =
427 std::max(objective_infinity_norm_, IntTypeAbs(integer_objective_offset_));
428 integer_objective_.resize(new_size);
429 lp_data_.SetObjectiveOffset(ToDouble(integer_objective_offset_));
430
431 for (const LinearConstraintInternal& ct : integer_lp_) {
432 const ConstraintIndex row = lp_data_.CreateNewConstraint();
433
434 // TODO(user): Using trivial bound might be good for things like
435    // sum bool <= 1 since setting the slack in [0, 1] can lead to bound flips in
436    // the simplex. However if the bound is large, maybe it makes more sense to
437 // use +/- infinity.
438 const double infinity = std::numeric_limits<double>::infinity();
439 lp_data_.SetConstraintBounds(
440 row, ct.lb_is_trivial ? -infinity : ToDouble(ct.lb),
441 ct.ub_is_trivial ? +infinity : ToDouble(ct.ub));
442 for (int i = 0; i < ct.num_terms; ++i) {
443 const int index = ct.start_in_buffer + i;
444 lp_data_.SetCoefficient(row, integer_lp_cols_[index],
445 ToDouble(integer_lp_coeffs_[index]));
446 }
447 }
448 lp_data_.NotifyThatColumnsAreClean();
449
450 // We scale the LP using the level zero bounds that we later override
451 // with the current ones.
452 //
453 // TODO(user): As part of the scaling, we may also want to shift the initial
454  // variable bounds so that each variable contains the value zero in its
455 // domain. Maybe just once and for all at the beginning.
456 const int num_vars = integer_variables_.size();
457 for (int i = 0; i < num_vars; i++) {
458 const IntegerVariable cp_var = integer_variables_[i];
459 const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(cp_var));
460 const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(cp_var));
461 lp_data_.SetVariableBounds(glop::ColIndex(i), lb, ub);
462 }
463
464 // TODO(user): As we have an idea of the LP optimal after the first solves,
465 // maybe we can adapt the scaling accordingly.
466 scaler_.Scale(simplex_params_, &lp_data_);
467 UpdateBoundsOfLpVariables();
468
469 // Set the information for the step to polish the LP basis. All our variables
470 // are integer, but for now, we just try to minimize the fractionality of the
471 // binary variables.
472 if (parameters_.polish_lp_solution()) {
473 simplex_.ClearIntegralityScales();
474 for (int i = 0; i < num_vars; ++i) {
475 const IntegerVariable cp_var = integer_variables_[i];
476 const IntegerValue lb = integer_trail_->LevelZeroLowerBound(cp_var);
477 const IntegerValue ub = integer_trail_->LevelZeroUpperBound(cp_var);
478 if (lb != 0 || ub != 1) continue;
479 simplex_.SetIntegralityScale(
480 glop::ColIndex(i),
481 1.0 / scaler_.VariableScalingFactor(glop::ColIndex(i)));
482 }
483 }
484
485 lp_data_.NotifyThatColumnsAreClean();
486 VLOG(3) << "LP relaxation: " << lp_data_.GetDimensionString() << ". "
487 << constraint_manager_.AllConstraints().size()
488 << " Managed constraints.";
489 return true;
490}
491
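// Builds the explanation of a reduced-cost based deduction: variables with a
// positive reduced cost contribute their lower bound, variables with a
// negative one their upper bound; level-zero bounds are then removed.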
492void LinearProgrammingConstraint::FillReducedCostReasonIn(
493 const glop::DenseRow& reduced_costs,
494 std::vector<IntegerLiteral>* integer_reason) {
495 integer_reason->clear();
496 const int num_vars = integer_variables_.size();
497 for (int i = 0; i < num_vars; i++) {
498 const double rc = reduced_costs[glop::ColIndex(i)];
499 if (rc > kLpEpsilon) {
500 integer_reason->push_back(
501 integer_trail_->LowerBoundAsLiteral(integer_variables_[i]));
502 } else if (rc < -kLpEpsilon) {
503 integer_reason->push_back(
504 integer_trail_->UpperBoundAsLiteral(integer_variables_[i]));
505 }
506 }
507
508 integer_trail_->RemoveLevelZeroBounds(integer_reason);
509}
510
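// Hooks this propagator into the model: builds the initial LP from the
// constraint manager, registers with the watcher on every LP variable and
// with the integer trail for backtracking.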
511void LinearProgrammingConstraint::RegisterWith(Model* model) {
512  DCHECK(!lp_constraint_is_registered_);
513 lp_constraint_is_registered_ = true;
514 model->GetOrCreate<LinearProgrammingConstraintCollection>()->push_back(this);
515
516  // Note that this is not really needed but should lead to better cache
517  // locality.
518 std::sort(integer_objective_.begin(), integer_objective_.end());
519
520 // Set the LP to its initial content.
521 //
522  // Note that we always add LP constraints lazily if we have A LOT of them.
523 // This is because currently on large problem with millions of constraints,
524 // our LP is usually not fast enough anyway.
525 if (!parameters_.add_lp_constraints_lazily() &&
526 constraint_manager_.num_constraints() < 1e6) {
527 constraint_manager_.AddAllConstraintsToLp();
528 }
529 if (!CreateLpFromConstraintManager()) {
530 model->GetOrCreate<SatSolver>()->NotifyThatModelIsUnsat();
531 return;
532 }
533
534 watcher_id_ = watcher_->Register(this);
535 const int num_vars = integer_variables_.size();
536 for (int i = 0; i < num_vars; i++) {
537 watcher_->WatchIntegerVariable(integer_variables_[i], watcher_id_, i);
538 }
539 if (objective_is_defined_) {
540 watcher_->WatchUpperBound(objective_cp_, watcher_id_);
541 }
542 watcher_->SetPropagatorPriority(watcher_id_, 2);
543 watcher_->AlwaysCallAtLevelZero(watcher_id_);
544
545  // Registering it with the trail makes sure this class is always in sync when
546 // it is used in the decision heuristics.
547 integer_trail_->RegisterReversibleClass(this);
548 watcher_->RegisterReversibleInt(watcher_id_, &rev_optimal_constraints_size_);
549}
550
551void LinearProgrammingConstraint::SetLevel(int level) {
552  // Get rid of all optimal constraints each time we go back to level zero.
553 if (level == 0) rev_optimal_constraints_size_ = 0;
554 optimal_constraints_.resize(rev_optimal_constraints_size_);
555 cumulative_optimal_constraint_sizes_.resize(rev_optimal_constraints_size_);
556 if (lp_solution_is_set_ && level < lp_solution_level_) {
557 lp_solution_is_set_ = false;
558 }
559
560 if (level < previous_level_) {
561 lp_at_optimal_ = false;
562 lp_objective_lower_bound_ = -std::numeric_limits<double>::infinity();
563 }
564 previous_level_ = level;
565
566 // Special case for level zero, we "reload" any previously known optimal
567 // solution from that level.
568 //
569 // TODO(user): Keep all optimal solution in the current branch?
570 // TODO(user): Still try to add cuts/constraints though!
571 // TODO(user): Reload the basis? This might cause issue with the basis
572 // saving/loading code in lb_tree_search.
573 if (level == 0 && !lp_solution_is_set_ && !level_zero_lp_solution_.empty()) {
574 lp_solution_is_set_ = true;
575 lp_solution_ = level_zero_lp_solution_;
576 lp_solution_level_ = 0;
577 for (int i = 0; i < lp_solution_.size(); i++) {
578 expanded_lp_solution_[integer_variables_[i]] = lp_solution_[i];
579 expanded_lp_solution_[NegationOf(integer_variables_[i])] =
580 -lp_solution_[i];
581 }
582 }
583}
584
585void LinearProgrammingConstraint::AddCutGenerator(CutGenerator generator) {
586  cut_generators_.push_back(std::move(generator));
587}
588
589bool LinearProgrammingConstraint::IncrementalPropagate(
590    const std::vector<int>& watch_indices) {
591 if (!enabled_) return true;
592
593  // we could take a quadratic amount of memory: O(num_var) per
594  // propagation in that branch. To avoid that, once the memory starts to be
595 // propagation in that branch. To avoid that, once the memory starts to be
596 // over a few GB, we only propagate from time to time. This way we do not need
597 // to keep that many constraints around.
598 //
599  // Note that 100 million int32_t variables, with the int128 coefficients and
600 // extra propagation vector is already a few GB.
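  // For instance, 1e8 terms at roughly 20 bytes each (an int32_t index plus an
  // int128 coefficient) is already ~2GB before counting those extra vectors.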
601 if (!cumulative_optimal_constraint_sizes_.empty()) {
602 const double current_size =
603 static_cast<double>(cumulative_optimal_constraint_sizes_.back());
604 const double low_limit = 1e7;
605 if (current_size > low_limit) {
606      // We only propagate if we use less than 100 times the number of
607      // integer literals currently enqueued.
608 const double num_enqueues = static_cast<double>(integer_trail_->Index());
609 if ((current_size - low_limit) > 100 * num_enqueues) return true;
610 }
611 }
612
613 if (!lp_solution_is_set_) {
614 return Propagate();
615 }
616
617 // At level zero, if there is still a chance to add cuts or lazy constraints,
618 // we re-run the LP.
619 if (trail_->CurrentDecisionLevel() == 0 && !lp_at_level_zero_is_final_) {
620 return Propagate();
621 }
622
623 // Check whether the change breaks the current LP solution. If it does, call
624 // Propagate() on the current LP.
625 for (const int index : watch_indices) {
626 const double lb =
627 ToDouble(integer_trail_->LowerBound(integer_variables_[index]));
628 const double ub =
629 ToDouble(integer_trail_->UpperBound(integer_variables_[index]));
630 const double value = lp_solution_[index];
631 if (value < lb - kCpEpsilon || value > ub + kCpEpsilon) return Propagate();
632 }
633
634 // TODO(user): The saved lp solution is still valid given the current variable
635 // bounds, so the LP optimal didn't change. However we might still want to add
636 // new cuts or new lazy constraints?
637 //
638 // TODO(user): Propagate the last optimal_constraint? Note that we need
639 // to be careful since the reversible int in IntegerSumLE are not registered.
640 // However, because we delete "optimalconstraints" on backtrack, we might not
641 // care.
642 //
643 // Remark: Note that if we do the sequence SetBasis() / Propagate() /
644 // GetAndSaveBasis() and we are in the case where the solution is still
645 // optimal, we should get the basis from when the lp solution was set which
646 // should be what we want. Even if we set garbage during SetBasis() it should
647 // be ignored. TODO(user): We might still have problem at level zero.
648 return true;
649}
650
651glop::Fractional LinearProgrammingConstraint::GetVariableValueAtCpScale(
652 glop::ColIndex var) {
653 return scaler_.UnscaleVariableValue(var, simplex_.GetVariableValue(var));
654}
655
656double LinearProgrammingConstraint::GetSolutionValue(
657    IntegerVariable variable) const {
658 return lp_solution_[mirror_lp_variable_.at(variable).value()];
659}
660
661double LinearProgrammingConstraint::GetSolutionReducedCost(
662    IntegerVariable variable) const {
663 return lp_reduced_cost_[mirror_lp_variable_.at(variable).value()];
664}
665
666void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
667 const int num_vars = integer_variables_.size();
668 for (int i = 0; i < num_vars; i++) {
669 const IntegerVariable cp_var = integer_variables_[i];
670 const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
671 const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
672 const double factor = scaler_.VariableScalingFactor(glop::ColIndex(i));
673 lp_data_.SetVariableBounds(glop::ColIndex(i), lb * factor, ub * factor);
674 }
675}
676
677bool LinearProgrammingConstraint::SolveLp() {
678 const int level = trail_->CurrentDecisionLevel();
679 if (level == 0) {
680 lp_at_level_zero_is_final_ = false;
681 }
682
683 const auto status = simplex_.Solve(lp_data_, time_limit_);
684 state_ = simplex_.GetState();
685 total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
686 if (!status.ok()) {
687 VLOG(1) << "The LP solver encountered an error: " << status.error_message();
688 simplex_.ClearStateForNextSolve();
689 return false;
690 }
691 average_degeneracy_.AddData(CalculateDegeneracy());
692 if (average_degeneracy_.CurrentAverage() >= 1000.0) {
693 VLOG(2) << "High average degeneracy: "
694 << average_degeneracy_.CurrentAverage();
695 }
696
697 // By default we assume the matrix is unchanged.
698 // This will be reset by CreateLpFromConstraintManager().
700
701 const int status_as_int = static_cast<int>(simplex_.GetProblemStatus());
702 if (status_as_int >= num_solves_by_status_.size()) {
703 num_solves_by_status_.resize(status_as_int + 1);
704 }
705 num_solves_++;
706 num_solves_by_status_[status_as_int]++;
707 VLOG(2) << lp_data_.GetDimensionString()
708 << " lvl:" << trail_->CurrentDecisionLevel() << " "
709 << simplex_.GetProblemStatus()
710 << " iter:" << simplex_.GetNumberOfIterations()
711 << " obj:" << simplex_.GetObjectiveValue() << " scaled:"
712 << objective_definition_->ScaleObjective(
713 simplex_.GetObjectiveValue());
714
717 lp_objective_lower_bound_ = simplex_.GetObjectiveValue();
718 }
719 lp_at_optimal_ = simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL;
720
722 lp_solution_is_set_ = true;
723 lp_solution_level_ = trail_->CurrentDecisionLevel();
724 const int num_vars = integer_variables_.size();
725 for (int i = 0; i < num_vars; i++) {
726 const glop::Fractional value =
727 GetVariableValueAtCpScale(glop::ColIndex(i));
728 lp_solution_[i] = value;
729 expanded_lp_solution_[integer_variables_[i]] = value;
730 expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value;
731 }
732
733 if (lp_solution_level_ == 0) {
734 level_zero_lp_solution_ = lp_solution_;
735 }
736 }
737
738 return true;
739}
740
741bool LinearProgrammingConstraint::AnalyzeLp() {
742 // A dual-unbounded problem is infeasible. We use the dual ray reason.
743  if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_UNBOUNDED) {
744    if (parameters_.use_exact_lp_reason()) {
745 return PropagateExactDualRay();
746 }
747 FillReducedCostReasonIn(simplex_.GetDualRayRowCombination(),
748 &integer_reason_);
749 return integer_trail_->ReportConflict(integer_reason_);
750 }
751
752 // TODO(user): Update limits for DUAL_UNBOUNDED status as well.
753 UpdateSimplexIterationLimit(/*min_iter=*/10, /*max_iter=*/1000);
754
755 // Optimality deductions if problem has an objective.
756 // If there is no objective, then all duals will just be zero.
757 if (objective_is_defined_ &&
760 // TODO(user): Maybe do a bit less computation when we cannot propagate
761 // anything.
762 if (parameters_.use_exact_lp_reason()) {
763 if (!PropagateExactLpReason()) return false;
764
765 // Display when the inexact bound would have propagated more.
766 if (VLOG_IS_ON(2)) {
767 const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
768 const IntegerValue approximate_new_lb(static_cast<int64_t>(
769 std::ceil(relaxed_optimal_objective - kCpEpsilon)));
770 const IntegerValue propagated_lb =
771 integer_trail_->LowerBound(objective_cp_);
772 if (approximate_new_lb > propagated_lb) {
773 VLOG(2) << "LP objective [ " << ToDouble(propagated_lb) << ", "
774 << ToDouble(integer_trail_->UpperBound(objective_cp_))
775 << " ] approx_lb += "
776 << ToDouble(approximate_new_lb - propagated_lb) << " gap: "
777 << integer_trail_->UpperBound(objective_cp_) - propagated_lb;
778 }
779 }
780 } else {
781 // Try to filter optimal objective value. Note that GetObjectiveValue()
782      // already takes care of the scaling so that it returns an objective in the
783 // CP world.
784 FillReducedCostReasonIn(simplex_.GetReducedCosts(), &integer_reason_);
785 const double objective_cp_ub =
786 ToDouble(integer_trail_->UpperBound(objective_cp_));
787 const double relaxed_optimal_objective = simplex_.GetObjectiveValue();
788 ReducedCostStrengtheningDeductions(objective_cp_ub -
789 relaxed_optimal_objective);
790 if (!deductions_.empty()) {
791 deductions_reason_ = integer_reason_;
792 deductions_reason_.push_back(
793 integer_trail_->UpperBoundAsLiteral(objective_cp_));
794 }
795
796 // Push new objective lb.
797 const IntegerValue approximate_new_lb(static_cast<int64_t>(
798 std::ceil(relaxed_optimal_objective - kCpEpsilon)));
799 if (approximate_new_lb > integer_trail_->LowerBound(objective_cp_)) {
800 const IntegerLiteral deduction =
801 IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_new_lb);
802 if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
803 return false;
804 }
805 }
806
807 // Push reduced cost strengthening bounds.
808 if (!deductions_.empty()) {
809 const int trail_index_with_same_reason = integer_trail_->Index();
810 for (const IntegerLiteral deduction : deductions_) {
811 if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
812 trail_index_with_same_reason)) {
813 return false;
814 }
815 }
816 }
817 }
818 }
819
820 // Copy more info about the current solution.
822 CHECK(lp_solution_is_set_);
823 lp_solution_is_integer_ = true;
824 const int num_vars = integer_variables_.size();
825 for (int i = 0; i < num_vars; i++) {
826 lp_reduced_cost_[i] = scaler_.UnscaleReducedCost(
827 glop::ColIndex(i), simplex_.GetReducedCost(glop::ColIndex(i)));
828 if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
829 kCpEpsilon) {
830 lp_solution_is_integer_ = false;
831 }
832 }
833
834 if (compute_reduced_cost_averages_) {
835 UpdateAverageReducedCosts();
836 }
837 }
838
839 // On some problem, LP solves and cut rounds can be slow, so we report
840 // the current possible objective improvement in the middle of the
841 // propagation, not just at the end.
842 //
843 // Note that we currently only do that if the LP is "full" and its objective
844 // is the same as the one of the whole problem.
845 if (objective_is_defined_ &&
846 objective_definition_->objective_var == objective_cp_ &&
847 trail_->CurrentDecisionLevel() == 0) {
848 shared_response_manager_->UpdateInnerObjectiveBounds(
849 model_->Name(), integer_trail_->LowerBound(objective_cp_),
850 integer_trail_->UpperBound(objective_cp_));
851 }
852
853 return true;
854}
855
856// Note that since we call this on the constraint with slack, we actually have
857// linear expression == rhs, we can use this to propagate more!
858//
859// TODO(user): Also propagate on -cut? In practice we already do that in many
860// places where we try to generate the cut on -cut... But we could do it sooner
861// and more cleanly here.
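// Returns true when, after the strengthening below, the cut still has at
// least one term with a relevant (fractional) LP value; returns false when
// nothing can be cut or when the constraint proves the problem infeasible (in
// which case problem_proven_infeasible_by_cuts_ is set).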
862bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack,
863 CutData* cut) {
864 // Because of complement, all coeffs and all terms are positive after this.
865 cut->ComplementForPositiveCoefficients();
866 if (cut->rhs < 0) {
867 problem_proven_infeasible_by_cuts_ = true;
868 return false;
869 }
870
871  // Limited DP to compute the first few reachable values.
872  // Note that all coeffs are positive.
873 reachable_.Reset();
874 for (const CutTerm& term : cut->terms) {
875 reachable_.Add(term.coeff.value());
876 }
877
878 // Extra propag since we know it is actually an equality constraint.
879 if (cut->rhs < absl::int128(reachable_.LastValue()) &&
880 !reachable_.MightBeReachable(static_cast<int64_t>(cut->rhs))) {
881 problem_proven_infeasible_by_cuts_ = true;
882 return false;
883 }
884
885 bool some_fixed_terms = false;
886 bool some_relevant_positions = false;
887 for (CutTerm& term : cut->terms) {
888 const absl::int128 magnitude128 = term.coeff.value();
889 const absl::int128 range =
890 absl::int128(term.bound_diff.value()) * magnitude128;
891
892 IntegerValue new_diff = term.bound_diff;
893 if (range > cut->rhs) {
894 new_diff = static_cast<int64_t>(cut->rhs / magnitude128);
895 }
896 {
897 // Extra propag since we know it is actually an equality constraint.
898 absl::int128 rest128 =
899 cut->rhs - absl::int128(new_diff.value()) * magnitude128;
900 while (rest128 < absl::int128(reachable_.LastValue()) &&
901 !reachable_.MightBeReachable(static_cast<int64_t>(rest128))) {
902 ++total_num_eq_propagations_;
903 CHECK_GT(new_diff, 0);
904 --new_diff;
905 rest128 += magnitude128;
906 }
907 }
908
909 if (new_diff < term.bound_diff) {
910 term.bound_diff = new_diff;
911
912 const IntegerVariable var = term.expr_vars[0];
913 if (var < first_slack) {
914 // Normal variable.
915 ++total_num_cut_propagations_;
916
917 // Note that at this stage we only have X - lb or ub - X.
918 if (term.expr_coeffs[0] == 1) {
919 // X + offset <= bound_diff
920            if (!integer_trail_->Enqueue(
921                    IntegerLiteral::LowerOrEqual(
922                        var, term.bound_diff - term.expr_offset),
923 {}, {})) {
924 problem_proven_infeasible_by_cuts_ = true;
925 return false;
926 }
927 } else {
928 CHECK_EQ(term.expr_coeffs[0], -1);
929 // offset - X <= bound_diff
930            if (!integer_trail_->Enqueue(
931                    IntegerLiteral::GreaterOrEqual(
932                        var, term.expr_offset - term.bound_diff),
933 {}, {})) {
934 problem_proven_infeasible_by_cuts_ = true;
935 return false;
936 }
937 }
938 } else {
939        // This is a tighter bound on one of the constraints, like a cut. Note
940        // that in some corner cases, new cuts can be merged and update the bounds
941        // of the constraint before this code.
942 const int slack_index = (var.value() - first_slack.value()) / 2;
943 const glop::RowIndex row = tmp_slack_rows_[slack_index];
944 if (term.expr_coeffs[0] == 1) {
945 // slack = ct + offset <= bound_diff;
946 const IntegerValue new_ub = term.bound_diff - term.expr_offset;
947 if (constraint_manager_.UpdateConstraintUb(row, new_ub)) {
948 integer_lp_[row].ub = new_ub;
949 }
950 } else {
951 // slack = offset - ct <= bound_diff;
952 CHECK_EQ(term.expr_coeffs[0], -1);
953 const IntegerValue new_lb = term.expr_offset - term.bound_diff;
954 if (constraint_manager_.UpdateConstraintLb(row, new_lb)) {
955 integer_lp_[row].lb = new_lb;
956 }
957 }
958 }
959 }
960
961 if (term.bound_diff == 0) {
962 some_fixed_terms = true;
963 } else {
964 if (term.HasRelevantLpValue()) {
965 some_relevant_positions = true;
966 }
967 }
968 }
969
970 // Remove fixed terms if any.
971 if (some_fixed_terms) {
972 int new_size = 0;
973 for (const CutTerm& term : cut->terms) {
974 if (term.bound_diff == 0) continue;
975 cut->terms[new_size++] = term;
976 }
977 cut->terms.resize(new_size);
978 }
979 return some_relevant_positions;
980}
981
982bool LinearProgrammingConstraint::AddCutFromConstraints(
983 absl::string_view name,
984 absl::Span<const std::pair<RowIndex, IntegerValue>> integer_multipliers) {
985 // This is initialized to a valid linear constraint (by taking linear
986 // combination of the LP rows) and will be transformed into a cut if
987 // possible.
988 //
989  // TODO(user): For CG cuts, ideally this linear combination should have only
990  // one fractional variable (basis_col). But because of imprecision, we can get
991  // a bunch of fractional entries with small coefficients (relative to the one of
992 // basis_col). We try to handle that in IntegerRoundingCut(), but it might be
993 // better to add small multiple of the involved rows to get rid of them.
994 IntegerValue cut_ub;
995 if (!ComputeNewLinearConstraint(integer_multipliers, &tmp_scattered_vector_,
996 &cut_ub)) {
997 ++num_cut_overflows_;
998 VLOG(1) << "Issue, overflow!";
999 return false;
1000 }
1001
1002 // Important: because we use integer_multipliers below to create the slack,
1003 // we cannot try to adjust this linear combination easily.
1004 //
1005 // TODO(user): the conversion col_index -> IntegerVariable is slow and could
1006 // in principle be removed. Easy for cuts, but not so much for
1007 // implied_bounds_processor_. Note that in theory this could allow us to
1008 // use Literal directly without the need to have an IntegerVariable for them.
1009 tmp_scattered_vector_.ConvertToCutData(cut_ub.value(), integer_variables_,
1010 lp_solution_, integer_trail_,
1011 &base_ct_);
1012
1013  // If there are no integers (all Booleans), no need to try the implied bounds
1014  // heuristics. By setting this to nullptr, we are a bit faster.
1015 ImpliedBoundsProcessor* ib_processor = nullptr;
1016 {
1017 bool some_ints = false;
1018 bool some_relevant_positions = false;
1019 for (const CutTerm& term : base_ct_.terms) {
1020 if (term.bound_diff > 1) some_ints = true;
1021 if (term.HasRelevantLpValue()) some_relevant_positions = true;
1022 }
1023
1024  // If all values are integer, we will not be able to cut anything.
1025 if (!some_relevant_positions) return false;
1026 if (some_ints) ib_processor = &implied_bounds_processor_;
1027 }
1028
1029 // Add constraint slack.
1030 // This is important for correctness here.
1031 const IntegerVariable first_slack(expanded_lp_solution_.size());
1032 CHECK_EQ(first_slack.value() % 2, 0);
1033 tmp_slack_rows_.clear();
1034 for (const auto [row, coeff] : integer_multipliers) {
1035 if (integer_lp_[row].lb == integer_lp_[row].ub) continue;
1036
1037 CutTerm entry;
1038 entry.coeff = coeff > 0 ? coeff : -coeff;
1039 entry.lp_value = 0.0;
1040 entry.bound_diff = integer_lp_[row].ub - integer_lp_[row].lb;
1041 entry.expr_vars[0] =
1042 first_slack + 2 * IntegerVariable(tmp_slack_rows_.size());
1043 entry.expr_coeffs[1] = 0;
1044 const double activity = scaler_.UnscaleConstraintActivity(
1045 row, simplex_.GetConstraintActivity(row));
1046 if (coeff > 0) {
1047 // Slack = ub - constraint;
1048 entry.lp_value = ToDouble(integer_lp_[row].ub) - activity;
1049 entry.expr_coeffs[0] = IntegerValue(-1);
1050 entry.expr_offset = integer_lp_[row].ub;
1051 } else {
1052 // Slack = constraint - lb;
1053 entry.lp_value = activity - ToDouble(integer_lp_[row].lb);
1054 entry.expr_coeffs[0] = IntegerValue(1);
1055 entry.expr_offset = -integer_lp_[row].lb;
1056 }
1057
1058 base_ct_.terms.push_back(entry);
1059 tmp_slack_rows_.push_back(row);
1060 }
1061
1062  // This also makes all coefficients positive.
1063 if (!PreprocessCut(first_slack, &base_ct_)) return false;
1064
1065 // We cannot cut sum Bool <= 1.
1066 if (base_ct_.rhs == 1) return false;
1067
1068 // Constraint with slack should be tight.
1069 if (DEBUG_MODE) {
1070 double activity = 0.0;
1071 double norm = 0.0;
1072 for (const CutTerm& term : base_ct_.terms) {
1073 const double coeff = ToDouble(term.coeff);
1074 activity += term.lp_value * coeff;
1075 norm += coeff * coeff;
1076 }
1077 if (std::abs(activity - static_cast<double>(base_ct_.rhs)) / norm > 1e-4) {
1078 VLOG(1) << "Cut not tight " << activity
1079 << " <= " << static_cast<double>(base_ct_.rhs);
1080 return false;
1081 }
1082 }
1083
1084 bool at_least_one_added = false;
1085 DCHECK(base_ct_.AllCoefficientsArePositive());
1086
1087 // Try RLT cuts.
1088 //
1089 // TODO(user): try this for more than just "base" constraints?
1090 if (integer_multipliers.size() == 1 && parameters_.add_rlt_cuts()) {
1091 if (rlt_cut_helper_.TrySimpleSeparation(base_ct_)) {
1092 at_least_one_added |= PostprocessAndAddCut(
1093 absl::StrCat(name, "_RLT"), rlt_cut_helper_.Info(), first_slack,
1094 rlt_cut_helper_.cut());
1095 }
1096 }
1097
1098 // Note that the indexing will survive ComplementForSmallerLpValues() below.
1099 if (ib_processor != nullptr) {
1100 if (!ib_processor->CacheDataForCut(first_slack, &base_ct_)) {
1101 ib_processor = nullptr;
1102 }
1103 }
1104
1105 // Try cover approach to find cut.
1106 // TODO(user): Share common computation between kinds.
1107 {
1108 cover_cut_helper_.ClearCache();
1109
1110 if (cover_cut_helper_.TrySingleNodeFlow(base_ct_, ib_processor)) {
1111 at_least_one_added |= PostprocessAndAddCut(
1112 absl::StrCat(name, "_FF"), cover_cut_helper_.Info(), first_slack,
1113 cover_cut_helper_.cut());
1114 }
1115 if (cover_cut_helper_.TrySimpleKnapsack(base_ct_, ib_processor)) {
1116 at_least_one_added |= PostprocessAndAddCut(
1117 absl::StrCat(name, "_K"), cover_cut_helper_.Info(), first_slack,
1118 cover_cut_helper_.cut());
1119 }
1120
1121    // This one needs to be called after TrySimpleKnapsack() in order to reuse
1122 // some cached data if possible.
1123 if (cover_cut_helper_.TryWithLetchfordSouliLifting(base_ct_,
1124 ib_processor)) {
1125 at_least_one_added |= PostprocessAndAddCut(
1126 absl::StrCat(name, "_KL"), cover_cut_helper_.Info(), first_slack,
1127 cover_cut_helper_.cut());
1128 }
1129 }
1130
1131 // Try integer rounding heuristic to find cut.
1132 {
1134
1135 RoundingOptions options;
1136 options.max_scaling = parameters_.max_integer_rounding_scaling();
1137 options.use_ib_before_heuristic = false;
1138 if (integer_rounding_cut_helper_.ComputeCut(options, base_ct_,
1139 ib_processor)) {
1140 at_least_one_added |= PostprocessAndAddCut(
1141 absl::StrCat(name, "_R"), integer_rounding_cut_helper_.Info(),
1142 first_slack, integer_rounding_cut_helper_.cut());
1143 }
1144
1145 options.use_ib_before_heuristic = true;
1146 options.prefer_positive_ib = false;
1147 if (ib_processor != nullptr && integer_rounding_cut_helper_.ComputeCut(
1148 options, base_ct_, ib_processor)) {
1149 at_least_one_added |= PostprocessAndAddCut(
1150 absl::StrCat(name, "_RB"), integer_rounding_cut_helper_.Info(),
1151 first_slack, integer_rounding_cut_helper_.cut());
1152 }
1153
1154 options.use_ib_before_heuristic = true;
1155 options.prefer_positive_ib = true;
1156 if (ib_processor != nullptr && integer_rounding_cut_helper_.ComputeCut(
1157 options, base_ct_, ib_processor)) {
1158 at_least_one_added |= PostprocessAndAddCut(
1159 absl::StrCat(name, "_RBP"), integer_rounding_cut_helper_.Info(),
1160 first_slack, integer_rounding_cut_helper_.cut());
1161 }
1162 }
1163
1164 return at_least_one_added;
1165}
1166
1167bool LinearProgrammingConstraint::PostprocessAndAddCut(
1168 const std::string& name, const std::string& info,
1169 IntegerVariable first_slack, const CutData& cut) {
1170 if (cut.rhs > absl::int128(std::numeric_limits<int64_t>::max()) ||
1171 cut.rhs < absl::int128(std::numeric_limits<int64_t>::min())) {
1172 VLOG(2) << "RHS overflow " << name << " " << info;
1173 ++num_cut_overflows_;
1174 return false;
1175 }
1176
1177 // We test this here to avoid doing the costly conversion below.
1178 //
1179 // TODO(user): Ideally we should detect this even earlier during the cut
1180 // generation.
1181 if (cut.ComputeViolation() < 1e-4) {
1182 VLOG(2) << "Bad cut " << name << " " << info;
1183 ++num_bad_cuts_;
1184 return false;
1185 }
1186
1187 // Substitute any slack left.
1188 tmp_scattered_vector_.ClearAndResize(integer_variables_.size());
1189 IntegerValue cut_ub = static_cast<int64_t>(cut.rhs);
1190 for (const CutTerm& term : cut.terms) {
1191 if (term.coeff == 0) continue;
1192 if (!AddProductTo(-term.coeff, term.expr_offset, &cut_ub)) {
1193 VLOG(2) << "Overflow in conversion";
1194 ++num_cut_overflows_;
1195 return false;
1196 }
1197 for (int i = 0; i < 2; ++i) {
1198 if (term.expr_coeffs[i] == 0) continue;
1199 const IntegerVariable var = term.expr_vars[i];
1200 const IntegerValue coeff =
1201 CapProd(term.coeff.value(), term.expr_coeffs[i].value());
1202 if (AtMinOrMaxInt64(coeff.value())) {
1203 VLOG(2) << "Overflow in conversion";
1204 ++num_cut_overflows_;
1205 return false;
1206 }
1207
1208 // Simple copy for non-slack variables.
1209 if (var < first_slack) {
1210 const glop::ColIndex col =
1211 mirror_lp_variable_.at(PositiveVariable(var));
1212 if (VariableIsPositive(var)) {
1213 tmp_scattered_vector_.Add(col, coeff);
1214 } else {
1215 tmp_scattered_vector_.Add(col, -coeff);
1216 }
1217 continue;
1218 } else {
1219 // Replace slack from LP constraints.
1220 const int slack_index = (var.value() - first_slack.value()) / 2;
1221 const glop::RowIndex row = tmp_slack_rows_[slack_index];
1222 if (!tmp_scattered_vector_.AddLinearExpressionMultiple(
1223 coeff, IntegerLpRowCols(row), IntegerLpRowCoeffs(row))) {
1224 VLOG(2) << "Overflow in slack removal";
1225 ++num_cut_overflows_;
1226 return false;
1227 }
1228 }
1229 }
1230 }
1231
1232 // TODO(user): avoid allocating memory if it turns out this is a duplicate of
1233 // something we already added. This tends to happen if the duplicate was
1234 // already a generated cut which is currently not part of the LP.
1235 LinearConstraint converted_cut =
1236 tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_,
1237 cut_ub);
1238
1239 // TODO(user): We probably generate too many cuts, keep best one only?
1240 // Note that we do need duplicate removal and maybe some orthogonality here?
1241 if (/*DISABLES CODE*/ (false)) {
1242 top_n_cuts_.AddCut(std::move(converted_cut), name, expanded_lp_solution_);
1243 return true;
1244 }
1245 return constraint_manager_.AddCut(std::move(converted_cut), name, info);
1246}
1247
1248// TODO(user): This can be still too slow on some problems like
1249// 30_70_45_05_100.mps.gz. Not this actual function, but the set of computations
1250// it triggers. We should add heuristics to abort earlier if a cut is not
1251// promising. Or only test a few positions and not all rows.
1252void LinearProgrammingConstraint::AddCGCuts() {
1253  // We used not to do "classical" Gomory cuts and instead used this heuristic.
1254  // It is usually faster, but on some problems like neos*creuse it does not find
1255  // good cuts.
1256 //
1257 // TODO(user): Make the cut generation lighter and try this at false.
1258 const bool old_gomory = true;
1259
1260  // Note that the index is permuted and does not correspond to a row.
1261 const RowIndex num_rows = lp_data_.num_constraints();
1262 for (RowIndex index(0); index < num_rows; ++index) {
1263 if (time_limit_->LimitReached()) break;
1264
1265 const ColIndex basis_col = simplex_.GetBasis(index);
1266 const Fractional lp_value = GetVariableValueAtCpScale(basis_col);
1267
1268    // Only consider fractional basis elements. We ignore elements that are close
1269    // to an integer to reduce the number of positions we try.
1270 //
1271 // TODO(user): We could just look at the diff with std::floor() in the hope
1272 // that when we are just under an integer, the exact computation below will
1273 // also be just under it.
1274 if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue;
1275
1276 // If this variable is a slack, we ignore it. This is because the
1277 // corresponding row is not tight under the given lp values.
1278 if (old_gomory && basis_col >= integer_variables_.size()) continue;
1279
1280 // TODO(user): Avoid code duplication between the sparse/dense path.
1281 tmp_lp_multipliers_.clear();
1282 const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(index);
1283 if (lambda.non_zeros.empty()) {
1284 for (RowIndex row(0); row < num_rows; ++row) {
1285 const double value = lambda.values[glop::RowToColIndex(row)];
1286 if (std::abs(value) < kZeroTolerance) continue;
1287 tmp_lp_multipliers_.push_back({row, value});
1288 }
1289 } else {
1290 for (const ColIndex col : lambda.non_zeros) {
1291 const RowIndex row = glop::ColToRowIndex(col);
1292 const double value = lambda.values[col];
1293 if (std::abs(value) < kZeroTolerance) continue;
1294 tmp_lp_multipliers_.push_back({row, value});
1295 }
1296 }
1297
1298 // Size 1 is already done with MIR.
1299 if (tmp_lp_multipliers_.size() <= 1) continue;
1300
1301 IntegerValue scaling;
1302 for (int i = 0; i < 2; ++i) {
1303 if (i == 1) {
1304 // Try other sign.
1305 //
1306        // TODO(user): Maybe add a heuristic to know beforehand which sign to
1307 // use?
1308 for (std::pair<RowIndex, double>& p : tmp_lp_multipliers_) {
1309 p.second = -p.second;
1310 }
1311 }
1312
1313 // TODO(user): We use a lower value here otherwise we might run into
1314 // overflow while computing the cut. This should be fixable.
1315 tmp_integer_multipliers_ = ScaleLpMultiplier(
1316 /*take_objective_into_account=*/false,
1317 /*ignore_trivial_constraints=*/old_gomory, tmp_lp_multipliers_,
1318 &scaling);
1319 if (scaling != 0) {
1320 AddCutFromConstraints("CG", tmp_integer_multipliers_);
1321 }
1322 }
1323 }
1324}
1325
1326// Because we know the objective is integer, the constraint objective >= lb can
1327// sometimes cut the current lp optimal, and it can make a big difference to add
1328// it. Or at least use it when constructing more advanced cuts. See
1329// 'multisetcover_batch_0_case_115_instance_0_small_subset_elements_3_sumreqs
1330// _1295_candidates_41.fzn'
1331//
1332// TODO(user): It might be better to just integrate this with the MIR code so
1333// that we not only consider MIR1 involving the objective but we also consider
1334// combining it with other constraints.
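// Concretely, with internal integer objective sum(c_i * x_i) + offset ==
// objective_cp_ and a level-zero bound objective_cp_ >= lb, the cut built
// below is the negated form -sum(c_i * x_i) <= offset - lb.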
1335void LinearProgrammingConstraint::AddObjectiveCut() {
1336 if (integer_objective_.size() <= 1) return;
1337
1338 // We only try to add such cut if the LB objective is "far" from the current
1339  // objective lower bound. Note that this is in terms of the "internal" integer
1340 // objective.
1341 const double obj_lp_value = simplex_.GetObjectiveValue();
1342 const IntegerValue obj_lower_bound =
1343 integer_trail_->LevelZeroLowerBound(objective_cp_);
1344 if (obj_lp_value + 1.0 >= ToDouble(obj_lower_bound)) return;
1345
1346 // We negate everything to have a <= base constraint.
1347 LinearConstraint objective_ct;
1348 objective_ct.lb = kMinIntegerValue;
1349 objective_ct.ub = integer_objective_offset_ -
1350 integer_trail_->LevelZeroLowerBound(objective_cp_);
1351 IntegerValue obj_coeff_magnitude(0);
1352 objective_ct.resize(integer_objective_.size());
1353 int i = 0;
1354 for (const auto& [col, coeff] : integer_objective_) {
1355 const IntegerVariable var = integer_variables_[col.value()];
1356 objective_ct.vars[i] = var;
1357 objective_ct.coeffs[i] = -coeff;
1358 obj_coeff_magnitude = std::max(obj_coeff_magnitude, IntTypeAbs(coeff));
1359 ++i;
1360 }
1361
1362 if (!base_ct_.FillFromLinearConstraint(objective_ct, expanded_lp_solution_,
1363 integer_trail_)) {
1364 return;
1365 }
1366
1367 // If the magnitude is small enough, just try to add the full objective. Other
1368 // cuts will be derived in subsequent passes. Otherwise, try normal cut
1369 // heuristic that should result in a cut with reasonable coefficients.
1370 if (obj_coeff_magnitude < 1e9 &&
1371 constraint_manager_.AddCut(std::move(objective_ct), "Objective")) {
1372 return;
1373 }
1374
1375  // If there are no integers (all Booleans), no need to try the implied bounds
1376  // heuristics. By setting this to nullptr, we are a bit faster.
1377 ImpliedBoundsProcessor* ib_processor = nullptr;
1378 {
1379 bool some_ints = false;
1380 bool some_relevant_positions = false;
1381 for (const CutTerm& term : base_ct_.terms) {
1382 if (term.bound_diff > 1) some_ints = true;
1383 if (term.HasRelevantLpValue()) some_relevant_positions = true;
1384 }
1385
1386  // If all values are integer, we will not be able to cut anything.
1387 if (!some_relevant_positions) return;
1388 if (some_ints) ib_processor = &implied_bounds_processor_;
1389 }
1390
1391 // Note that the indexing will survive the complement of terms below.
1392 const IntegerVariable first_slack(
1393 std::numeric_limits<IntegerVariable::ValueType>::max());
1394 if (ib_processor != nullptr) {
1395 if (!ib_processor->CacheDataForCut(first_slack, &base_ct_)) {
1396 ib_processor = nullptr;
1397 }
1398 }
1399
1400 // Try knapsack.
1402 cover_cut_helper_.ClearCache();
1403 if (cover_cut_helper_.TrySimpleKnapsack(base_ct_, ib_processor)) {
1404 PostprocessAndAddCut("Objective_K", cover_cut_helper_.Info(), first_slack,
1405 cover_cut_helper_.cut());
1406 }
1407
1408 // Try rounding.
1409 RoundingOptions options;
1410 options.max_scaling = parameters_.max_integer_rounding_scaling();
1412 if (integer_rounding_cut_helper_.ComputeCut(options, base_ct_,
1413 ib_processor)) {
1414 PostprocessAndAddCut("Objective_R", integer_rounding_cut_helper_.Info(),
1415 first_slack, integer_rounding_cut_helper_.cut());
1416 }
1417}
1418
1419void LinearProgrammingConstraint::AddMirCuts() {
1420 // Heuristic to generate MIR_n cuts by combining a small number of rows. This
1421  // works greedily and follows more or less the MIR cut description in the
1422 // literature. We have a current cut, and we add one more row to it while
1423 // eliminating a variable of the current cut whose LP value is far from its
1424 // bound.
1425 //
1426 // A notable difference is that we randomize the variable we eliminate and
1427 // the row we use to do so. We still have weights to indicate our preferred
1428  // choices. This allows us to generate different cuts when called again and
1429 // again.
1430 //
1431 // TODO(user): We could combine n rows to make sure we eliminate n variables
1432  // far away from their bounds by solving a small integer linear system
1433  // exactly.
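  // The loop below keeps a dense representation of the current aggregated cut
  // and repeatedly (at most kMaxAggregation times) picks a variable far from
  // its bounds together with a tight row that can cancel it.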
1434  util_intops::StrongVector<ColIndex, IntegerValue> dense_cut(
1435      integer_variables_.size(), IntegerValue(0));
1436 SparseBitset<ColIndex> non_zeros_(ColIndex(integer_variables_.size()));
1437
1438 // We compute all the rows that are tight, these will be used as the base row
1439 // for the MIR_n procedure below.
1440 const int num_rows = lp_data_.num_constraints().value();
1441 std::vector<std::pair<RowIndex, IntegerValue>> base_rows;
1442 util_intops::StrongVector<RowIndex, double> row_weights(num_rows, 0.0);
1443 util_intops::StrongVector<RowIndex, bool> at_ub(num_rows, false);
1444 util_intops::StrongVector<RowIndex, bool> at_lb(num_rows, false);
1445 for (RowIndex row(0); row < num_rows; ++row) {
1446 // We only consider tight rows.
1447    // We use both the status and activity to have as many options as possible.
1448 //
1449 // TODO(user): shall we consider rows that are not tight?
1450 // TODO(user): Ignore trivial rows? Note that we do that for MIR1 since it
1451 // cannot be good.
1452 const auto status = simplex_.GetConstraintStatus(row);
1453 const double activity = simplex_.GetConstraintActivity(row);
1454 if (activity > lp_data_.constraint_upper_bounds()[row] - 1e-4 ||
1457 at_ub[row] = true;
1458 base_rows.push_back({row, IntegerValue(1)});
1459 }
1460 if (activity < lp_data_.constraint_lower_bounds()[row] + 1e-4 ||
1463 at_lb[row] = true;
1464 base_rows.push_back({row, IntegerValue(-1)});
1465 }
1466
1467 // For now, we use the dual values for the row "weights".
1468 //
1469    // Note that we use the dual at LP scale so that it makes more sense when we
1470 // compare different rows since the LP has been scaled.
1471 //
1472 // TODO(user): In Kati Wolter PhD "Implementation of Cutting Plane
1473    // Separators for Mixed Integer Programs" which describes SCIP's MIR cuts
1474 // implementation (or at least an early version of it), a more complex score
1475 // is used.
1476 //
1477 // Note(user): Because we only consider tight rows under the current lp
1478    // solution (i.e. non-basic rows), most should have a non-zero dual value.
1479    // But there are some degenerate problems where these rows have a really low
1480    // weight (or even zero), and having only weights of exactly zero in
1481 // std::discrete_distribution will result in a crash.
1482 row_weights[row] = std::max(1e-8, std::abs(simplex_.GetDualValue(row)));
1483 }
1484
1485 // The code here can be really slow, so we put a limit on the number of
1486 // entries we process. We randomize the base_rows so that on the next calls
1487  // we do not do exactly the same if we can't process many base rows.
1488 int64_t dtime_num_entries = 0;
1489 std::shuffle(base_rows.begin(), base_rows.end(), *random_);
1490
1491 std::vector<double> weights;
1492  util_intops::StrongVector<RowIndex, bool> used_rows;
1493  std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
1494 for (const std::pair<RowIndex, IntegerValue>& entry : base_rows) {
1495 if (time_limit_->LimitReached()) break;
1496 if (dtime_num_entries > 1e7) break;
1497
1498 const glop::RowIndex base_row = entry.first;
1499 const IntegerValue multiplier = entry.second;
1500
1501 // First try to generate a cut directly from this base row (MIR1).
1502 //
1503 // Note(user): We abort on success like it seems to be done in the
1504 // literature. Note that we don't succeed that often in generating an
1505 // efficient cut, so I am not sure aborting will make a big difference
1506 // speedwise. We might generate similar cuts though, but hopefully the cut
1507 // management can deal with that.
1508 integer_multipliers = {entry};
1509 if ((multiplier > 0 && !integer_lp_[base_row].ub_is_trivial) ||
1510 (multiplier < 0 && !integer_lp_[base_row].lb_is_trivial)) {
1511 if (AddCutFromConstraints("MIR_1", integer_multipliers)) continue;
1512 }
1513
1514 // Cleanup.
1515 for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1516 dense_cut[col] = IntegerValue(0);
1517 }
1518 non_zeros_.SparseClearAll();
1519
1520 // Copy cut.
1521 const LinearConstraintInternal& ct = integer_lp_[entry.first];
1522 for (int i = 0; i < ct.num_terms; ++i) {
1523 const int index = ct.start_in_buffer + i;
1524 const ColIndex col = integer_lp_cols_[index];
1525 const IntegerValue coeff = integer_lp_coeffs_[index];
1526 non_zeros_.Set(col);
1527 dense_cut[col] += coeff * multiplier;
1528 }
1529
1530 used_rows.assign(num_rows, false);
1531 used_rows[entry.first] = true;
1532
1533 // We will aggregate at most kMaxAggregation more rows.
1534 //
1535 // TODO(user): optim + tune.
1536 const int kMaxAggregation = 5;
1537 for (int i = 0; i < kMaxAggregation; ++i) {
1538 // First pick a variable to eliminate. We currently pick a random one with
1539 // a weight that depends on how far it is from its closest bound.
1540 IntegerValue max_magnitude(0);
1541 weights.clear();
1542 std::vector<ColIndex> col_candidates;
1543 for (const ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1544 if (dense_cut[col] == 0) continue;
1545
1546 max_magnitude = std::max(max_magnitude, IntTypeAbs(dense_cut[col]));
1547 const int col_degree =
1548 lp_data_.GetSparseColumn(col).num_entries().value();
1549 if (col_degree <= 1) continue;
1550 if (simplex_.GetVariableStatus(col) != glop::VariableStatus::BASIC) {
1551 continue;
1552 }
1553
1554 const IntegerVariable var = integer_variables_[col.value()];
1555 const double lp_value = expanded_lp_solution_[var];
1556 const double lb = ToDouble(integer_trail_->LevelZeroLowerBound(var));
1557 const double ub = ToDouble(integer_trail_->LevelZeroUpperBound(var));
1558 const double bound_distance = std::min(ub - lp_value, lp_value - lb);
1559 if (bound_distance > 1e-2) {
1560 weights.push_back(bound_distance);
1561 col_candidates.push_back(col);
1562 }
1563 }
1564 if (col_candidates.empty()) break;
1565
1566 const ColIndex var_to_eliminate =
1567 col_candidates[WeightedPick(weights, *random_)];
1568
1569 // What rows can we add to eliminate var_to_eliminate?
1570 std::vector<RowIndex> possible_rows;
1571 weights.clear();
1572 for (const auto entry : lp_data_.GetSparseColumn(var_to_eliminate)) {
1573 const RowIndex row = entry.row();
1574
1575 // We disallow all the rows that contain a variable that we already
1576 // eliminated (or are about to). This means that we choose rows that
1577 // form a "triangular" matrix on the position we choose to eliminate.
1578 if (used_rows[row]) continue;
1579 used_rows[row] = true;
1580
1581 // We only consider "tight" rows, as defined above.
1582 bool add_row = false;
1583 if (at_ub[row]) {
1584 if (entry.coefficient() > 0.0) {
1585 if (dense_cut[var_to_eliminate] < 0) add_row = true;
1586 } else {
1587 if (dense_cut[var_to_eliminate] > 0) add_row = true;
1588 }
1589 }
1590 if (at_lb[row]) {
1591 if (entry.coefficient() > 0.0) {
1592 if (dense_cut[var_to_eliminate] > 0) add_row = true;
1593 } else {
1594 if (dense_cut[var_to_eliminate] < 0) add_row = true;
1595 }
1596 }
1597 if (add_row) {
1598 possible_rows.push_back(row);
1599 weights.push_back(row_weights[row]);
1600 }
1601 }
1602 if (possible_rows.empty()) break;
1603
1604 const RowIndex row_to_combine =
1605 possible_rows[WeightedPick(weights, *random_)];
1606
1607 // Find the coefficient of the variable to eliminate.
1608 IntegerValue to_combine_coeff = 0;
1609 const LinearConstraintInternal& ct_to_combine =
1610 integer_lp_[row_to_combine];
1611 for (int i = 0; i < ct_to_combine.num_terms; ++i) {
1612 const int index = ct_to_combine.start_in_buffer + i;
1613 if (integer_lp_cols_[index] == var_to_eliminate) {
1614 to_combine_coeff = integer_lp_coeffs_[index];
1615 break;
1616 }
1617 }
1618 CHECK_NE(to_combine_coeff, 0);
1619
1620 IntegerValue mult1 = -to_combine_coeff;
1621 IntegerValue mult2 = dense_cut[var_to_eliminate];
1622 CHECK_NE(mult2, 0);
1623 if (mult1 < 0) {
1624 mult1 = -mult1;
1625 mult2 = -mult2;
1626 }
1627
1628 const IntegerValue gcd = IntegerValue(
1629 MathUtil::GCD64(std::abs(mult1.value()), std::abs(mult2.value())));
1630 CHECK_NE(gcd, 0);
1631 mult1 /= gcd;
1632 mult2 /= gcd;
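// Illustrative example (added for clarity, not from the original source):
// if dense_cut[var_to_eliminate] = 6 and to_combine_coeff = -4, then
// mult1 = 4, mult2 = 6 and gcd = 2, giving mult1 = 2 and mult2 = 3. The
// eliminated variable's coefficient then becomes 6 * 2 + (-4) * 3 = 0.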
1633
1634 // Overflow detection.
1635 //
1636 // TODO(user): do that in the possible_rows selection? only problem is
1637 // that we do not have the integer coefficient there...
1638 for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
1639 max_magnitude = std::max(max_magnitude, IntTypeAbs(entry.second));
1640 }
1641 if (CapAdd(CapProd(max_magnitude.value(), std::abs(mult1.value())),
1642 CapProd(infinity_norms_[row_to_combine].value(),
1643 std::abs(mult2.value()))) ==
1644 std::numeric_limits<int64_t>::max()) {
1645 break;
1646 }
1647
1648 for (std::pair<RowIndex, IntegerValue>& entry : integer_multipliers) {
1649 dtime_num_entries += integer_lp_[entry.first].num_terms;
1650 entry.second *= mult1;
1651 }
1652 dtime_num_entries += integer_lp_[row_to_combine].num_terms;
1653 integer_multipliers.push_back({row_to_combine, mult2});
1654
1655 // TODO(user): Not super efficient to recombine the rows.
1656 if (AddCutFromConstraints(absl::StrCat("MIR_", i + 2),
1657 integer_multipliers)) {
1658 break;
1659 }
1660
1661 // Minor optim: the computation below is only needed if we do one more
1662 // iteration.
1663 if (i + 1 == kMaxAggregation) break;
1664
1665 for (ColIndex col : non_zeros_.PositionsSetAtLeastOnce()) {
1666 dense_cut[col] *= mult1;
1667 }
1668 for (int i = 0; i < ct_to_combine.num_terms; ++i) {
1669 const int index = ct_to_combine.start_in_buffer + i;
1670 const ColIndex col = integer_lp_cols_[index];
1671 const IntegerValue coeff = integer_lp_coeffs_[index];
1672 non_zeros_.Set(col);
1673 dense_cut[col] += coeff * mult2;
1674 }
1675 }
1676 }
1677}
1678
1679void LinearProgrammingConstraint::AddZeroHalfCuts() {
1680 if (time_limit_->LimitReached()) return;
1681
1682 tmp_lp_values_.clear();
1683 tmp_var_lbs_.clear();
1684 tmp_var_ubs_.clear();
1685 for (const IntegerVariable var : integer_variables_) {
1686 tmp_lp_values_.push_back(expanded_lp_solution_[var]);
1687 tmp_var_lbs_.push_back(integer_trail_->LevelZeroLowerBound(var));
1688 tmp_var_ubs_.push_back(integer_trail_->LevelZeroUpperBound(var));
1689 }
1690
1691 // TODO(user): See if it makes sense to try to use implied bounds there.
1692 zero_half_cut_helper_.ProcessVariables(tmp_lp_values_, tmp_var_lbs_,
1693 tmp_var_ubs_);
1694 for (glop::RowIndex row(0); row < integer_lp_.size(); ++row) {
1695 // Even though we could use non-tight rows, for now we prefer to use tight
1696 // ones.
1697 const auto status = simplex_.GetConstraintStatus(row);
1698 if (status == glop::ConstraintStatus::BASIC) continue;
1699 if (status == glop::ConstraintStatus::FREE) continue;
1700
1701 zero_half_cut_helper_.AddOneConstraint(
1702 row, IntegerLpRowCols(row), IntegerLpRowCoeffs(row),
1703 integer_lp_[row].lb, integer_lp_[row].ub);
1704 }
1705 for (const std::vector<std::pair<RowIndex, IntegerValue>>& multipliers :
1706 zero_half_cut_helper_.InterestingCandidates(random_)) {
1707 if (time_limit_->LimitReached()) break;
1708
1709 // TODO(user): Make sure that if the resulting linear coefficients are not
1710 // too high, we do try a "divisor" of two and thus try a true zero-half cut
1711 // instead of just using our best MIR heuristic (which might still be better
1712 // though).
1713 AddCutFromConstraints("ZERO_HALF", multipliers);
1714 }
1715}
1716
1717void LinearProgrammingConstraint::UpdateSimplexIterationLimit(
1718 const int64_t min_iter, const int64_t max_iter) {
1719 if (parameters_.linearization_level() < 2) return;
1720 const int64_t num_degenerate_columns = CalculateDegeneracy();
1721 const int64_t num_cols = simplex_.GetProblemNumCols().value();
1722 if (num_cols <= 0) {
1723 return;
1724 }
1725 CHECK_GT(num_cols, 0);
1726 const int64_t decrease_factor = (10 * num_degenerate_columns) / num_cols;
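// Illustrative example (added for clarity): with 1000 columns of which 600
// are degenerate, decrease_factor = (10 * 600) / 1000 = 6.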
1727 if (simplex_.GetProblemStatus() == glop::ProblemStatus::DUAL_FEASIBLE) {
1728 // We reached here probably because we predicted wrong. We use this as a
1729 // signal to increase the iterations or punish less for degeneracy compared
1730 // to the other part.
1731 if (is_degenerate_) {
1732 next_simplex_iter_ /= std::max(int64_t{1}, decrease_factor);
1733 } else {
1734 next_simplex_iter_ *= 2;
1735 }
1736 } else if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
1737 if (is_degenerate_) {
1738 next_simplex_iter_ /= std::max(int64_t{1}, 2 * decrease_factor);
1739 } else {
1740 // This is the most common case. We use the size of the problem to
1741 // determine the limit and ignore the previous limit.
1742 next_simplex_iter_ = num_cols / 40;
1743 }
1744 }
1745 next_simplex_iter_ =
1746 std::max(min_iter, std::min(max_iter, next_simplex_iter_));
1747}
1748
1749 bool LinearProgrammingConstraint::Propagate() {
1750 if (!enabled_) return true;
1751 if (time_limit_->LimitReached()) return true;
1752 UpdateBoundsOfLpVariables();
1753
1754 // TODO(user): It seems the time we lose by not stopping early might be worth
1755 // it because we end up with a better explanation at optimality.
1756 if (/* DISABLES CODE */ (false) && objective_is_defined_) {
1757 // We put a limit on the dual objective since there is no point increasing
1758 // it past our current objective upper-bound (we will already fail as soon
1759 // as we pass it). Note that this limit is properly transformed using the
1760 // objective scaling factor and offset stored in lp_data_.
1761 //
1762 // Note that we use a bigger epsilon here to be sure that if we abort
1763 // because of this, we will report a conflict.
1764 simplex_params_.set_objective_upper_limit(
1765 static_cast<double>(integer_trail_->UpperBound(objective_cp_).value() +
1766 100.0 * kCpEpsilon));
1767 }
1768
1769 // Put an iteration limit on the work we do in the simplex for this call. Note
1770 // that because we are "incremental", even if we don't solve it this time we
1771 // will make progress towards a solve in the lower nodes of the tree search.
1772 if (trail_->CurrentDecisionLevel() == 0) {
1773 simplex_params_.set_max_number_of_iterations(
1774 parameters_.root_lp_iterations());
1775 } else {
1776 simplex_params_.set_max_number_of_iterations(next_simplex_iter_);
1777 }
1778
1779 simplex_.SetParameters(simplex_params_);
1780 if (!SolveLp()) return true;
1781 if (!AnalyzeLp()) return false;
1782
1783 // Add new constraints to the LP and resolve?
1784 const int max_cuts_rounds = trail_->CurrentDecisionLevel() == 0
1785 ? parameters_.max_cut_rounds_at_level_zero()
1786 : 1;
1787 int cuts_round = 0;
1788 while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL &&
1789 cuts_round < max_cuts_rounds) {
1790 // We wait for the first batch of problem constraints to be added before we
1791 // begin to generate cuts. Note that we rely on num_solves_ since on some
1792 // problems there are no other constraints than the cuts.
1793 cuts_round++;
1794 if (parameters_.cut_level() > 0 &&
1795 (num_solves_ > 1 || !parameters_.add_lp_constraints_lazily())) {
1796 // This must be called first.
1797 implied_bounds_processor_.RecomputeCacheAndSeparateSomeImpliedBoundCuts(
1798 expanded_lp_solution_);
1799 if (parameters_.add_rlt_cuts()) {
1800 rlt_cut_helper_.Initialize(mirror_lp_variable_);
1801 }
1802
1803 // The "generic" cuts are currently part of this class as they are using
1804 // data from the current LP.
1805 //
1806 // TODO(user): Refactor so that they are just normal cut generators?
1807 const int level = trail_->CurrentDecisionLevel();
1808 if (trail_->CurrentDecisionLevel() == 0) {
1809 problem_proven_infeasible_by_cuts_ = false;
1810 if (parameters_.add_objective_cut()) AddObjectiveCut();
1811 if (parameters_.add_mir_cuts()) AddMirCuts();
1812 if (parameters_.add_cg_cuts()) AddCGCuts();
1813 if (parameters_.add_zero_half_cuts()) AddZeroHalfCuts();
1814 if (problem_proven_infeasible_by_cuts_) {
1815 return integer_trail_->ReportConflict({});
1816 }
1817 top_n_cuts_.TransferToManager(&constraint_manager_);
1818 }
1819
1820 // Try to add cuts.
1821 if (level == 0 || !parameters_.only_add_cuts_at_level_zero()) {
1822 for (const CutGenerator& generator : cut_generators_) {
1823 if (level > 0 && generator.only_run_at_level_zero) continue;
1824 if (!generator.generate_cuts(&constraint_manager_)) {
1825 return false;
1826 }
1827 }
1828 }
1829
1830 implied_bounds_processor_.IbCutPool().TransferToManager(
1831 &constraint_manager_);
1832 }
1833
1834 int num_added = 0;
1835 if (constraint_manager_.ChangeLp(&state_, &num_added)) {
1836 ++num_lp_changes_;
1837 simplex_.LoadStateForNextSolve(state_);
1838 if (!CreateLpFromConstraintManager()) {
1839 return integer_trail_->ReportConflict({});
1840 }
1841
1842 // If we didn't add any new constraint, we delay the next Solve() since
1843 // likely the optimal didn't change.
1844 if (num_added == 0) {
1845 break;
1846 }
1847
1848 const double old_obj = simplex_.GetObjectiveValue();
1849 if (!SolveLp()) return true;
1850 if (!AnalyzeLp()) return false;
1851 if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
1852 VLOG(3) << "Relaxation improvement " << old_obj << " -> "
1853 << simplex_.GetObjectiveValue()
1854 << " diff: " << simplex_.GetObjectiveValue() - old_obj
1855 << " level: " << trail_->CurrentDecisionLevel();
1856 }
1857 } else {
1858 if (trail_->CurrentDecisionLevel() == 0) {
1859 lp_at_level_zero_is_final_ = true;
1860 }
1861 break;
1862 }
1863 }
1864
1865 return true;
1866}
1867
1868absl::int128 LinearProgrammingConstraint::GetImpliedLowerBound(
1869 const LinearConstraint& terms) const {
1870 absl::int128 lower_bound(0);
1871 const int size = terms.num_terms;
1872 for (int i = 0; i < size; ++i) {
1873 const IntegerVariable var = terms.vars[i];
1874 const IntegerValue coeff = terms.coeffs[i];
1875 const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
1876 : integer_trail_->UpperBound(var);
1877 lower_bound += absl::int128(bound.value()) * absl::int128(coeff.value());
1878 }
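// Illustrative example (added for clarity): for 3x - 2y with x in [1, 5]
// and y in [0, 4], the implied lower bound is 3 * 1 + (-2) * 4 = -5.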
1879 return lower_bound;
1880}
1881
1882bool LinearProgrammingConstraint::ScalingCanOverflow(
1883 int power, bool take_objective_into_account,
1884 absl::Span<const std::pair<glop::RowIndex, double>> multipliers,
1885 int64_t overflow_cap) const {
1886 int64_t bound = 0;
1887 const int64_t factor = int64_t{1} << power;
1888 const double factor_as_double = static_cast<double>(factor);
1889 if (take_objective_into_account) {
1890 bound = CapAdd(bound, CapProd(factor, objective_infinity_norm_.value()));
1891 if (bound >= overflow_cap) return true;
1892 }
1893 for (const auto [row, double_coeff] : multipliers) {
1894 const double magnitude =
1895 std::abs(std::round(double_coeff * factor_as_double));
1896 if (std::isnan(magnitude)) return true;
1897 if (magnitude >= static_cast<double>(std::numeric_limits<int64_t>::max())) {
1898 return true;
1899 }
1900 bound = CapAdd(bound, CapProd(static_cast<int64_t>(magnitude),
1901 infinity_norms_[row].value()));
1902 if (bound >= overflow_cap) return true;
1903 }
1904 return bound >= overflow_cap;
1905}
1906
1907std::vector<std::pair<RowIndex, IntegerValue>>
1908LinearProgrammingConstraint::ScaleLpMultiplier(
1909 bool take_objective_into_account, bool ignore_trivial_constraints,
1910 const std::vector<std::pair<RowIndex, double>>& lp_multipliers,
1911 IntegerValue* scaling, int64_t overflow_cap) const {
1912 *scaling = 0;
1913
1914 // First unscale the values with the LP scaling and remove bad cases.
1915 tmp_cp_multipliers_.clear();
1916 for (const std::pair<RowIndex, double>& p : lp_multipliers) {
1917 const RowIndex row = p.first;
1918 const Fractional lp_multi = p.second;
1919
1920 // We ignore small values since these are likely errors and will not
1921 // contribute much to the new lp constraint anyway.
1922 if (std::abs(lp_multi) < kZeroTolerance) continue;
1923
1924 // Remove constraints that shouldn't be helpful.
1925 //
1926 // In practice, because we can complement the slack, it might still be
1927 // useful to have some constraint with a trivial upper bound.
1928 if (ignore_trivial_constraints) {
1929 if (lp_multi > 0.0 && integer_lp_[row].ub_is_trivial) {
1930 continue;
1931 }
1932 if (lp_multi < 0.0 && integer_lp_[row].lb_is_trivial) {
1933 continue;
1934 }
1935 }
1936
1937 tmp_cp_multipliers_.push_back(
1938 {row, scaler_.UnscaleDualValue(row, lp_multi)});
1939 }
1940
1941 std::vector<std::pair<RowIndex, IntegerValue>> integer_multipliers;
1942 if (tmp_cp_multipliers_.empty()) {
1943 // Empty linear combination.
1944 return integer_multipliers;
1945 }
1946
1947 // TODO(user): we currently do not support scaling down, so we just abort
1948 // if, with a scaling of 1, we reach the overflow_cap.
1949 if (ScalingCanOverflow(/*power=*/0, take_objective_into_account,
1950 tmp_cp_multipliers_, overflow_cap)) {
1951 ++num_scaling_issues_;
1952 return integer_multipliers;
1953 }
1954
1955 // Note that we don't try to scale by more than 63 since in practice the
1956 // constraint multipliers should not be all super small.
1957 //
1958 // TODO(user): We could be faster here, but trying to compute the exact power
1959 // in one go with floating points seems tricky. So we just do around 6 passes
1960 // plus the one above for zero.
1961 const int power = BinarySearch<int>(0, 63, [&](int candidate) {
1962 // Because BinarySearch() wants f(upper_bound) to fail, we bypass the test
1963 // here since, when the floating point values are really small, we could pass
1964 // with a large power.
1965 if (candidate >= 63) return false;
1966
1967 return !ScalingCanOverflow(candidate, take_objective_into_account,
1968 tmp_cp_multipliers_, overflow_cap);
1969 });
1970 *scaling = int64_t{1} << power;
1971
1972 // Scale the multipliers by *scaling.
1973 // Note that we use the exact same formula as in ScalingCanOverflow().
1974 int64_t gcd = scaling->value();
1975 const double scaling_as_double = static_cast<double>(scaling->value());
1976 for (const auto [row, double_coeff] : tmp_cp_multipliers_) {
1977 const IntegerValue coeff(std::round(double_coeff * scaling_as_double));
1978 if (coeff != 0) {
1979 gcd = std::gcd(gcd, std::abs(coeff.value()));
1980 integer_multipliers.push_back({row, coeff});
1981 }
1982 }
1983 if (gcd > 1) {
1984 *scaling /= gcd;
1985 for (auto& entry : integer_multipliers) {
1986 entry.second /= gcd;
1987 }
1988 }
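// Illustrative example (added for clarity): with multipliers {0.5, 0.25}, if
// the binary search settles on power = 3 we get *scaling = 8 and integer
// coefficients {4, 2}; their gcd with the scaling is 2, so we end up with
// *scaling = 4 and coefficients {2, 1}.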
1989 return integer_multipliers;
1990}
1991
1992template <bool check_overflow>
1993bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
1994 absl::Span<const std::pair<RowIndex, IntegerValue>> integer_multipliers,
1995 ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
1996 // Initialize the new constraint.
1997 *upper_bound = 0;
1998 scattered_vector->ClearAndResize(integer_variables_.size());
1999
2000 // Compute the new constraint by taking the linear combination given by
2001 // integer_multipliers of the integer constraints in integer_lp_.
2002 for (const std::pair<RowIndex, IntegerValue>& term : integer_multipliers) {
2003 const RowIndex row = term.first;
2004 const IntegerValue multiplier = term.second;
2005 CHECK_LT(row, integer_lp_.size());
2006
2007 // Update the constraint.
2008 if (!scattered_vector->AddLinearExpressionMultiple<check_overflow>(
2009 multiplier, IntegerLpRowCols(row), IntegerLpRowCoeffs(row))) {
2010 return false;
2011 }
2012
2013 // Update the upper bound.
2014 const IntegerValue bound =
2015 multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
2016 if (!AddProductTo(multiplier, bound, upper_bound)) return false;
2017 }
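// Illustrative example (added for clarity): multiplier 2 on a row x + y <= 5
// and multiplier 3 on a row x - y <= 1 combine into 5x - y <= 2 * 5 + 3 * 1 = 13.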
2018
2019 return true;
2020}
2021
2022// TODO(user): no need to update the multipliers.
2023void LinearProgrammingConstraint::AdjustNewLinearConstraint(
2024 std::vector<std::pair<glop::RowIndex, IntegerValue>>* integer_multipliers,
2025 ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const {
2026 const IntegerValue kMaxWantedCoeff(1e18);
2027 bool adjusted = false;
2028 for (std::pair<RowIndex, IntegerValue>& term : *integer_multipliers) {
2029 const RowIndex row = term.first;
2030 const IntegerValue multiplier = term.second;
2031 if (multiplier == 0) continue;
2032
2033 // We will only allow changes of the form "multiplier += to_add" with to_add
2034 // in [-negative_limit, positive_limit].
2035 //
2036 // We do not want to_add * row to overflow.
2037 IntegerValue negative_limit =
2038 FloorRatio(kMaxWantedCoeff, infinity_norms_[row]);
2039 IntegerValue positive_limit = negative_limit;
2040
2041 // Make sure we never change the sign of the multiplier, except if the
2042 // row is an equality in which case we don't care.
2043 if (integer_lp_[row].ub != integer_lp_[row].lb) {
2044 if (multiplier > 0) {
2045 negative_limit = std::min(negative_limit, multiplier);
2046 } else {
2047 positive_limit = std::min(positive_limit, -multiplier);
2048 }
2049 }
2050
2051 // Make sure upper_bound + to_add * row_bound never overflows.
2052 const IntegerValue row_bound =
2053 multiplier > 0 ? integer_lp_[row].ub : integer_lp_[row].lb;
2054 if (row_bound != 0) {
2055 const IntegerValue limit1 = FloorRatio(
2056 std::max(IntegerValue(0), kMaxWantedCoeff - IntTypeAbs(*upper_bound)),
2057 IntTypeAbs(row_bound));
2058 const IntegerValue limit2 =
2059 FloorRatio(kMaxWantedCoeff, IntTypeAbs(row_bound));
2060 if ((*upper_bound > 0) == (row_bound > 0)) { // Same sign.
2061 positive_limit = std::min(positive_limit, limit1);
2062 negative_limit = std::min(negative_limit, limit2);
2063 } else {
2064 negative_limit = std::min(negative_limit, limit1);
2065 positive_limit = std::min(positive_limit, limit2);
2066 }
2067 }
2068
2069 // If we add the row to the scattered_vector, diff will indicate by how much
2070 // |upper_bound - ImpliedLB(scattered_vector)| will change. That corresponds
2071 // to increasing the multiplier by 1.
2072 //
2073 // At this stage, we are not sure computing sum coeff * bound will not
2074 // overflow, so we use floating point numbers. It is fine to do so since
2075 // this is not directly involved in the actual exact constraint generation:
2076 // these variables are just used in a heuristic.
2077 double common_diff = ToDouble(row_bound);
2078 double positive_diff = 0.0;
2079 double negative_diff = 0.0;
2080
2081 // TODO(user): we could relax a bit some of the condition and allow a sign
2082 // change. It is just trickier to compute the diff when we allow such
2083 // changes.
2084 const LinearConstraintInternal& ct = integer_lp_[row];
2085 for (int i = 0; i < ct.num_terms; ++i) {
2086 const int index = ct.start_in_buffer + i;
2087 const ColIndex col = integer_lp_cols_[index];
2088 const IntegerValue coeff = integer_lp_coeffs_[index];
2089 DCHECK_NE(coeff, 0);
2090
2091 // Moving a variable away from zero seems to improve the bound even
2092 // if it reduces the number of non-zeros. It is because of this that
2093 // positive_diff and negative_diff are not the same.
2094 const IntegerValue current = (*scattered_vector)[col];
2095 if (current == 0) {
2096 const IntegerVariable var = integer_variables_[col.value()];
2097 const IntegerValue lb = integer_trail_->LowerBound(var);
2098 const IntegerValue ub = integer_trail_->UpperBound(var);
2099 if (coeff > 0) {
2100 positive_diff -= ToDouble(coeff) * ToDouble(lb);
2101 negative_diff -= ToDouble(coeff) * ToDouble(ub);
2102 } else {
2103 positive_diff -= ToDouble(coeff) * ToDouble(ub);
2104 negative_diff -= ToDouble(coeff) * ToDouble(lb);
2105 }
2106 continue;
2107 }
2108
2109 // We don't want to change the sign of current (except if the variable is
2110 // fixed, but in practice we should have removed any fixed variable, so we
2111 // don't care here) or to have an overflow.
2112 //
2113 // Corner case:
2114 // - IntTypeAbs(current) can be larger than kMaxWantedCoeff!
2115 // - The code assumes that 2 * kMaxWantedCoeff does not overflow.
2116 //
2117 // Note that we know that limit * abs_coeff will never overflow
2118 // because we used infinity_norms_[row] above.
2119 const IntegerValue abs_coeff = IntTypeAbs(coeff);
2120 const IntegerValue current_magnitude = IntTypeAbs(current);
2121 const IntegerValue overflow_threshold =
2122 std::max(IntegerValue(0), kMaxWantedCoeff - current_magnitude);
2123 if ((current > 0) == (coeff > 0)) { // Same sign.
2124 if (negative_limit * abs_coeff > current_magnitude) {
2125 negative_limit = current_magnitude / abs_coeff;
2126 if (positive_limit == 0 && negative_limit == 0) break;
2127 }
2128 if (positive_limit * abs_coeff > overflow_threshold) {
2129 positive_limit = overflow_threshold / abs_coeff;
2130 if (positive_limit == 0 && negative_limit == 0) break;
2131 }
2132 } else {
2133 if (negative_limit * abs_coeff > overflow_threshold) {
2134 negative_limit = overflow_threshold / abs_coeff;
2135 if (positive_limit == 0 && negative_limit == 0) break;
2136 }
2137 if (positive_limit * abs_coeff > current_magnitude) {
2138 positive_limit = current_magnitude / abs_coeff;
2139 if (positive_limit == 0 && negative_limit == 0) break;
2140 }
2141 }
2142
2143 // This is how diff changes.
2144 const IntegerVariable var = integer_variables_[col.value()];
2145 const IntegerValue implied = current > 0
2146 ? integer_trail_->LowerBound(var)
2147 : integer_trail_->UpperBound(var);
2148 if (implied != 0) {
2149 common_diff -= ToDouble(coeff) * ToDouble(implied);
2150 }
2151 }
2152
2153 positive_diff += common_diff;
2154 negative_diff += common_diff;
2155
2156 // Only add a multiple of this row if it tightens the final constraint.
2157 // The positive_diff/negative_diff are supposed to be integers modulo the
2158 // double precision, so we only add a multiple if they seem far away from
2159 // zero.
2160 IntegerValue to_add(0);
2161 if (positive_diff <= -1.0 && positive_limit > 0) {
2162 to_add = positive_limit;
2163 }
2164 if (negative_diff >= 1.0 && negative_limit > 0) {
2165 // Pick this if it is better than the positive sign.
2166 if (to_add == 0 ||
2167 std::abs(ToDouble(negative_limit) * negative_diff) >
2168 std::abs(ToDouble(positive_limit) * positive_diff)) {
2169 to_add = -negative_limit;
2170 }
2171 }
2172 if (to_add != 0) {
2173 term.second += to_add;
2174 *upper_bound += to_add * row_bound;
2175
2176 // TODO(user): we could avoid checking overflow here, but this is likely
2177 // not in the hot loop.
2178 adjusted = true;
2179 CHECK(scattered_vector
2180 ->AddLinearExpressionMultiple</*check_overflow=*/false>(
2181 to_add, IntegerLpRowCols(row), IntegerLpRowCoeffs(row)));
2182 }
2183 }
2184 if (adjusted) ++num_adjusts_;
2185}
2186
2187bool LinearProgrammingConstraint::PropagateLpConstraint(LinearConstraint ct) {
2188 DCHECK(constraint_manager_.DebugCheckConstraint(ct));
2189
2190 // We need to cache this before we std::move() the constraint!
2191 const int num_terms = ct.num_terms;
2192 if (num_terms == 0) {
2193 if (ct.ub >= 0) return true;
2194 return integer_trail_->ReportConflict({}); // Unsat.
2195 }
2196
2197 std::unique_ptr<IntegerSumLE128> cp_constraint(
2198 new IntegerSumLE128(std::move(ct), model_));
2199
2200 // We always propagate level zero bounds first.
2201 // If we are at level zero, there is nothing else to do.
2202 if (!cp_constraint->PropagateAtLevelZero()) return false;
2203 if (trail_->CurrentDecisionLevel() == 0) return true;
2204
2205 // To optimize the memory usage, if the constraint didn't propagate anything,
2206 // we don't need to keep it around.
2207 const int64_t stamp = integer_trail_->num_enqueues();
2208 const bool no_conflict = cp_constraint->Propagate();
2209 if (no_conflict && integer_trail_->num_enqueues() == stamp) return true;
2210
2211 const int64_t current_size =
2212 cumulative_optimal_constraint_sizes_.empty()
2213 ? 0
2214 : cumulative_optimal_constraint_sizes_.back();
2215 optimal_constraints_.push_back(std::move(cp_constraint));
2216 cumulative_optimal_constraint_sizes_.push_back(current_size + num_terms);
2217 rev_optimal_constraints_size_ = optimal_constraints_.size();
2218 return no_conflict;
2219}
2220
2221// The "exact" computation go as follows:
2222//
2223// Given any INTEGER linear combination of the LP constraints, we can create a
2224// new integer constraint that is valid (its computation must not overflow
2225 // though). Let's call this "linear_combination <= ub". We can then always add to
2226// it the inequality "objective_terms <= objective_var", so we get:
2227// ImpliedLB(objective_terms + linear_combination) - ub <= objective_var.
2228// where ImpliedLB() is computed from the variable current bounds.
2229//
2230 // Now, if we use for the linear combination an approximation of the optimal
2231 // negated dual LP values (by scaling them and rounding them to integers), we
2232 // will get an EXACT objective lower bound that is more or less the same as the
2233 // inexact bound given by the LP relaxation. This allows us to derive exact reasons
2234// for any propagation done by this constraint.
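// Illustrative example (added for clarity, not from the original source): with a
// single row x + y >= 3 (i.e. -x - y <= -3) and objective x + y, the integer
// multiplier 1 on that row gives linear_combination = -x - y and ub = -3. The sum
// objective_terms + linear_combination is identically 0, so its implied lower
// bound is 0 and we deduce 0 - (-3) = 3 <= objective_var, matching the LP bound.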
2235bool LinearProgrammingConstraint::PropagateExactLpReason() {
2236 // Clear old reason and deductions.
2237 integer_reason_.clear();
2238 deductions_.clear();
2239 deductions_reason_.clear();
2240
2241 // The row multipliers will be the negation of the LP duals.
2242 //
2243 // TODO(user): Provide and use a sparse API in Glop to get the duals.
2244 const RowIndex num_rows = simplex_.GetProblemNumRows();
2245 tmp_lp_multipliers_.clear();
2246 for (RowIndex row(0); row < num_rows; ++row) {
2247 const double value = -simplex_.GetDualValue(row);
2248 if (std::abs(value) < kZeroTolerance) continue;
2249 tmp_lp_multipliers_.push_back({row, value});
2250 }
2251
2252 // In this case, the LP lower bound matches the basic objective "constraint"
2253 // propagation. That is, there is an LP solution with all objective variables at
2254 // their current best bounds. There is no need to do more work here.
2255 if (tmp_lp_multipliers_.empty()) return true;
2256
2257 // For the corner case of an objective of size 1, we do not want or need
2258 // to take it into account.
2259 bool take_objective_into_account = true;
2260 if (mirror_lp_variable_.contains(objective_cp_)) {
2261 // The objective is part of the lp.
2262 // This should only happen for an objective with a single term.
2263 CHECK_EQ(integer_objective_.size(), 1);
2264 CHECK_EQ(integer_objective_[0].first,
2265 mirror_lp_variable_.at(objective_cp_));
2266 CHECK_EQ(integer_objective_[0].second, IntegerValue(1));
2267
2268 take_objective_into_account = false;
2269 }
2270
2271 IntegerValue scaling = 0;
2272 tmp_integer_multipliers_ = ScaleLpMultiplier(
2273 take_objective_into_account,
2274 /*ignore_trivial_constraints=*/true, tmp_lp_multipliers_, &scaling);
2275 if (scaling == 0) {
2276 VLOG(1) << simplex_.GetProblemStatus();
2277 VLOG(1) << "Issue while computing the exact LP reason. Aborting.";
2278 return true;
2279 }
2280
2281 IntegerValue rc_ub;
2282 CHECK(ComputeNewLinearConstraint</*check_overflow=*/false>(
2283 tmp_integer_multipliers_, &tmp_scattered_vector_, &rc_ub));
2284
2285 std::optional<std::pair<IntegerVariable, IntegerValue>> extra_term =
2286 std::nullopt;
2287 if (take_objective_into_account) {
2288 // The "objective constraint" behave like if the unscaled cp multiplier was
2289 // 1.0, so we will multiply it by this number and add it to reduced_costs.
2290 const IntegerValue obj_scale = scaling;
2291
2292 // TODO(user): Maybe avoid this conversion.
2293 tmp_cols_.clear();
2294 tmp_coeffs_.clear();
2295 for (const auto [col, coeff] : integer_objective_) {
2296 tmp_cols_.push_back(col);
2297 tmp_coeffs_.push_back(coeff);
2298 }
2299 CHECK(tmp_scattered_vector_
2300 .AddLinearExpressionMultiple</*check_overflow=*/false>(
2301 obj_scale, tmp_cols_, tmp_coeffs_));
2302 CHECK(AddProductTo(-obj_scale, integer_objective_offset_, &rc_ub));
2303
2304 extra_term = {objective_cp_, -obj_scale};
2305 }
2306
2307 AdjustNewLinearConstraint(&tmp_integer_multipliers_, &tmp_scattered_vector_,
2308 &rc_ub);
2309
2310 // Create the IntegerSumLE that will allow us to propagate the objective and more
2311 // generally do the reduced cost fixing.
2312 LinearConstraint explanation =
2313 tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_, rc_ub,
2314 extra_term);
2315
2316 // Corner case where preventing overflow removed all terms.
2317 if (explanation.num_terms == 0) {
2318 trail_->MutableConflict()->clear();
2319 return explanation.ub >= 0;
2320 }
2321
2322 return PropagateLpConstraint(std::move(explanation));
2323}
2324
2325bool LinearProgrammingConstraint::PropagateExactDualRay() {
2326 IntegerValue scaling;
2327 const glop::DenseColumn ray = simplex_.GetDualRay();
2328 tmp_lp_multipliers_.clear();
2329 for (RowIndex row(0); row < ray.size(); ++row) {
2330 const double value = ray[row];
2331 if (std::abs(value) < kZeroTolerance) continue;
2332 tmp_lp_multipliers_.push_back({row, value});
2333 }
2334 tmp_integer_multipliers_ = ScaleLpMultiplier(
2335 /*take_objective_into_account=*/false,
2336 /*ignore_trivial_constraints=*/true, tmp_lp_multipliers_, &scaling);
2337 if (scaling == 0) {
2338 VLOG(1) << "Isse while computing the exact dual ray reason. Aborting.";
2339 return true;
2340 }
2341
2342 IntegerValue new_constraint_ub;
2343 CHECK(ComputeNewLinearConstraint</*check_overflow=*/false>(
2344 tmp_integer_multipliers_, &tmp_scattered_vector_, &new_constraint_ub))
2345 << scaling;
2346
2347 AdjustNewLinearConstraint(&tmp_integer_multipliers_, &tmp_scattered_vector_,
2348 &new_constraint_ub);
2349
2350 LinearConstraint explanation =
2351 tmp_scattered_vector_.ConvertToLinearConstraint(integer_variables_,
2352 new_constraint_ub);
2353
2354 std::string message;
2355 if (VLOG_IS_ON(1)) {
2356 // Unfortunately, we need to set this up before we std::move() it.
2357 message = absl::StrCat("LP exact dual ray not infeasible,",
2358 " implied_lb: ", GetImpliedLowerBound(explanation),
2359 " ub: ", explanation.ub.value());
2360 }
2361
2362 // This should result in a conflict if the precision is good enough.
2363 if (!PropagateLpConstraint(std::move(explanation))) return false;
2364
2365 VLOG(1) << message;
2366 return true;
2367}
2368
2369int64_t LinearProgrammingConstraint::CalculateDegeneracy() {
2370 const glop::ColIndex num_vars = simplex_.GetProblemNumCols();
2371 int num_non_basic_with_zero_rc = 0;
2372 for (glop::ColIndex i(0); i < num_vars; ++i) {
2373 const double rc = simplex_.GetReducedCost(i);
2374 if (rc != 0.0) continue;
2375 if (simplex_.GetVariableStatus(i) == glop::VariableStatus::BASIC) {
2376 continue;
2377 }
2378 num_non_basic_with_zero_rc++;
2379 }
2380 const int64_t num_cols = simplex_.GetProblemNumCols().value();
2381 is_degenerate_ = num_non_basic_with_zero_rc >= 0.3 * num_cols;
2382 return num_non_basic_with_zero_rc;
2383}
2384
2385void LinearProgrammingConstraint::ReducedCostStrengtheningDeductions(
2386 double cp_objective_delta) {
2387 deductions_.clear();
2388
2389 // TRICKY: while simplex_.GetObjectiveValue() uses the objective scaling factor
2390 // stored in the lp_data_, all the other functions like GetReducedCost() or
2391 // GetVariableValue() do not.
2392 const double lp_objective_delta =
2393 cp_objective_delta / lp_data_.objective_scaling_factor();
2394 const int num_vars = integer_variables_.size();
2395 for (int i = 0; i < num_vars; i++) {
2396 const IntegerVariable cp_var = integer_variables_[i];
2397 const glop::ColIndex lp_var = glop::ColIndex(i);
2398 const double rc = simplex_.GetReducedCost(lp_var);
2399 const double value = simplex_.GetVariableValue(lp_var);
2400
2401 if (rc == 0.0) continue;
2402 const double lp_other_bound = value + lp_objective_delta / rc;
2403 const double cp_other_bound =
2404 scaler_.UnscaleVariableValue(lp_var, lp_other_bound);
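// Illustrative example (added for clarity, ignoring the LP scaling): with
// value = 2.3, rc = 4.0 and lp_objective_delta = 6.0, the other bound is
// 2.3 + 6.0 / 4.0 = 3.8, so the rc > 0 branch below deduces cp_var <= 3.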
2405
2406 if (rc > kLpEpsilon) {
2407 const double ub = ToDouble(integer_trail_->UpperBound(cp_var));
2408 const double new_ub = std::floor(cp_other_bound + kCpEpsilon);
2409 if (new_ub < ub) {
2410 // TODO(user): Because rc > kLpEpsilon, the lower_bound of cp_var
2411 // will be part of the reason returned by FillReducedCostsReason(), but
2412 // we actually do not need it here. Same below.
2413 const IntegerValue new_ub_int(static_cast<IntegerValue>(new_ub));
2414 deductions_.push_back(IntegerLiteral::LowerOrEqual(cp_var, new_ub_int));
2415 }
2416 } else if (rc < -kLpEpsilon) {
2417 const double lb = ToDouble(integer_trail_->LowerBound(cp_var));
2418 const double new_lb = std::ceil(cp_other_bound - kCpEpsilon);
2419 if (new_lb > lb) {
2420 const IntegerValue new_lb_int(static_cast<IntegerValue>(new_lb));
2421 deductions_.push_back(
2422 IntegerLiteral::GreaterOrEqual(cp_var, new_lb_int));
2423 }
2424 }
2425 }
2426}
2427
2428void LinearProgrammingConstraint::UpdateAverageReducedCosts() {
2429 const int num_vars = integer_variables_.size();
2430 if (sum_cost_down_.size() < num_vars) {
2431 sum_cost_down_.resize(num_vars, 0.0);
2432 num_cost_down_.resize(num_vars, 0);
2433 sum_cost_up_.resize(num_vars, 0.0);
2434 num_cost_up_.resize(num_vars, 0);
2435 rc_scores_.resize(num_vars, 0.0);
2436 }
2437
2438 // Decay averages.
2439 num_calls_since_reduced_cost_averages_reset_++;
2440 if (num_calls_since_reduced_cost_averages_reset_ == 10000) {
2441 for (int i = 0; i < num_vars; i++) {
2442 sum_cost_up_[i] /= 2;
2443 num_cost_up_[i] /= 2;
2444 sum_cost_down_[i] /= 2;
2445 num_cost_down_[i] /= 2;
2446 }
2447 num_calls_since_reduced_cost_averages_reset_ = 0;
2448 }
2449
2450 // Accumulate reduced costs of all unassigned variables.
2451 for (int i = 0; i < num_vars; i++) {
2452 const IntegerVariable var = integer_variables_[i];
2453
2454 // Skip fixed variables.
2455 if (integer_trail_->IsFixed(var)) continue;
2456
2457 // Skip reduced costs that are zero or close.
2458 const double rc = lp_reduced_cost_[i];
2459 if (std::abs(rc) < kCpEpsilon) continue;
2460
2461 if (rc < 0.0) {
2462 sum_cost_down_[i] -= rc;
2463 num_cost_down_[i]++;
2464 } else {
2465 sum_cost_up_[i] += rc;
2466 num_cost_up_[i]++;
2467 }
2468 }
2469
2470 // Tricky, we artificially reset the rc_rev_int_repository_ to level zero
2471 // so that the rev_rc_start_ is zero.
2472 rc_rev_int_repository_.SetLevel(0);
2473 rc_rev_int_repository_.SetLevel(trail_->CurrentDecisionLevel());
2474 rev_rc_start_ = 0;
2475
2476 // Cache the new score (higher is better) using the average reduced costs
2477 // as a signal.
2478 positions_by_decreasing_rc_score_.clear();
2479 for (int i = 0; i < num_vars; i++) {
2480 // If only one direction exists, we take its value divided by 2, so that
2481 // such a variable should have a smaller cost than the min of the two sides,
2482 // except if one direction has really high reduced costs.
2483 const double a_up =
2484 num_cost_up_[i] > 0 ? sum_cost_up_[i] / num_cost_up_[i] : 0.0;
2485 const double a_down =
2486 num_cost_down_[i] > 0 ? sum_cost_down_[i] / num_cost_down_[i] : 0.0;
2487 if (num_cost_down_[i] > 0 && num_cost_up_[i] > 0) {
2488 rc_scores_[i] = std::min(a_up, a_down);
2489 } else {
2490 rc_scores_[i] = 0.5 * (a_down + a_up);
2491 }
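// Illustrative example (added for clarity): with a_up = 4.0 and no "down"
// data (num_cost_down_[i] == 0), the score is 0.5 * (0.0 + 4.0) = 2.0,
// i.e. half of the only available average.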
2492
2493 // We ignore scores of zero (i.e. no data) and will follow the default
2494 // search heuristic if all variables are like this.
2495 if (rc_scores_[i] > 0.0) {
2496 positions_by_decreasing_rc_score_.push_back({-rc_scores_[i], i});
2497 }
2498 }
2499 std::sort(positions_by_decreasing_rc_score_.begin(),
2500 positions_by_decreasing_rc_score_.end());
2501}
2502
2503std::function<IntegerLiteral()>
2504 LinearProgrammingConstraint::HeuristicLpReducedCostAverageBranching() {
2505 return [this]() { return this->LPReducedCostAverageDecision(); };
2506}
2507
2508IntegerLiteral LinearProgrammingConstraint::LPReducedCostAverageDecision() {
2509 // Select the non-instantiated variable with the highest positive average reduced cost.
2510 int selected_index = -1;
2511 const int size = positions_by_decreasing_rc_score_.size();
2512 rc_rev_int_repository_.SaveState(&rev_rc_start_);
2513 for (int i = rev_rc_start_; i < size; ++i) {
2514 const int index = positions_by_decreasing_rc_score_[i].second;
2515 const IntegerVariable var = integer_variables_[index];
2516 if (integer_trail_->IsFixed(var)) continue;
2517 selected_index = index;
2518 rev_rc_start_ = i;
2519 break;
2520 }
2521
2522 if (selected_index == -1) return IntegerLiteral();
2523 const IntegerVariable var = integer_variables_[selected_index];
2524
2525 // If ceil(value) is current upper bound, try var == upper bound first.
2526 // Guarding with >= prevents numerical problems.
2527 // With 0/1 variables, this will tend to try setting to 1 first,
2528 // which produces more shallow trees.
2529 const IntegerValue ub = integer_trail_->UpperBound(var);
2530 const IntegerValue value_ceil(
2531 std::ceil(this->GetSolutionValue(var) - kCpEpsilon));
2532 if (value_ceil >= ub) {
2533 return IntegerLiteral::GreaterOrEqual(var, value_ceil);
2534 }
2535
2536 // If floor(value) is current lower bound, try var == lower bound first.
2537 // Guarding with <= prevents numerical problems.
2538 const IntegerValue lb = integer_trail_->LowerBound(var);
2539 const IntegerValue value_floor(
2540 std::floor(this->GetSolutionValue(var) + kCpEpsilon));
2541 if (value_floor <= lb) {
2542 return IntegerLiteral::LowerOrEqual(var, value_floor);
2543 }
2544
2545 // Here lb < value_floor <= value_ceil < ub.
2546 // Try the most promising split between var <= floor or var >= ceil.
2547 const double a_up =
2548 num_cost_up_[selected_index] > 0
2549 ? sum_cost_up_[selected_index] / num_cost_up_[selected_index]
2550 : 0.0;
2551 const double a_down =
2552 num_cost_down_[selected_index] > 0
2553 ? sum_cost_down_[selected_index] / num_cost_down_[selected_index]
2554 : 0.0;
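// Illustrative example (added for clarity): with a_down = 1.0 and a_up = 3.0,
// we branch var <= value_floor first, since the "down" direction has the
// smaller average reduced cost.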
2555 if (a_down < a_up) {
2556 return IntegerLiteral::LowerOrEqual(var, value_floor);
2557 } else {
2558 return IntegerLiteral::GreaterOrEqual(var, value_ceil);
2559 }
2560}
2561
2562absl::Span<const glop::ColIndex> LinearProgrammingConstraint::IntegerLpRowCols(
2563 glop::RowIndex row) const {
2564 const int start = integer_lp_[row].start_in_buffer;
2565 const size_t num_terms = static_cast<size_t>(integer_lp_[row].num_terms);
2566 return {integer_lp_cols_.data() + start, num_terms};
2567}
2568
2569absl::Span<const IntegerValue> LinearProgrammingConstraint::IntegerLpRowCoeffs(
2570 glop::RowIndex row) const {
2571 const int start = integer_lp_[row].start_in_buffer;
2572 const size_t num_terms = static_cast<size_t>(integer_lp_[row].num_terms);
2573 return {integer_lp_coeffs_.data() + start, num_terms};
2574}
2575
2576} // namespace sat
2577} // namespace operations_research