Google OR-Tools v9.11
bop_fs.cc
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/bop/bop_fs.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "absl/log/check.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/distributions.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "ortools/algorithms/sparse_permutation.h"
#include "ortools/base/logging.h"
#include "ortools/bop/bop_base.h"
#include "ortools/bop/bop_parameters.pb.h"
#include "ortools/bop/bop_solution.h"
#include "ortools/bop/bop_types.h"
#include "ortools/bop/bop_util.h"
#include "ortools/glop/lp_solver.h"
#include "ortools/glop/parameters.pb.h"
#include "ortools/lp_data/lp_data.h"
#include "ortools/lp_data/lp_types.h"
#include "ortools/sat/boolean_problem.h"
#include "ortools/sat/boolean_problem.pb.h"
#include "ortools/sat/clause.h"
#include "ortools/sat/lp_utils.h"
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/sat/sat_solver.h"
#include "ortools/sat/symmetry.h"
#include "ortools/sat/util.h"
#include "ortools/util/time_limit.h"

namespace operations_research {
namespace bop {
namespace {

using ::operations_research::glop::ColIndex;
using ::operations_research::glop::DenseRow;
using ::operations_research::glop::GlopParameters;
using ::operations_research::glop::RowIndex;

BopOptimizerBase::Status SolutionStatus(const BopSolution& solution,
                                        int64_t lower_bound) {
  // The lower bound might be greater than the cost of a feasible solution due
  // to rounding errors in the problem scaling and in Glop.
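  // A feasible solution with cost at or below the lower bound is proven
  // optimal; a feasible one above the bound is just a solution; an infeasible
  // one means nothing was found within the limits.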
  return solution.IsFeasible() ? (solution.GetCost() <= lower_bound
                                      ? BopOptimizerBase::OPTIMAL_SOLUTION_FOUND
                                      : BopOptimizerBase::SOLUTION_FOUND)
                               : BopOptimizerBase::LIMIT_REACHED;
}

bool AllIntegralValues(const DenseRow& values, double tolerance) {
  for (const glop::Fractional value : values) {
    // Note that this test is correct because in this part of the code, Bop
    // only deals with boolean variables.
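    // A value counts as integral when it lies within `tolerance` of 0.0 or of
    // 1.0; only strictly fractional values fail the test below.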
    if (value >= tolerance && value + tolerance < 1.0) {
      return false;
    }
  }
  return true;
}

84
85void DenseRowToBopSolution(const DenseRow& values, BopSolution* solution) {
86 CHECK(solution != nullptr);
87 CHECK_EQ(solution->Size(), values.size());
88 for (VariableIndex var(0); var < solution->Size(); ++var) {
89 solution->SetValue(var, round(values[ColIndex(var.value())]));
90 }
91}
92} // anonymous namespace
93
//------------------------------------------------------------------------------
// GuidedSatFirstSolutionGenerator
//------------------------------------------------------------------------------

GuidedSatFirstSolutionGenerator::GuidedSatFirstSolutionGenerator(
    absl::string_view name, Policy policy)
    : BopOptimizerBase(name),
      policy_(policy),
      abort_(false),
      state_update_stamp_(ProblemState::kInitialStampValue),
      sat_solver_() {}

GuidedSatFirstSolutionGenerator::~GuidedSatFirstSolutionGenerator() = default;

BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::SynchronizeIfNeeded(
    const ProblemState& problem_state) {
  if (state_update_stamp_ == problem_state.update_stamp()) {
    return BopOptimizerBase::CONTINUE;
  }
  state_update_stamp_ = problem_state.update_stamp();

  // Create the sat_solver if not already done.
  if (!sat_solver_) {
    sat_solver_ = std::make_unique<sat::SatSolver>();

    // Add in symmetries.
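    // The symmetry generators of the Boolean problem are computed and handed
    // to a SymmetryPropagator owned by the SAT solver, so the search can
    // exploit those permutations during propagation.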
    if (problem_state.GetParameters()
            .exploit_symmetry_in_sat_first_solution()) {
      std::vector<std::unique_ptr<SparsePermutation>> generators;
      FindLinearBooleanProblemSymmetries(problem_state.original_problem(),
                                         &generators);
      std::unique_ptr<sat::SymmetryPropagator> propagator(
          new sat::SymmetryPropagator());
      for (int i = 0; i < generators.size(); ++i) {
        propagator->AddSymmetry(std::move(generators[i]));
      }
      sat_solver_->AddPropagator(propagator.get());
      sat_solver_->TakePropagatorOwnership(std::move(propagator));
    }
  }

  const BopOptimizerBase::Status load_status =
      LoadStateProblemToSatSolver(problem_state, sat_solver_.get());
  if (load_status != BopOptimizerBase::CONTINUE) return load_status;

  switch (policy_) {
    case Policy::kNotGuided:
      break;
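    // LP-guided: steer each variable toward the rounded LP value, with weight
    // 1 - |value - round(value)| so that near-integral LP values translate
    // into the strongest preferences.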
    case Policy::kLpGuided:
      for (ColIndex col(0); col < problem_state.lp_values().size(); ++col) {
        const double value = problem_state.lp_values()[col];
        sat_solver_->SetAssignmentPreference(
            sat::Literal(sat::BooleanVariable(col.value()), round(value) == 1),
            1 - fabs(value - round(value)));
      }
      break;
    case Policy::kObjectiveGuided:
      UseObjectiveForSatAssignmentPreference(problem_state.original_problem(),
                                             sat_solver_.get());
      break;
    case Policy::kUserGuided:
      for (int i = 0; i < problem_state.assignment_preference().size(); ++i) {
        sat_solver_->SetAssignmentPreference(
            sat::Literal(sat::BooleanVariable(i),
                         problem_state.assignment_preference()[i]),
            1.0);
      }
      break;
  }
  return BopOptimizerBase::CONTINUE;
}

bool GuidedSatFirstSolutionGenerator::ShouldBeRun(
    const ProblemState& problem_state) const {
  if (abort_) return false;
  if (policy_ == Policy::kLpGuided && problem_state.lp_values().empty()) {
    return false;
  }
  if (policy_ == Policy::kUserGuided &&
      problem_state.assignment_preference().empty()) {
    return false;
  }
  return true;
}

BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::Optimize(
    const BopParameters& parameters, const ProblemState& problem_state,
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  const BopOptimizerBase::Status sync_status =
      SynchronizeIfNeeded(problem_state);
  if (sync_status != BopOptimizerBase::CONTINUE) return sync_status;

  sat::SatParameters sat_params;
  sat_params.set_max_time_in_seconds(time_limit->GetTimeLeft());
  sat_params.set_max_deterministic_time(time_limit->GetDeterministicTimeLeft());
  sat_params.set_random_seed(parameters.random_seed());

  // We use a relatively small conflict limit so that other optimizers get a
  // chance to run if this one is slow. Note that if this limit is reached, we
  // will return BopOptimizerBase::CONTINUE so that Optimize() will be called
  // again later to resume the current work.
  sat_params.set_max_number_of_conflicts(
      parameters.guided_sat_conflicts_chunk());
  sat_solver_->SetParameters(sat_params);

  const double initial_deterministic_time = sat_solver_->deterministic_time();
  const sat::SatSolver::Status sat_status = sat_solver_->Solve();
  time_limit->AdvanceDeterministicTime(sat_solver_->deterministic_time() -
                                       initial_deterministic_time);

  if (sat_status == sat::SatSolver::INFEASIBLE) {
    if (policy_ != Policy::kNotGuided) abort_ = true;
    if (problem_state.upper_bound() != std::numeric_limits<int64_t>::max()) {
      // As the solution in the state problem is feasible, it is proved optimal.
      learned_info->lower_bound = problem_state.upper_bound();
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
    // The problem is proved infeasible.
    return BopOptimizerBase::INFEASIBLE;
  }

  ExtractLearnedInfoFromSatSolver(sat_solver_.get(), learned_info);
  if (sat_status == sat::SatSolver::FEASIBLE) {
    SatAssignmentToBopSolution(sat_solver_->Assignment(),
                               &learned_info->solution);
    return SolutionStatus(learned_info->solution, problem_state.lower_bound());
  }

  return BopOptimizerBase::CONTINUE;
}

//------------------------------------------------------------------------------
// BopRandomFirstSolutionGenerator
//------------------------------------------------------------------------------
BopRandomFirstSolutionGenerator::BopRandomFirstSolutionGenerator(
    absl::string_view name, const BopParameters& parameters,
    sat::SatSolver* sat_propagator, absl::BitGenRef random)
    : BopOptimizerBase(name),
      random_(random),
      sat_propagator_(sat_propagator) {}

BopRandomFirstSolutionGenerator::~BopRandomFirstSolutionGenerator() = default;

// Only run the RandomFirstSolution when there is an objective to minimize.
bool BopRandomFirstSolutionGenerator::ShouldBeRun(
    const ProblemState& problem_state) const {
  return problem_state.original_problem().objective().literals_size() > 0;
}

BopOptimizerBase::Status BopRandomFirstSolutionGenerator::Optimize(
    const BopParameters& parameters, const ProblemState& problem_state,
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  // Save the current solver heuristics.
  const sat::SatParameters saved_params = sat_propagator_->parameters();
  const std::vector<std::pair<sat::Literal, float>> saved_prefs =
      sat_propagator_->AllPreferences();

  const int kMaxNumConflicts = 10;
  int64_t best_cost = problem_state.solution().IsFeasible()
                          ? problem_state.solution().GetCost()
                          : std::numeric_limits<int64_t>::max();
  int64_t remaining_num_conflicts =
      parameters.max_number_of_conflicts_in_random_solution_generation();
  int64_t old_num_failures = 0;

  // Optimization: Since each Solve() is really fast, we want to limit as
  // much as possible the work around one.
  bool objective_need_to_be_overconstrained =
      (best_cost != std::numeric_limits<int64_t>::max());
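  // When a feasible solution is already known, the objective is constrained
  // to be strictly below best_cost before each mini-solve, so that any new
  // solution found strictly improves on the incumbent.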

  bool solution_found = false;
  while (remaining_num_conflicts > 0 && !time_limit->LimitReached()) {
    sat_propagator_->Backtrack(0);
    old_num_failures = sat_propagator_->num_failures();

    sat::SatParameters sat_params = saved_params;
    sat::RandomizeDecisionHeuristic(random_, &sat_params);
    sat_params.set_max_number_of_conflicts(kMaxNumConflicts);
    sat_propagator_->SetParameters(sat_params);
    sat_propagator_->ResetDecisionHeuristic();

    if (objective_need_to_be_overconstrained) {
      if (!AddObjectiveConstraint(
              problem_state.original_problem(), false, sat::Coefficient(0),
              true, sat::Coefficient(best_cost) - 1, sat_propagator_)) {
        // The solution is proved optimal (if any).
        learned_info->lower_bound = best_cost;
        return best_cost == std::numeric_limits<int64_t>::max()
                   ? BopOptimizerBase::INFEASIBLE
                   : BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
      }
      objective_need_to_be_overconstrained = false;
    }

    // Special assignment preference parameters.
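    // One of four strategies is drawn uniformly at random: 0 seeds the
    // preferences from the objective, 1 seeds them from the LP relaxation
    // values (when available), and 2-3 keep only the randomized decision
    // heuristic chosen above.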
    const int preference = absl::Uniform(random_, 0, 4);
    if (preference == 0) {
      UseObjectiveForSatAssignmentPreference(problem_state.original_problem(),
                                             sat_propagator_);
    } else if (preference == 1 && !problem_state.lp_values().empty()) {
      // Assign SAT assignment preference based on the LP solution.
      for (ColIndex col(0); col < problem_state.lp_values().size(); ++col) {
        const double value = problem_state.lp_values()[col];
        sat_propagator_->SetAssignmentPreference(
            sat::Literal(sat::BooleanVariable(col.value()), round(value) == 1),
            1 - fabs(value - round(value)));
      }
    }

    const sat::SatSolver::Status sat_status =
        sat_propagator_->SolveWithTimeLimit(time_limit);
    if (sat_status == sat::SatSolver::FEASIBLE) {
      objective_need_to_be_overconstrained = true;
      solution_found = true;
      SatAssignmentToBopSolution(sat_propagator_->Assignment(),
                                 &learned_info->solution);
      CHECK_LT(learned_info->solution.GetCost(), best_cost);
      best_cost = learned_info->solution.GetCost();
    } else if (sat_status == sat::SatSolver::INFEASIBLE) {
      // The solution is proved optimal (if any).
      learned_info->lower_bound = best_cost;
      return best_cost == std::numeric_limits<int64_t>::max()
                 ? BopOptimizerBase::INFEASIBLE
                 : BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }

    // The number of failures is a good approximation of the number of
    // conflicts. Note that the number of failures of the SAT solver is not
    // reinitialized.
    remaining_num_conflicts -=
        sat_propagator_->num_failures() - old_num_failures;
  }

  // Restore sat_propagator_ to its original state.
  // Note that if the function is aborted before that, it means we solved the
  // problem to optimality (or proven it to be infeasible), so we don't need
  // to do any extra work in these cases since the sat_propagator_ will not be
  // used anymore.
  CHECK_EQ(0, sat_propagator_->AssumptionLevel());
  sat_propagator_->RestoreSolverToAssumptionLevel();
  sat_propagator_->SetParameters(saved_params);
  sat_propagator_->ResetDecisionHeuristic();
  for (const auto [literal, weight] : saved_prefs) {
    sat_propagator_->SetAssignmentPreference(literal, weight);
  }

  // This can be proved during the call to RestoreSolverToAssumptionLevel().
  if (sat_propagator_->IsModelUnsat()) {
    // The solution is proved optimal (if any).
    learned_info->lower_bound = best_cost;
    return best_cost == std::numeric_limits<int64_t>::max()
               ? BopOptimizerBase::INFEASIBLE
               : BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
  }

  ExtractLearnedInfoFromSatSolver(sat_propagator_, learned_info);

  return solution_found ? BopOptimizerBase::SOLUTION_FOUND
                        : BopOptimizerBase::LIMIT_REACHED;
}

//------------------------------------------------------------------------------
// LinearRelaxation
//------------------------------------------------------------------------------
LinearRelaxation::LinearRelaxation(const BopParameters& parameters,
                                   absl::string_view name)
    : BopOptimizerBase(name),
      parameters_(parameters),
      state_update_stamp_(ProblemState::kInitialStampValue),
      lp_model_loaded_(false),
      num_full_solves_(0),
      lp_model_(),
      lp_solver_(),
      scaling_(1),
      offset_(0),
      num_fixed_variables_(-1),
      problem_already_solved_(false),
      scaled_solution_cost_(glop::kInfinity) {}

LinearRelaxation::~LinearRelaxation() = default;

BopOptimizerBase::Status LinearRelaxation::SynchronizeIfNeeded(
    const ProblemState& problem_state) {
  if (state_update_stamp_ == problem_state.update_stamp()) {
    return BopOptimizerBase::ABORT;
  }
  state_update_stamp_ = problem_state.update_stamp();

  // If this is a pure feasibility problem, obey
  // `BopParameters.max_lp_solve_for_feasibility_problems`.
  if (problem_state.original_problem().objective().literals_size() == 0 &&
      parameters_.max_lp_solve_for_feasibility_problems() >= 0 &&
      num_full_solves_ >= parameters_.max_lp_solve_for_feasibility_problems()) {
    return BopOptimizerBase::ABORT;
  }

  // Check if the number of fixed variables is greater than last time.
  // TODO(user): Consider checking changes in number of conflicts too.
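  // The LP is re-solved only when at least one more variable has been fixed
  // since the previous solve; otherwise the relaxation is unchanged and
  // solving it again cannot produce new information.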
  int num_fixed_variables = 0;
  for (const bool is_fixed : problem_state.is_fixed()) {
    if (is_fixed) {
      ++num_fixed_variables;
    }
  }
  problem_already_solved_ =
      problem_already_solved_ && num_fixed_variables_ >= num_fixed_variables;
  if (problem_already_solved_) return BopOptimizerBase::ABORT;

  // Create the LP model based on the current problem state.
  num_fixed_variables_ = num_fixed_variables;
  if (!lp_model_loaded_) {
    lp_model_.Clear();
    ConvertBooleanProblemToLinearProgram(problem_state.original_problem(),
                                         &lp_model_);
    lp_model_loaded_ = true;
  }
  for (VariableIndex var(0); var < problem_state.is_fixed().size(); ++var) {
    if (problem_state.IsVariableFixed(var)) {
      const glop::Fractional value =
          problem_state.GetVariableFixedValue(var) ? 1.0 : 0.0;
      lp_model_.SetVariableBounds(ColIndex(var.value()), value, value);
    }
  }

  // Add learned binary clauses.
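  // Each learned clause (la or lb) is encoded as the linear row
  //   coefficient_a * x_a + coefficient_b * x_b >= rhs,
  // where a positive literal contributes +x and a negated literal contributes
  // -x with a constant 1 folded into rhs. For example, (x or not(y)) becomes
  // x - y >= 0.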
  if (parameters_.use_learned_binary_clauses_in_lp()) {
    for (const sat::BinaryClause& clause :
         problem_state.NewlyAddedBinaryClauses()) {
      const RowIndex constraint_index = lp_model_.CreateNewConstraint();
      const int64_t coefficient_a = clause.a.IsPositive() ? 1 : -1;
      const int64_t coefficient_b = clause.b.IsPositive() ? 1 : -1;
      const int64_t rhs = 1 + (clause.a.IsPositive() ? 0 : -1) +
                          (clause.b.IsPositive() ? 0 : -1);
      const ColIndex col_a(clause.a.Variable().value());
      const ColIndex col_b(clause.b.Variable().value());
      const std::string name_a = lp_model_.GetVariableName(col_a);
      const std::string name_b = lp_model_.GetVariableName(col_b);

      lp_model_.SetConstraintName(
          constraint_index,
          (clause.a.IsPositive() ? name_a : "not(" + name_a + ")") + " or " +
              (clause.b.IsPositive() ? name_b : "not(" + name_b + ")"));
      lp_model_.SetCoefficient(constraint_index, col_a, coefficient_a);
      lp_model_.SetCoefficient(constraint_index, col_b, coefficient_b);
      lp_model_.SetConstraintBounds(constraint_index, rhs, glop::kInfinity);
    }
  }

  scaling_ = problem_state.original_problem().objective().scaling_factor();
  offset_ = problem_state.original_problem().objective().offset();
  scaled_solution_cost_ =
      problem_state.solution().IsFeasible()
          ? problem_state.solution().GetScaledCost()
          : (lp_model_.IsMaximizationProblem() ? -glop::kInfinity
                                               : glop::kInfinity);
  return BopOptimizerBase::CONTINUE;
}

// Always let the LP solver run if there is an objective. If there isn't, only
// let the LP solver run if the user asked for it by setting
// `BopParameters.max_lp_solve_for_feasibility_problems` to a non-zero value
// (a negative value means no limit).
// TODO(user): also deal with problem_already_solved_
bool LinearRelaxation::ShouldBeRun(const ProblemState& problem_state) const {
  return problem_state.original_problem().objective().literals_size() > 0 ||
         parameters_.max_lp_solve_for_feasibility_problems() != 0;
}

BopOptimizerBase::Status LinearRelaxation::Optimize(
    const BopParameters& parameters, const ProblemState& problem_state,
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  const BopOptimizerBase::Status sync_status =
      SynchronizeIfNeeded(problem_state);
  if (sync_status != BopOptimizerBase::CONTINUE) {
    return sync_status;
  }

  const glop::ProblemStatus lp_status = Solve(false, time_limit);
  VLOG(1) << " LP: "
          << absl::StrFormat("%.6f", lp_solver_.GetObjectiveValue())
          << " status: " << GetProblemStatusString(lp_status);

  if (lp_status == glop::ProblemStatus::OPTIMAL ||
      lp_status == glop::ProblemStatus::IMPRECISE) {
    ++num_full_solves_;
    problem_already_solved_ = true;
  }

  if (lp_status == glop::ProblemStatus::INIT) {
    return BopOptimizerBase::LIMIT_REACHED;
  }
  if (lp_status != glop::ProblemStatus::OPTIMAL &&
      lp_status != glop::ProblemStatus::IMPRECISE &&
      lp_status != glop::ProblemStatus::PRIMAL_FEASIBLE) {
    return BopOptimizerBase::ABORT;
  }
  learned_info->lp_values = lp_solver_.variable_values();

  if (lp_status == glop::ProblemStatus::OPTIMAL) {
    // The LP returns the objective with the offset and scaled, so we need to
    // unscale it and then remove the offset.
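    // Concretely, the bound computed below is
    //   (scaled_lp_objective + tolerance_sign * feasibility_tolerance)
    //       / scaling_factor - offset,
    // with tolerance_sign chosen so that the tolerance can only weaken the
    // bound; ceil() then rounds up since the true objective is integral.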
    double lower_bound = lp_solver_.GetObjectiveValue();
    if (parameters_.use_lp_strong_branching()) {
      lower_bound =
          ComputeLowerBoundUsingStrongBranching(learned_info, time_limit);
      VLOG(1) << " LP: "
              << absl::StrFormat("%.6f", lower_bound)
              << " using strong branching.";
    }

    const int tolerance_sign = scaling_ < 0 ? 1 : -1;
    const double unscaled_cost =
        (lower_bound +
         tolerance_sign *
             lp_solver_.GetParameters().solution_feasibility_tolerance()) /
            scaling_ -
        offset_;
    learned_info->lower_bound = static_cast<int64_t>(ceil(unscaled_cost));

    if (AllIntegralValues(
            learned_info->lp_values,
            lp_solver_.GetParameters().primal_feasibility_tolerance())) {
      DenseRowToBopSolution(learned_info->lp_values, &learned_info->solution);
      CHECK(learned_info->solution.IsFeasible());
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
  }

  return BopOptimizerBase::INFORMATION_FOUND;
}

// TODO(user): It is possible to stop the search earlier using the glop
// parameter objective_lower_limit / objective_upper_limit. That
// can be used when a feasible solution is known, or when the false
// best bound is computed.
glop::ProblemStatus LinearRelaxation::Solve(bool incremental_solve,
                                            TimeLimit* time_limit) {
  GlopParameters glop_params;
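  // Incremental solves (used during strong branching) switch to the dual
  // simplex, allow the algorithm to change, and disable preprocessing, which
  // lets Glop re-use its previous state when only variable bounds have
  // changed.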
  if (incremental_solve) {
    glop_params.set_use_dual_simplex(true);
    glop_params.set_allow_simplex_algorithm_change(true);
    glop_params.set_use_preprocessing(false);
    lp_solver_.SetParameters(glop_params);
  }
  NestedTimeLimit nested_time_limit(time_limit, time_limit->GetTimeLeft(),
                                    parameters_.lp_max_deterministic_time());
  const glop::ProblemStatus lp_status = lp_solver_.SolveWithTimeLimit(
      lp_model_, nested_time_limit.GetTimeLimit());
  return lp_status;
}

double LinearRelaxation::ComputeLowerBoundUsingStrongBranching(
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  const glop::DenseRow initial_lp_values = lp_solver_.variable_values();
  const double tolerance =
      lp_solver_.GetParameters().primal_feasibility_tolerance();
  double best_lp_objective = lp_solver_.GetObjectiveValue();
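  // Strong branching: each candidate variable is tentatively fixed to 1 and
  // then to 0, re-solving the LP both times. For a minimization problem the
  // bound contributed by a variable is min(objective_true, objective_false),
  // and the strongest (largest) such bound over all variables is kept. A
  // branch whose objective is worse than the known solution also proves that
  // the variable can be fixed to the opposite value.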
  for (glop::ColIndex col(0); col < initial_lp_values.size(); ++col) {
    // TODO(user): Order the variables by some meaningful quantity (probably
    // the cost variation when we snap it to one of its bound) so
    // we can try the one that seems the most promising first.
    // That way we can stop the strong branching earlier.
    if (time_limit->LimitReached()) break;

    // Skip fixed variables.
    if (lp_model_.variable_lower_bounds()[col] ==
        lp_model_.variable_upper_bounds()[col]) {
      continue;
    }
    CHECK_EQ(0.0, lp_model_.variable_lower_bounds()[col]);
    CHECK_EQ(1.0, lp_model_.variable_upper_bounds()[col]);

    // Note(user): Experiments show that iterating on all variables can be
    // costly and doesn't lead to better solutions when a SAT optimizer is used
    // afterward, e.g. BopSatLpFirstSolutionGenerator, and no feasible solutions
    // are available.
    // No variables are skipped when a feasible solution is known, as the best
    // bound / cost comparison can be used to deduce fixed variables, and be
    // useful for other optimizers.
    if ((scaled_solution_cost_ == glop::kInfinity ||
         scaled_solution_cost_ == -glop::kInfinity) &&
        (initial_lp_values[col] < tolerance ||
         initial_lp_values[col] + tolerance > 1)) {
      continue;
    }

    double objective_true = best_lp_objective;
    double objective_false = best_lp_objective;

    // Set to true.
    lp_model_.SetVariableBounds(col, 1.0, 1.0);
    const glop::ProblemStatus status_true = Solve(true, time_limit);
    // TODO(user): Deal with PRIMAL_INFEASIBLE, DUAL_INFEASIBLE and
    //             INFEASIBLE_OR_UNBOUNDED statuses. In all cases, if the
    //             original lp was feasible, this means that the variable can
    //             be fixed to the other bound.
    if (status_true == glop::ProblemStatus::OPTIMAL ||
        status_true == glop::ProblemStatus::DUAL_FEASIBLE) {
      objective_true = lp_solver_.GetObjectiveValue();

      // Set to false.
      lp_model_.SetVariableBounds(col, 0.0, 0.0);
      const glop::ProblemStatus status_false = Solve(true, time_limit);
      if (status_false == glop::ProblemStatus::OPTIMAL ||
          status_false == glop::ProblemStatus::DUAL_FEASIBLE) {
        objective_false = lp_solver_.GetObjectiveValue();

        // Compute the new min.
        best_lp_objective =
            lp_model_.IsMaximizationProblem()
                ? std::min(best_lp_objective,
                           std::max(objective_true, objective_false))
                : std::max(best_lp_objective,
                           std::min(objective_true, objective_false));
      }
    }

    if (CostIsWorseThanSolution(objective_true, tolerance)) {
      // Having variable col set to true can't possibly lead to a better
      // solution than the current one. Set the variable to false.
      lp_model_.SetVariableBounds(col, 0.0, 0.0);
      learned_info->fixed_literals.push_back(
          sat::Literal(sat::BooleanVariable(col.value()), false));
    } else if (CostIsWorseThanSolution(objective_false, tolerance)) {
      // Having variable col set to false can't possibly lead to a better
      // solution than the current one. Set the variable to true.
      lp_model_.SetVariableBounds(col, 1.0, 1.0);
      learned_info->fixed_literals.push_back(
          sat::Literal(sat::BooleanVariable(col.value()), true));
    } else {
      // Unset. It is safe to use 0.0 and 1.0 since the variable is not fixed.
      lp_model_.SetVariableBounds(col, 0.0, 1.0);
    }
  }
  return best_lp_objective;
}

bool LinearRelaxation::CostIsWorseThanSolution(double scaled_cost,
                                               double tolerance) const {
  return lp_model_.IsMaximizationProblem()
             ? scaled_cost + tolerance < scaled_solution_cost_
             : scaled_cost > scaled_solution_cost_ + tolerance;
}

}  // namespace bop
}  // namespace operations_research