Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
feasibility_jump.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/feasibility_jump.h"
15
16#include <stdlib.h>
17
18#include <algorithm>
19#include <atomic>
20#include <cmath>
21#include <cstdint>
22#include <functional>
23#include <limits>
24#include <memory>
25#include <string>
26#include <tuple>
27#include <utility>
28#include <vector>
29
30#include "absl/functional/any_invocable.h"
31#include "absl/functional/bind_front.h"
32#include "absl/functional/function_ref.h"
33#include "absl/log/check.h"
34#include "absl/random/bit_gen_ref.h"
35#include "absl/random/distributions.h"
36#include "absl/strings/str_cat.h"
37#include "absl/types/span.h"
42#include "ortools/sat/cp_model.pb.h"
47#include "ortools/sat/sat_parameters.pb.h"
50#include "ortools/sat/util.h"
53
54namespace operations_research::sat {
55
56namespace {
57// How much do we discount moves we might fix later.
58constexpr double kCompoundDiscount = 1. / 1024;
59} // namespace
60
61void JumpTable::SetComputeFunction(
62 absl::AnyInvocable<std::pair<int64_t, double>(int) const> compute_jump) {
63 compute_jump_ = std::move(compute_jump);
64}
65
66void JumpTable::RecomputeAll(int num_variables) {
67 deltas_.resize(num_variables);
68 scores_.resize(num_variables);
69 needs_recomputation_.assign(num_variables, true);
70}
71
72void JumpTable::SetJump(int var, int64_t delta, double score) {
73 deltas_[var] = delta;
74 scores_[var] = score;
75 needs_recomputation_[var] = false;
76}
77
78void JumpTable::Recompute(int var) { needs_recomputation_[var] = true; }
79
80bool JumpTable::JumpIsUpToDate(int var) const {
81 const auto& [delta, score] = compute_jump_(var);
82 if (delta != deltas_[var]) {
83 LOG(ERROR) << "Incorrect delta for var " << var << ": " << deltas_[var]
84 << " (should be " << delta << ")";
85 }
86 bool score_ok = true;
87 if (std::abs(score - scores_[var]) / std::max(std::abs(score), 1.0) > 1e-2) {
88 score_ok = false;
89 LOG(ERROR) << "Incorrect score for var " << var << ": " << scores_[var]
90 << " (should be " << score << ") "
91 << " delta = " << delta;
92 }
93 return delta == deltas_[var] && score_ok;
94}
95
96std::pair<int64_t, double> JumpTable::GetJump(int var) {
97 if (needs_recomputation_[var]) {
98 needs_recomputation_[var] = false;
99 std::tie(deltas_[var], scores_[var]) = compute_jump_(var);
100 }
101 return std::make_pair(deltas_[var], scores_[var]);
102}
103
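// [Editorial aside, not part of the original file] A minimal sketch of how the
// lazy JumpTable contract above can be driven; the compute function below is a
// hypothetical stand-in for ComputeLinearJump()/ComputeGeneralJump():
//
//   JumpTable jumps;
//   jumps.SetComputeFunction([](int var) {
//     return std::make_pair(int64_t{1}, -1.0);  // Always "jump +1, score -1".
//   });
//   jumps.RecomputeAll(/*num_variables=*/4);       // Mark every var as stale.
//   const auto [delta, score] = jumps.GetJump(2);  // Lazily calls compute once.
//   jumps.SetJump(2, -delta, -score);              // Cache a known jump directly.
//   jumps.Recompute(2);                            // Invalidate it again.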
104SharedLsStates::~SharedLsStates() {
105 // Do a final collection.
106 for (int i = 0; i < states_.size(); ++i) {
107 CollectStatistics(*states_[i].get());
108 }
109
110 // Display aggregated states
111 for (const auto& [options, counters] : options_to_stats_) {
112 stat_tables_->AddLsStat(
113 absl::StrCat(name_, "_", options.name()), counters.num_batches,
114 options_to_num_restarts_[options] + counters.num_perturbations,
115 counters.num_linear_moves, counters.num_general_moves,
116 counters.num_compound_moves, counters.num_backtracks,
117 counters.num_weight_updates, counters.num_scores_computed);
118 }
119}
120
121FeasibilityJumpSolver::~FeasibilityJumpSolver() {
122 stat_tables_->AddTimingStat(*this);
123}
124
125void FeasibilityJumpSolver::ImportState() {
126 state_ = states_->GetNextState();
127 if (state_->move == nullptr) {
128 const int num_variables = var_domains_.size();
129 state_->move = std::make_unique<CompoundMoveBuilder>(num_variables);
130 }
131}
132
133void FeasibilityJumpSolver::ReleaseState() { states_->Release(state_); }
134
135void FeasibilityJumpSolver::Initialize() {
136 is_initialized_ = true;
137
138 // For now we just disable or enable it.
139 // But in the future we might have more variation.
140 if (params_.feasibility_jump_linearization_level() == 0) {
141 evaluator_ =
142 std::make_unique<LsEvaluator>(linear_model_->model_proto(), params_);
143 } else {
144 evaluator_ =
145 std::make_unique<LsEvaluator>(linear_model_->model_proto(), params_,
146 linear_model_->ignored_constraints(),
147 linear_model_->additional_constraints());
148 }
149
150 const int num_variables = linear_model_->model_proto().variables().size();
151 var_domains_.resize(num_variables);
152 for (int v = 0; v < num_variables; ++v) {
153 var_domains_.Set(
154 v, ReadDomainFromProto(linear_model_->model_proto().variables(v)));
155 }
156 var_domains_.InitializeObjective(linear_model_->model_proto());
157
158 vars_to_scan_.ClearAndReserve(num_variables);
159 var_occurs_in_non_linear_constraint_.resize(num_variables);
160 for (int c = 0; c < evaluator_->NumNonLinearConstraints(); ++c) {
161 for (int v : evaluator_->GeneralConstraintToVars(c)) {
162 var_occurs_in_non_linear_constraint_[v] = true;
163 }
164 }
165}
166
167namespace {
168
169int64_t ComputeRange(int64_t range, double range_ratio) {
170 return static_cast<int64_t>(
171 std::ceil(static_cast<double>(range) * range_ratio));
172}
173
174// TODO(user): Optimize and move to the Domain class.
175// TODO(user): Improve entropy on non continuous domains.
176int64_t RandomValueNearMin(const Domain& domain, double range_ratio,
177 absl::BitGenRef random) {
178 if (domain.Size() == 1) return domain.FixedValue();
179 if (domain.Size() == 2) {
180 return absl::Bernoulli(random, 1 - range_ratio) ? domain.Min()
181 : domain.Max();
182 }
183 const int64_t range = ComputeRange(domain.Max() - domain.Min(), range_ratio);
184 return domain.ValueAtOrBefore(domain.Min() +
185 absl::LogUniform<int64_t>(random, 0, range));
186}
187
188int64_t RandomValueNearMax(const Domain& domain, double range_ratio,
189 absl::BitGenRef random) {
190 if (domain.Size() == 1) return domain.FixedValue();
191 if (domain.Size() == 2) {
192 return absl::Bernoulli(random, 1 - range_ratio) ? domain.Max()
193 : domain.Min();
194 }
195 const int64_t range = ComputeRange(domain.Max() - domain.Min(), range_ratio);
196 return domain.ValueAtOrAfter(domain.Max() -
197 absl::LogUniform<int64_t>(random, 0, range));
198}
199
200int64_t RandomValueNearValue(const Domain& domain, int64_t value,
201 double range_ratio, absl::BitGenRef random) {
202 DCHECK(!domain.IsFixed());
203
204 if (domain.Min() >= value) {
205 return RandomValueNearMin(domain, range_ratio, random);
206 }
207
208 if (domain.Max() <= value) {
209 return RandomValueNearMax(domain, range_ratio, random);
210 }
211
212 // Split up or down, and choose value in split domain.
213 const Domain greater_domain =
214 domain.IntersectionWith({value + 1, domain.Max()});
215 const double choose_greater_probability =
216 static_cast<double>(greater_domain.Size()) /
217 static_cast<double>(domain.Size() - 1);
218 if (absl::Bernoulli(random, choose_greater_probability)) {
219 return RandomValueNearMin(greater_domain, range_ratio, random);
220 } else {
221 return RandomValueNearMax(
222 domain.IntersectionWith({domain.Min(), value - 1}), range_ratio,
223 random);
224 }
225}
226
227} // namespace
228
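// [Editorial aside, not part of the original file] Worked example for the
// helpers above: with a domain [0, 100] and range_ratio = 0.1, ComputeRange()
// yields ceil(100 * 0.1) = 10, so RandomValueNearMin() draws a log-uniform
// offset in [0, 10] and returns a value in [0, 10] biased toward 0, while
// RandomValueNearMax() symmetrically returns a value in [90, 100] biased
// toward 100. RandomValueNearValue(domain, 50, ...) first picks one side of 50
// (proportionally to its size) and then samples near the boundary of that side.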
229void FeasibilityJumpSolver::ResetCurrentSolution(
230 bool use_hint, bool use_objective, double perturbation_probability) {
231 const int num_variables = linear_model_->model_proto().variables().size();
232 const double default_value_probability = 1.0 - perturbation_probability;
233 const double range_ratio =
234 params_.feasibility_jump_var_perburbation_range_ratio();
235 std::vector<int64_t>& solution = state_->solution;
236 state_->base_solution = nullptr;
237
238 // Resize the solution if needed.
239 solution.resize(num_variables);
240
241 // Starts with values closest to zero.
242 for (int var = 0; var < num_variables; ++var) {
243 if (var_domains_[var].IsFixed()) {
244 solution[var] = var_domains_[var].FixedValue();
245 continue;
246 }
247
248 if (absl::Bernoulli(random_, default_value_probability)) {
249 solution[var] = var_domains_[var].SmallestValue();
250 } else {
251 solution[var] =
252 RandomValueNearValue(var_domains_[var], 0, range_ratio, random_);
253 }
254 }
255
256 // Use objective half of the time (if the model has one).
257 if (use_objective && linear_model_->model_proto().has_objective()) {
258 const int num_terms =
259 linear_model_->model_proto().objective().vars().size();
260 for (int i = 0; i < num_terms; ++i) {
261 const int var = linear_model_->model_proto().objective().vars(i);
262 if (var_domains_[var].IsFixed()) continue;
263
264 if (linear_model_->model_proto().objective().coeffs(i) > 0) {
265 if (absl::Bernoulli(random_, default_value_probability)) {
266 solution[var] = var_domains_[var].Min();
267 } else {
268 solution[var] =
269 RandomValueNearMin(var_domains_[var], range_ratio, random_);
270 }
271 } else {
272 if (absl::Bernoulli(random_, default_value_probability)) {
273 solution[var] = var_domains_[var].Max();
274 } else {
275 solution[var] =
276 RandomValueNearMax(var_domains_[var], range_ratio, random_);
277 }
278 }
279 }
280 }
281
282 if (use_hint && linear_model_->model_proto().has_solution_hint()) {
283 const auto& hint = linear_model_->model_proto().solution_hint();
284 for (int i = 0; i < hint.vars().size(); ++i) {
285 solution[hint.vars(i)] = hint.values(i);
286 }
287 }
288}
289
290void FeasibilityJumpSolver::PerturbateCurrentSolution(
291 double perturbation_probability) {
292 if (perturbation_probability == 0.0) return;
293 const int num_variables = linear_model_->model_proto().variables().size();
294 const double perturbation_ratio =
295 params_.feasibility_jump_var_perburbation_range_ratio();
296 std::vector<int64_t>& solution = state_->solution;
297 for (int var = 0; var < num_variables; ++var) {
298 if (var_domains_[var].IsFixed()) continue;
299 if (absl::Bernoulli(random_, perturbation_probability)) {
300 solution[var] = RandomValueNearValue(var_domains_[var], solution[var],
301 perturbation_ratio, random_);
302 }
303 }
304}
305
306std::string FeasibilityJumpSolver::OneLineStats() const {
307 // Moves and evaluations in the general iterations.
308 const std::string general_str =
309 state_->counters.num_general_evals == 0 &&
310 state_->counters.num_general_moves == 0
311 ? ""
312 : absl::StrCat(
313 " gen{mvs:", FormatCounter(state_->counters.num_general_moves),
314 " evals:", FormatCounter(state_->counters.num_general_evals),
315 "}");
316 const std::string compound_str =
317 state_->counters.num_compound_moves == 0 &&
318 state_->counters.num_backtracks == 0
319 ? ""
320 : absl::StrCat(" comp{mvs:",
321 FormatCounter(state_->counters.num_compound_moves),
322 " btracks:",
323 FormatCounter(state_->counters.num_backtracks), "}");
324
325 // Improving jumps and infeasible constraints.
326 const int num_infeasible_cts = evaluator_->NumInfeasibleConstraints();
327 const std::string non_solution_str =
328 num_infeasible_cts == 0
329 ? ""
330 : absl::StrCat(" #good_moves:", FormatCounter(vars_to_scan_.size()),
331 " #inf_cts:",
332 FormatCounter(evaluator_->NumInfeasibleConstraints()));
333
334 return absl::StrCat(
335 "batch:", state_->counters.num_batches,
336 " lin{mvs:", FormatCounter(state_->counters.num_linear_moves),
337 " evals:", FormatCounter(state_->counters.num_linear_evals), "}",
338 general_str, compound_str, non_solution_str,
339 " #w_updates:", FormatCounter(state_->counters.num_weight_updates),
340 " #perturb:", FormatCounter(state_->counters.num_perturbations));
341}
342
343std::function<void()> FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) {
344 task_generated_ = true; // Atomic.
345
346 return [this] {
347 // We delay initialization to the first task as it might be a bit slow
348 // to scan the whole model, so we want to do this part in parallel.
349 if (!is_initialized_) Initialize();
350
351 // Load the next state to work on.
352 ImportState();
353
354 // If we found a new best solution, we will restart all violation LS (we
355 // still finish each batch though). We will also reset the Luby sequence.
356 bool new_best_solution_was_found = false;
357 if (type() == SubSolver::INCOMPLETE) {
358 const int64_t best =
359 shared_response_->SolutionsRepository().GetBestRank();
360 if (best < state_->last_solution_rank) {
361 states_->ResetLubyCounter();
362 new_best_solution_was_found = true;
363 state_->last_solution_rank = best;
364 }
365 }
366
367 bool reset_weights = false;
368 if (new_best_solution_was_found || state_->num_batches_before_change <= 0) {
369 reset_weights = true;
370 if (state_->options.use_restart) {
371 states_->CollectStatistics(*state_);
372 state_->options.Randomize(params_, &random_);
373 state_->counters = LsCounters(); // Reset.
374 } else {
375 state_->options.Randomize(params_, &random_);
376 }
377 if (type() == SubSolver::INCOMPLETE) {
378 // This is not used once we have a solution, and setting it to false
379 // allows us to fix the logs.
380 state_->options.use_objective = false;
381 }
382
383 const bool first_time = (state_->num_restarts == 0);
384 if (state_->options.use_restart || first_time ||
385 new_best_solution_was_found) {
386 if (type() == SubSolver::INCOMPLETE) {
387 // Choose a base solution for this neighborhood.
388 std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution>
389 solution = shared_response_->SolutionsRepository()
390 .GetRandomBiasedSolution(random_);
391 state_->solution = solution->variable_values;
392 state_->base_solution = solution;
393 ++state_->num_solutions_imported;
394 } else {
395 if (!first_time) {
396 // Register this solution before we reset the search.
397 const int num_violations = evaluator_->ViolatedConstraints().size();
398 shared_hints_->AddSolution(state_->solution, num_violations);
399 }
400 ResetCurrentSolution(/*use_hint=*/first_time,
401 state_->options.use_objective,
402 state_->options.perturbation_probability);
403 }
404 } else {
405 PerturbateCurrentSolution(
406 params_.feasibility_jump_var_randomization_probability());
407 }
408
409 if (state_->options.use_restart) {
410 ++state_->num_restarts;
411 states_->ConfigureNextLubyRestart(state_);
412 } else {
413 // TODO(user): Tune the improvement constant, maybe use luby.
414 ++state_->counters.num_perturbations;
415 state_->num_batches_before_change =
416 params_.violation_ls_perturbation_period();
417 }
418 }
419
420 // Between chunks, we synchronize bounds.
421 bool recompute_compound_weights = false;
422 if (linear_model_->model_proto().has_objective()) {
423 const IntegerValue lb = shared_response_->GetInnerObjectiveLowerBound();
424 const IntegerValue ub = shared_response_->GetInnerObjectiveUpperBound();
425 if (lb != state_->saved_inner_objective_lb ||
426 ub != state_->saved_inner_objective_ub) {
427 recompute_compound_weights = true;
428 }
429 state_->saved_inner_objective_lb = lb;
430 state_->saved_inner_objective_ub = ub;
431
432 if (ub < lb) return; // Search is finished.
433 if (evaluator_->ReduceObjectiveBounds(lb.value(), ub.value())) {
434 recompute_compound_weights = true;
435 }
436 }
437
438 // Update the variable domains with the last information.
439 if (!var_domains_.UpdateFromSharedBounds()) return;
440
441 // Check that the current solution is compatible with the updated domains.
442 {
443 // Make sure the solution is within the potentially updated domain.
444 // This also initializes var_domains_.CanIncrease()/CanDecrease().
445 const int num_vars = state_->solution.size();
446 for (int var = 0; var < num_vars; ++var) {
447 const int64_t old_value = state_->solution[var];
448 const int64_t new_value = var_domains_[var].ClosestValue(old_value);
449 if (new_value != old_value) {
450 state_->solution[var] = new_value;
451 recompute_compound_weights = true;
452 }
453 var_domains_.OnValueChange(var, new_value);
454 }
455 // Check if compound move search might backtrack out of the new domains.
456 if (!state_->move->StackValuesInDomains(var_domains_.AsSpan())) {
457 recompute_compound_weights = true;
458 }
459 }
460
461 // Search for feasible solution.
462 // We always recompute that since we might have loaded from a different
463 // state.
464 evaluator_->ComputeAllViolations(state_->solution);
465
466 if (reset_weights) {
467 state_->bump_value = 1.0;
468 state_->weights.assign(evaluator_->NumEvaluatorConstraints(), 1.0);
469 recompute_compound_weights = true;
470 }
471 if (recompute_compound_weights) {
472 state_->move->Clear();
473 if (state_->options.use_compound_moves) {
474 state_->compound_weights.assign(state_->weights.begin(),
475 state_->weights.end());
476 for (int c = 0; c < state_->weights.size(); ++c) {
477 if (evaluator_->IsViolated(c)) continue;
478 state_->compound_weights[c] *= kCompoundDiscount;
479 }
480 state_->compound_weight_changed.clear();
481 state_->in_compound_weight_changed.assign(state_->weights.size(),
482 false);
483 state_->compound_move_max_discrepancy = 0;
484 }
485 }
486
487 if (!state_->options.use_compound_moves) {
488 DCHECK_EQ(state_->move->Size(), 0);
489 }
490
491 ++state_->counters.num_batches;
492 if (DoSomeLinearIterations() && DoSomeGeneralIterations()) {
493 // Check for infeasibility induced by the unsupported constraints.
494 if (SolutionIsFeasible(linear_model_->model_proto(), state_->solution)) {
495 auto pointers = PushAndMaybeCombineSolution(
496 shared_response_, linear_model_->model_proto(), state_->solution,
497 absl::StrCat(name(), "_", state_->options.name(), "(",
498 OneLineStats(), ")"),
499 state_->base_solution == nullptr
500 ? absl::Span<const int64_t>()
501 : state_->base_solution->variable_values,
502 /*model=*/nullptr);
503 // If we pushed a new solution, we use it as a new "base" so that we
504 // will have a smaller delta on the next solution we find.
505 state_->base_solution = pointers.pushed_solution;
506 } else {
507 shared_response_->LogMessage(name(), "infeasible solution. Aborting.");
508 model_is_supported_ = false;
509 }
510 }
511
512 // Update dtime.
513 // Since we execute only one task at a time, this is safe.
514 {
515 // TODO(user): Find better names. DeterministicTime() is maintained by
516 // this class while deterministic_time() is the one saved in the SubSolver
517 // base class.
518 const double current_dtime = DeterministicTime();
519 const double delta = current_dtime - deterministic_time();
520
521 // Because deterministic_time() is computed as a sum of differences, it
522 // might be slightly different from DeterministicTime(), and we don't want
523 // to go backward, even by 1e-18.
524 if (delta >= 0) {
525 AddTaskDeterministicDuration(delta);
526 shared_time_limit_->AdvanceDeterministicTime(delta);
527 }
528 }
529
530 ReleaseState();
531 task_generated_ = false; // Atomic.
532 };
533}
534
535double FeasibilityJumpSolver::ComputeScore(absl::Span<const double> weights,
536 int var, int64_t delta,
537 bool linear_only) {
538 ++state_->counters.num_scores_computed;
539 double score = evaluator_->WeightedViolationDelta(
540 linear_only, weights, var, delta, absl::MakeSpan(state_->solution));
541 constexpr double kEpsilon = 1.0 / std::numeric_limits<int64_t>::max();
542 score += kEpsilon * delta * evaluator_->ObjectiveCoefficient(var);
543 return score;
544}
545
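// [Editorial aside, not part of the original file] The kEpsilon term above
// makes the objective a pure tie-breaker: with kEpsilon = 1 / 2^63 ~= 1.1e-19,
// even a large objective contribution such as delta = 1'000 with coefficient
// 1e6 only adds about 1e9 * 1.1e-19 ~= 1e-10 to the score, far smaller than
// any unit change in weighted constraint violation, so reducing violations
// always dominates the move selection.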
546std::pair<int64_t, double> FeasibilityJumpSolver::ComputeLinearJump(int var) {
547 DCHECK(!var_domains_[var].IsFixed());
548 const int64_t current_value = state_->solution[var];
549
550 ++state_->counters.num_linear_evals;
551 const LinearIncrementalEvaluator& linear_evaluator =
552 evaluator_->LinearEvaluator();
553
554 if (var_domains_.HasTwoValues(var)) {
555 const int64_t min_value = var_domains_[var].Min();
556 const int64_t max_value = var_domains_[var].Max();
557 const int64_t delta = current_value == min_value ? max_value - min_value
558 : min_value - max_value;
559 return std::make_pair(
560 delta, ComputeScore(ScanWeights(), var, delta, /*linear_only=*/true));
561 }
562
563 // In practice, after a few iterations, the chance of finding an improving
564 // move is slim, and we can test that fairly easily with at most two
565 // queries!
566 //
567 // Tricky/Annoying: if the value is not in the domain, we return it.
568 const int64_t p1 = var_domains_[var].ValueAtOrBefore(current_value - 1);
569 const int64_t p2 = var_domains_[var].ValueAtOrAfter(current_value + 1);
570
571 std::pair<int64_t, double> best_jump;
572 const double v1 = var_domains_[var].Contains(p1)
573 ? ComputeScore(ScanWeights(), var, p1 - current_value,
574 /*linear_only=*/true)
575 : std::numeric_limits<double>::infinity();
576 if (v1 < 0.0) {
577 // Point p1 is improving. Look for best before it.
578 // Note that we can exclude all point after current_value since it is
579 // worse and we assume convexity.
580 const Domain dom = var_domains_[var].IntersectionWith(
581 Domain(std::numeric_limits<int64_t>::min(), p1 - 1));
582 if (dom.IsEmpty()) {
583 best_jump = {p1, v1};
584 } else {
585 tmp_breakpoints_ =
586 linear_evaluator.SlopeBreakpoints(var, current_value, dom);
587 best_jump = ConvexMinimum<int64_t, double>(
588 /*is_to_the_right=*/true, {p1, v1}, tmp_breakpoints_,
589 [this, var, current_value](int64_t jump_value) {
590 return ComputeScore(ScanWeights(), var, jump_value - current_value,
591 /*linear_only=*/true);
592 });
593 }
594 } else {
595 const double v2 = var_domains_[var].Contains(p2)
596 ? ComputeScore(ScanWeights(), var, p2 - current_value,
597 /*linear_only=*/true)
598 : std::numeric_limits<double>::infinity();
599 if (v2 < 0.0) {
600 // Point p2 is improving. Look for best after it.
601 // Similarly, we exclude the other points by convexity.
602 const Domain dom = var_domains_[var].IntersectionWith(
603 Domain(p2 + 1, std::numeric_limits<int64_t>::max()));
604 if (dom.IsEmpty()) {
605 best_jump = {p2, v2};
606 } else {
607 tmp_breakpoints_ =
608 linear_evaluator.SlopeBreakpoints(var, current_value, dom);
609 best_jump = ConvexMinimum<int64_t, double>(
610 /*is_to_the_right=*/false, {p2, v2}, tmp_breakpoints_,
611 [this, var, current_value](int64_t jump_value) {
612 return ComputeScore(ScanWeights(), var,
613 jump_value - current_value,
614 /*linear_only=*/true);
615 });
616 }
617 } else {
618 // We have no improving point; the result is either p1 or p2. This is the
619 // most common scenario, and requires no breakpoint computation!
620 // Choose the direction which increases violation the least,
621 // disambiguating by best objective.
622 if (v1 < v2) {
623 best_jump = {p1, v1};
624 } else {
625 best_jump = {p2, v2};
626 }
627 }
628 }
629 DCHECK_NE(best_jump.first, current_value);
630 return std::make_pair(best_jump.first - current_value, best_jump.second);
631}
632
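// [Editorial aside, not part of the original file] Concrete illustration of the
// probing logic above: for a variable with domain [0, 10] currently at value 5,
// p1 = 4 and p2 = 6 are scored first. Only if one of them improves (negative
// score) do we pay for SlopeBreakpoints() plus a one-sided ConvexMinimum()
// search over the remaining half of the domain; in the common case where
// neither probe improves, the better of p1/p2 is returned directly with no
// breakpoint computation.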
633std::pair<int64_t, double> FeasibilityJumpSolver::ComputeGeneralJump(int var) {
634 if (!var_occurs_in_non_linear_constraint_[var]) {
635 return ComputeLinearJump(var);
636 }
637 Domain domain = var_domains_[var];
638 if (domain.IsFixed()) return std::make_pair(0, 0.0);
639
640 ++state_->counters.num_general_evals;
641 const int64_t current_value = state_->solution[var];
642 domain = domain.IntersectionWith(
643 Domain(current_value, current_value).Complement());
644 std::pair<int64_t, double> result;
645 for (int i = 0; i < domain.NumIntervals(); ++i) {
646 const int64_t min_delta = domain[i].start - current_value;
647 const int64_t max_delta = domain[i].end - current_value;
648 const auto& [delta, score] = RangeConvexMinimum<int64_t, double>(
649 min_delta, max_delta + 1, [&](int64_t delta) -> double {
650 return ComputeScore(ScanWeights(), var, delta, /*linear_only=*/false);
651 });
652 if (i == 0 || score < result.second) {
653 result = std::make_pair(delta, score);
654 }
655 }
656 DCHECK(domain.Contains(current_value + result.first))
657 << current_value << "+" << result.first << " not in domain "
658 << domain.ToString();
659 return result;
660}
661
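// [Editorial aside, not part of the original file] Sketch of the
// RangeConvexMinimum() call used above, on a hypothetical convex function over
// an integer range (assuming only that the callable is convex on [begin, end)):
//
//   // Minimizes (x - 2)^2 over x in [-5, 6); returns the pair {2, 0.0}.
//   const auto [best_x, best_f] = RangeConvexMinimum<int64_t, double>(
//       -5, 6, [](int64_t x) -> double {
//         return static_cast<double>((x - 2) * (x - 2));
//       });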
662void FeasibilityJumpSolver::UpdateViolatedConstraintWeights() {
663 ++state_->counters.num_weight_updates;
664
665 // Because we update the weights incrementally, it is better not to have a
666 // super high magnitude; otherwise doing +max_weight and then -max_weight
667 // will just ignore any constraint with a small weight and our
668 // DCHECK(JumpIsUpToDate(var)) will fail more often.
669 const double kMaxWeight = 1e10;
670 const double kBumpFactor = 1.0 / params_.feasibility_jump_decay();
671 const int num_variables = var_domains_.size();
672 if (state_->options.use_decay) {
673 state_->bump_value *= kBumpFactor;
674 }
675
676 // Note that ViolatedConstraints() might contain only linear constraints
677 // depending on how it was initialized and updated.
678 bool rescale = false;
679 num_ops_ += evaluator_->ViolatedConstraints().size();
680 for (const int c : evaluator_->ViolatedConstraints()) {
681 DCHECK(evaluator_->IsViolated(c));
682 if (state_->options.use_compound_moves) {
683 DCHECK_EQ(state_->compound_weights[c], state_->weights[c]);
684 }
685 state_->weights[c] += state_->bump_value;
686 if (state_->options.use_compound_moves) {
687 state_->compound_weights[c] = state_->weights[c];
688 }
689 if (state_->weights[c] > kMaxWeight) {
690 rescale = true;
691 }
692 }
693
694 if (rescale) {
695 const double factor = 1.0 / kMaxWeight;
696 state_->bump_value *= factor;
697 for (int c = 0; c < state_->weights.size(); ++c) {
698 state_->weights[c] *= factor;
699 if (state_->options.use_compound_moves) {
700 state_->compound_weights[c] *= factor;
701 }
702 }
703 jumps_.RecomputeAll(num_variables);
704 return;
705 }
706
707 // Update weights incrementally.
708 //
709 // To maximize floating point precision, we compute the change to jump value
710 // first and then apply it in one go. Also, in most situations the change is
711 // purely integer and should fit exactly on a double, so we don't depend on
712 // the order in which constraints are listed.
713 LinearIncrementalEvaluator* linear_evaluator =
714 evaluator_->MutableLinearEvaluator();
715 linear_evaluator->ClearAffectedVariables();
716 for_weight_update_.resize(num_variables);
717 num_ops_ += evaluator_->ViolatedConstraints().size();
718 for (const int c : evaluator_->ViolatedConstraints()) {
719 if (c < evaluator_->NumLinearConstraints()) {
720 linear_evaluator->UpdateScoreOnWeightUpdate(
721 c, jumps_.Deltas(), absl::MakeSpan(for_weight_update_));
722 } else {
723 for (const int v : evaluator_->ConstraintToVars(c)) {
724 jumps_.Recompute(v);
725 AddVarToScan(v);
726 }
727 }
728 }
729
730 // Recompute the affected jumps.
731 // Note that the constraint violations are unaffected.
732 absl::Span<double> scores = jumps_.MutableScores();
733 for (const int var : linear_evaluator->VariablesAffectedByLastUpdate()) {
734 // Apply the delta.
735 //
736 // TODO(user): We could compute the minimal bump that would lead to a
737 // good move. That might change depending on the jump value though, so
738 // we can only do that easily for Booleans.
739 if (!var_domains_.HasTwoValues(var)) {
740 jumps_.Recompute(var);
741 } else {
742 // We may know the correct score for binary vars.
743 scores[var] += state_->bump_value * for_weight_update_[var];
744 }
745 AddVarToScan(var);
746 }
747}
748
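// [Editorial aside, not part of the original file] Numerical sketch of the
// bump/decay scheme above: with, say, feasibility_jump_decay = 0.95, the bump
// grows by a factor 1/0.95 ~= 1.05 per weight update, so constraints violated
// recently receive proportionally larger increments than ones violated long
// ago. Once any weight exceeds kMaxWeight = 1e10, all weights and the bump are
// rescaled by 1e-10 and every jump is recomputed from scratch, keeping the
// incremental floating-point score updates well conditioned.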
749bool FeasibilityJumpSolver::DoSomeLinearIterations() {
750 if (VLOG_IS_ON(1)) {
751 shared_response_->LogMessageWithThrottling(name(), OneLineStats());
752 }
753
754 // TODO(user): It should be possible to support compound moves with
755 // the specialized linear code, but let's keep it simpler for now.
756 if (state_->options.use_compound_moves) return true;
757
758 evaluator_->RecomputeViolatedList(/*linear_only=*/true);
759 jumps_.SetComputeFunction(
760 absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
761 RecomputeVarsToScan();
762
763 // Do a batch of a given dtime.
764 // Outer loop: when no more greedy moves, update the weight.
765 const double dtime_threshold =
766 DeterministicTime() + params_.feasibility_jump_batch_dtime();
767 while (DeterministicTime() < dtime_threshold) {
768 // Inner loop: greedy descent.
769 while (DeterministicTime() < dtime_threshold) {
770 // Take the best jump score amongst some random candidates.
771 // It is okay if we pick the same one twice; we don't really care.
772 int best_var;
773 int64_t best_value;
774 double best_score;
775 if (!ScanRelevantVariables(/*num_to_scan=*/5, &best_var, &best_value,
776 &best_score)) {
777 break;
778 }
779
780 // Perform the move.
781 ++state_->counters.num_linear_moves;
782 const int64_t prev_value = state_->solution[best_var];
783 state_->solution[best_var] = best_value;
784 evaluator_->UpdateLinearScores(best_var, prev_value, best_value,
785 state_->weights, jumps_.Deltas(),
786 jumps_.MutableScores());
787 evaluator_->UpdateViolatedList();
788 var_domains_.OnValueChange(best_var, best_value);
789
790 MarkJumpsThatNeedToBeRecomputed(best_var);
791 if (var_domains_.HasTwoValues(best_var)) {
792 // We already know the score of undoing the move we just did, and that
793 // this is optimal.
794 jumps_.SetJump(best_var, prev_value - best_value, -best_score);
795 }
796 }
797 if (time_limit_crossed_) return false;
798
799 // We only update the weights when the queue of variables to scan is empty.
800 if (vars_to_scan_.empty()) {
801 // Note that we only count linear constraint as violated here.
802 if (evaluator_->ViolatedConstraints().empty()) return true;
803 UpdateViolatedConstraintWeights();
804 }
805 }
806 return false;
807}
808
809// Update the jump scores.
810//
811// We incrementally maintain the score (except for best_var).
812// However for non-Boolean, we still need to recompute the jump value.
813// We will do that in a lazy fashion.
814//
815// TODO(user): In the paper, they just recompute the scores and only
816// change the jump values when the constraint weight changes. Experiment?
817// Note however that the current code is quite fast.
818//
819 // TODO(user): For non-Booleans, we could easily detect if a non-improving
820 // score cannot become improving. We don't need to add such variables to
821 // the queue.
822void FeasibilityJumpSolver::MarkJumpsThatNeedToBeRecomputed(int changed_var) {
823 // To keep DCHECKs happy. Note that we might overwrite this afterwards with the
824 // known score/jump of undoing the move.
825 jumps_.Recompute(changed_var);
826
827 // Generic part.
828 // No optimization there, we just update all touched variables.
829 // We need to do this before the Linear part, so that the status is correct in
830 // AddVarToScan() for variables with two values.
831 num_ops_ += evaluator_->VarToGeneralConstraints(changed_var).size();
832 for (const int c : evaluator_->VarToGeneralConstraints(changed_var)) {
833 num_ops_ += evaluator_->GeneralConstraintToVars(c).size();
834 for (const int var : evaluator_->GeneralConstraintToVars(c)) {
835 jumps_.Recompute(var);
836 AddVarToScan(var);
837 }
838 }
839
840 // Linear part.
841 num_ops_ += evaluator_->VariablesAffectedByLastLinearUpdate().size();
842 for (const int var : evaluator_->VariablesAffectedByLastLinearUpdate()) {
843 if (!var_domains_.HasTwoValues(var)) {
844 jumps_.Recompute(var);
845 }
846 AddVarToScan(var);
847 }
848}
849
850bool FeasibilityJumpSolver::DoSomeGeneralIterations() {
851 if (!state_->options.use_compound_moves &&
852 evaluator_->NumNonLinearConstraints() == 0) {
853 return true;
854 }
855 // Non-linear constraints are not evaluated in the linear phase.
856 evaluator_->ComputeAllNonLinearViolations(state_->solution);
857 evaluator_->RecomputeViolatedList(/*linear_only=*/false);
858 if (evaluator_->NumNonLinearConstraints() == 0) {
859 jumps_.SetComputeFunction(
860 absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
861 } else {
862 jumps_.SetComputeFunction(
863 absl::bind_front(&FeasibilityJumpSolver::ComputeGeneralJump, this));
864 }
865 RecomputeVarsToScan();
866
867 const double dtime_threshold =
868 DeterministicTime() + params_.feasibility_jump_batch_dtime();
869 while (DeterministicTime() < dtime_threshold) {
870 int var;
871 int64_t new_value;
872 double score;
873 const bool found_move = ScanRelevantVariables(
874 /*num_to_scan=*/3, &var, &new_value, &score);
875 const bool backtrack =
876 !found_move && state_->move->Backtrack(&var, &new_value, &score);
877 if (found_move || backtrack) {
878 if (backtrack) ++state_->counters.num_backtracks;
879 DCHECK_NE(var, -1) << var << " " << found_move << " " << backtrack;
880
881 // Perform the move.
882 ++state_->counters.num_general_moves;
883 const int64_t prev_value = state_->solution[var];
884 DCHECK_NE(prev_value, new_value);
885 state_->solution[var] = new_value;
886
887 // Update the linear part and non-linear part.
888 evaluator_->UpdateLinearScores(var, prev_value, new_value, ScanWeights(),
889 jumps_.Deltas(), jumps_.MutableScores());
890 evaluator_->UpdateNonLinearViolations(var, prev_value, state_->solution);
891 evaluator_->UpdateViolatedList();
892 var_domains_.OnValueChange(var, new_value);
893
894 if (state_->options.use_compound_moves && !backtrack) {
895 // `!backtrack` is just an optimisation - we can never break any new
896 // constraints on backtrack, so we can never change any
897 // compound_weight_.
898 for (const auto& c : evaluator_->last_update_violation_changes()) {
899 if (evaluator_->IsViolated(c) &&
900 state_->compound_weights[c] != state_->weights[c]) {
901 state_->compound_weights[c] = state_->weights[c];
902 if (!state_->in_compound_weight_changed[c]) {
903 state_->in_compound_weight_changed[c] = true;
904 state_->compound_weight_changed.push_back(c);
905 }
906 for (const int v : evaluator_->ConstraintToVars(c)) {
907 jumps_.Recompute(v);
908 // Vars will be added in MarkJumpsThatNeedToBeRecomputed().
909 }
910 } else if (!evaluator_->IsViolated(c) &&
911 !state_->in_compound_weight_changed[c] &&
912 state_->compound_weights[c] == state_->weights[c]) {
913 state_->in_compound_weight_changed[c] = true;
914 state_->compound_weight_changed.push_back(c);
915 }
916 }
917 }
918
919 // Check that the score for undoing the move is -score with both the
920 // default weights (which may be `state_->weights` or
921 // `state_->compound_weights`), and with `weights` explicitly.
922 // TODO(user): Re-enable DCHECK.
923 if (/* DISABLES CODE */ false && !state_->options.use_decay) {
924 DCHECK_EQ(-score, ComputeScore(state_->weights, var,
925 prev_value - new_value, false));
926 DCHECK_EQ(-score, ComputeScore(ScanWeights(), var,
927 prev_value - new_value, false));
928 }
929
930 MarkJumpsThatNeedToBeRecomputed(var);
931 if (var_domains_.HasTwoValues(var)) {
932 // We already know the score of the only possible move (undoing what we
933 // just did).
934 jumps_.SetJump(var, prev_value - new_value, -score);
935 }
936
937 if (state_->options.use_compound_moves && !backtrack) {
938 // Make sure we can undo the move.
939 DCHECK_NE(prev_value, state_->solution[var]);
940 state_->move->Push(var, prev_value, score);
941 if (state_->move->Score() < 0) {
942 state_->counters.num_compound_moves += state_->move->Size();
943 state_->move->Clear();
944 state_->compound_move_max_discrepancy = 0;
945 }
946 }
947 continue;
948 } else if (time_limit_crossed_) {
949 return false;
950 }
951
952 DCHECK_EQ(state_->move->Size(), 0);
953 if (evaluator_->ViolatedConstraints().empty()) return true;
954 if (state_->options.use_compound_moves) {
955 ResetChangedCompoundWeights();
956 }
957 if (!state_->options.use_compound_moves ||
958 ++state_->compound_move_max_discrepancy > 2) {
959 state_->compound_move_max_discrepancy = 0;
960 UpdateViolatedConstraintWeights();
961 }
962 }
963 return false;
964}
965
966void FeasibilityJumpSolver::ResetChangedCompoundWeights() {
967 if (!state_->options.use_compound_moves) return;
968 DCHECK_EQ(state_->move->Size(), 0);
969 num_ops_ += state_->compound_weight_changed.size();
970 for (const int c : state_->compound_weight_changed) {
971 state_->in_compound_weight_changed[c] = false;
972 const double expected_weight =
973 (evaluator_->IsViolated(c) ? 1.0 : kCompoundDiscount) *
974 state_->weights[c];
975 if (state_->compound_weights[c] == expected_weight) continue;
976 state_->compound_weights[c] = expected_weight;
977 num_ops_ += evaluator_->ConstraintToVars(c).size();
978 for (const int var : evaluator_->ConstraintToVars(c)) {
979 jumps_.Recompute(var);
980 AddVarToScan(var);
981 }
982 }
983 state_->compound_weight_changed.clear();
984}
985
986bool FeasibilityJumpSolver::ShouldExtendCompoundMove(double score,
987 double novelty) {
988 if (state_->move->Score() + score - std::max(novelty, 0.0) < 0) {
989 return true;
990 }
991 return score < state_->move->BestChildScore();
992}
993
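// [Editorial aside, not part of the original file] Worked example of the rule
// above: if the compound move built so far has Score() = +3.0 (it made things
// worse), a candidate with score = -4.0 and novelty = 0.5 extends the move
// because 3.0 + (-4.0) - max(0.5, 0.0) = -1.5 < 0, i.e. the whole chain would
// be strictly improving even after discounting the non-novel part of the gain.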
994bool FeasibilityJumpSolver::ScanRelevantVariables(int num_to_scan,
995 int* best_var,
996 int64_t* best_value,
997 double* best_score) {
998 if (time_limit_crossed_) return false;
999 if (state_->move->Discrepancy() > state_->compound_move_max_discrepancy) {
1000 return false;
1001 }
1002 double best_scan_score = 0.0;
1003 int num_good = 0;
1004 int best_index = -1;
1005 *best_var = -1;
1006 *best_score = 0.0;
1007
1008 auto remove_var_to_scan_at_index = [&](int index) {
1009 in_vars_to_scan_[vars_to_scan_[index]] = false;
1010 vars_to_scan_[index] = vars_to_scan_.back();
1011 vars_to_scan_.pop_back();
1012 if (best_index == vars_to_scan_.size()) {
1013 best_index = index;
1014 }
1015 };
1016 while (!vars_to_scan_.empty() && num_good < num_to_scan) {
1017 num_ops_ += 6; // We are slow here.
1018 const int index = absl::Uniform<int>(random_, 0, vars_to_scan_.size());
1019 const int var = vars_to_scan_[index];
1020 DCHECK_GE(var, 0);
1021 DCHECK(in_vars_to_scan_[var]);
1022
1023 if (!ShouldScan(var)) {
1024 remove_var_to_scan_at_index(index);
1025 continue;
1026 }
1027 const auto [delta, scan_score] = jumps_.GetJump(var);
1028 if ((state_->counters.num_general_evals +
1029 state_->counters.num_linear_evals) %
1030 100 ==
1031 0 &&
1032 shared_time_limit_ != nullptr && shared_time_limit_->LimitReached()) {
1033 time_limit_crossed_ = true;
1034 return false;
1035 }
1036 const int64_t current_value = state_->solution[var];
1037 DCHECK(var_domains_[var].Contains(current_value + delta))
1038 << var << " " << current_value << "+" << delta << " not in "
1039 << var_domains_[var].ToString();
1040 DCHECK(!var_domains_[var].IsFixed());
1041 if (scan_score >= 0) {
1042 remove_var_to_scan_at_index(index);
1043 continue;
1044 }
1045 double score = scan_score;
1046 if (state_->options.use_compound_moves) {
1047 // We only use compound moves in general iterations.
1048 score = ComputeScore(
1049 state_->weights, var, delta,
1050 /*linear_only=*/!var_occurs_in_non_linear_constraint_[var]);
1051 if (!ShouldExtendCompoundMove(score, score - scan_score)) {
1052 remove_var_to_scan_at_index(index);
1053 continue;
1054 }
1055 }
1056
1057 ++num_good;
1058 if (scan_score < best_scan_score) {
1059 DCHECK_NE(delta, 0) << score;
1060 *best_var = var;
1061 *best_value = current_value + delta;
1062 *best_score = score;
1063 best_scan_score = scan_score;
1064 best_index = index;
1065 }
1066 }
1067 if (best_index != -1) {
1068 DCHECK_GT(num_good, 0);
1069 DCHECK_GE(*best_var, 0);
1070 DCHECK_EQ(*best_var, vars_to_scan_[best_index]);
1071 remove_var_to_scan_at_index(best_index);
1072 return true;
1073 }
1074 return false;
1075}
1076
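// [Editorial aside, not part of the original file] Example of the swap-remove
// bookkeeping in the lambda above: if vars_to_scan_ = {a, b, c, d} and the
// entry at index 1 (b) is removed, d is moved into slot 1 and the vector
// shrinks to {a, d, c}. If d happened to be the recorded best (best_index == 3,
// which now equals the new size), best_index is redirected to 1 so it still
// points at d when the best candidate is finally removed and returned.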
1077void FeasibilityJumpSolver::AddVarToScan(int var) {
1078 DCHECK_GE(var, 0);
1079 if (in_vars_to_scan_[var]) return;
1080 if (!ShouldScan(var)) return;
1081 vars_to_scan_.push_back(var);
1082 in_vars_to_scan_[var] = true;
1083}
1084
1085bool FeasibilityJumpSolver::ShouldScan(int var) const {
1086 DCHECK_GE(var, 0);
1087
1088 if (state_->move->OnStack(var)) return false;
1089
1090 if (!jumps_.NeedRecomputation(var)) {
1091 // We already have the score/jump of that variable.
1092 const double score = jumps_.Score(var);
1093 return score < 0.0;
1094 }
1095
1096 // See RecomputeVarsToScan(), we shouldn't have any fixed variable here.
1097 DCHECK(!var_domains_.IsFixed(var));
1098
1099 // Return true iff var has a better objective value in its domain.
1100 if (var_domains_.HasBetterObjectiveValue(var)) return true;
1101
1102 // We will need to recompute the score. Let's skip variables for which we
1103 // know in advance that there will be no good score.
1104 //
1105 // For the objective, we don't care if it is violated or not, we only want
1106 // to scan variables that might improve it (and thus reduce its violation if it
1107 // is violated).
1108 //
1109 // TODO(user): We should generalize the objective logic to all constraints.
1110 // There is no point scanning a variable of a violated constraint if it is at
1111 // the wrong bound and cannot improve the violation!
1112 return evaluator_->NumViolatedConstraintsForVarIgnoringObjective(var) > 0;
1113}
1114
1115void FeasibilityJumpSolver::RecomputeVarsToScan() {
1116 const int num_variables = var_domains_.size();
1117 jumps_.RecomputeAll(num_variables);
1118 DCHECK(SlowCheckNumViolatedConstraints());
1119
1120 in_vars_to_scan_.assign(num_variables, false);
1121 vars_to_scan_.clear();
1122
1123 // Since the fixed status never changes during one batch, we mark such
1124 // variables as "in_vars_to_scan_" even if we don't add them here. This
1125 // allows us to skip them without any extra lookup.
1126 for (const int var : var_domains_.FixedVariables()) {
1127 in_vars_to_scan_[var] = true;
1128 }
1129
1130 num_ops_ += evaluator_->ViolatedConstraints().size();
1131 for (const int c : evaluator_->ViolatedConstraints()) {
1132 num_ops_ += evaluator_->ConstraintToVars(c).size();
1133 for (const int v : evaluator_->ConstraintToVars(c)) {
1134 AddVarToScan(v);
1135 }
1136 }
1137}
1138
1139bool FeasibilityJumpSolver::SlowCheckNumViolatedConstraints() const {
1140 std::vector<int> result;
1141 result.assign(var_domains_.size(), 0);
1142 for (const int c : evaluator_->ViolatedConstraints()) {
1143 if (evaluator_->IsObjectiveConstraint(c)) continue;
1144 for (const int v : evaluator_->ConstraintToVars(c)) {
1145 ++result[v];
1146 }
1147 }
1148 for (int v = 0; v < result.size(); ++v) {
1149 CHECK_EQ(result[v],
1150 evaluator_->NumViolatedConstraintsForVarIgnoringObjective(v));
1151 }
1152 return true;
1153}
1154
1155void CompoundMoveBuilder::Clear() {
1156 for (const UnitMove& move : stack_) {
1157 var_on_stack_[move.var] = false;
1158 }
1159 stack_.clear();
1160}
1161
1162bool CompoundMoveBuilder::OnStack(int var) const {
1163 return !stack_.empty() && var_on_stack_[var];
1164}
1165
1166bool CompoundMoveBuilder::Backtrack(int* var, int64_t* value, double* score) {
1167 if (stack_.empty()) return false;
1168 *var = stack_.back().var;
1169 *value = stack_.back().prev_value;
1170 *score = stack_.back().score;
1171 var_on_stack_[*var] = false;
1172 stack_.pop_back();
1173 if (!stack_.empty()) {
1174 ++stack_.back().discrepancy;
1175 }
1176 return true;
1177}
1178
1179void CompoundMoveBuilder::Push(int var, int64_t prev_value, double score) {
1180 DCHECK(!var_on_stack_[var]);
1181 if (!stack_.empty()) {
1182 stack_.back().best_child_score =
1183 std::min(stack_.back().best_child_score, score);
1184 }
1185 var_on_stack_[var] = true;
1186 stack_.push_back({
1187 .var = var,
1188 .prev_value = prev_value,
1189 .score = -score,
1190 .cumulative_score = Score() + score,
1191 .discrepancy = Discrepancy(),
1192 });
1193}
1194
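// [Editorial aside, not part of the original file] Sketch of the stack
// discipline implemented by Push()/Backtrack() above, with hypothetical values:
//
//   CompoundMoveBuilder builder(/*num_variables=*/10);
//   builder.Push(/*var=*/3, /*prev_value=*/0, /*score=*/2.0);   // Worsening step.
//   builder.Push(/*var=*/7, /*prev_value=*/5, /*score=*/-3.0);  // Improving step.
//   // builder.Score() is now 2.0 + (-3.0) = -1.0 < 0: the chain improves
//   // overall, so the caller commits it and calls builder.Clear(). Had it
//   // stayed non-negative, the caller would undo it one step at a time with:
//   //   int var; int64_t value; double score;
//   //   while (builder.Backtrack(&var, &value, &score)) { /* undo each move */ }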
1195bool CompoundMoveBuilder::StackValuesInDomains(
1196 absl::Span<const Domain> var_domains) const {
1197 for (const UnitMove& move : stack_) {
1198 if (!var_domains[move.var].Contains(move.prev_value)) return false;
1199 }
1200 return true;
1201}
1202
1203} // namespace operations_research::sat