Google OR-Tools v9.14
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
feasibility_jump.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <stdlib.h>
17
18#include <algorithm>
19#include <atomic>
20#include <cmath>
21#include <cstdint>
22#include <functional>
23#include <limits>
24#include <memory>
25#include <string>
26#include <tuple>
27#include <utility>
28#include <vector>
29
30#include "absl/functional/any_invocable.h"
31#include "absl/functional/bind_front.h"
32#include "absl/functional/function_ref.h"
33#include "absl/log/check.h"
34#include "absl/log/log.h"
35#include "absl/log/vlog_is_on.h"
36#include "absl/random/bit_gen_ref.h"
37#include "absl/random/distributions.h"
38#include "absl/strings/str_cat.h"
39#include "absl/types/span.h"
51#include "ortools/sat/util.h"
54
56
namespace {
// How much do we discount moves we might fix later.
//
// When compound moves are enabled, currently-satisfied constraints have their
// compound weight multiplied by this factor, so temporarily breaking them is
// cheap but not free.
constexpr double kCompoundDiscount = 1. / 1024;
}  // namespace
61
63 absl::AnyInvocable<std::pair<int64_t, double>(int) const> compute_jump) {
64 compute_jump_ = std::move(compute_jump);
65}
66
67void JumpTable::RecomputeAll(int num_variables) {
68 deltas_.resize(num_variables);
69 scores_.resize(num_variables);
70 needs_recomputation_.assign(num_variables, true);
71}
72
73void JumpTable::SetJump(int var, int64_t delta, double score) {
74 deltas_[var] = delta;
75 scores_[var] = score;
76 needs_recomputation_[var] = false;
77}
78
// Invalidates the cached jump of `var`; it will be recomputed on demand by
// the next GetJump() call.
void JumpTable::Recompute(int var) { needs_recomputation_[var] = true; }
80
81bool JumpTable::JumpIsUpToDate(int var) const {
82 const auto& [delta, score] = compute_jump_(var);
83 if (delta != deltas_[var]) {
84 LOG(ERROR) << "Incorrect delta for var " << var << ": " << deltas_[var]
85 << " (should be " << delta << ")";
86 }
87 bool score_ok = true;
88 if (abs(score - scores_[var]) / std::max(abs(score), 1.0) > 1e-2) {
89 score_ok = false;
90 LOG(ERROR) << "Incorrect score for var " << var << ": " << scores_[var]
91 << " (should be " << score << ") "
92 << " delta = " << delta;
93 }
94 return delta == deltas_[var] && score_ok;
95}
96
97std::pair<int64_t, double> JumpTable::GetJump(int var) {
98 if (needs_recomputation_[var]) {
99 needs_recomputation_[var] = false;
100 std::tie(deltas_[var], scores_[var]) = compute_jump_(var);
101 }
102 return std::make_pair(deltas_[var], scores_[var]);
103}
104
106 // Do a final collection.
107 for (int i = 0; i < states_.size(); ++i) {
108 CollectStatistics(*states_[i].get());
109 }
110
111 // Display aggregated states
112 for (const auto& [options, counters] : options_to_stats_) {
113 stat_tables_->AddLsStat(
114 absl::StrCat(name_, "_", options.name()), counters.num_batches,
115 options_to_num_restarts_[options] + counters.num_perturbations,
116 counters.num_linear_moves, counters.num_general_moves,
117 counters.num_compound_moves, counters.num_backtracks,
118 counters.num_weight_updates, counters.num_scores_computed);
119 }
120}
121
123 stat_tables_->AddTimingStat(*this);
124}
125
126void FeasibilityJumpSolver::ImportState() {
127 state_ = states_->GetNextState();
128 if (state_->move == nullptr) {
129 const int num_variables = var_domains_.size();
130 state_->move = std::make_unique<CompoundMoveBuilder>(num_variables);
131 }
132}
133
// Returns the current state to the shared pool so another worker can pick it
// up on a later batch.
void FeasibilityJumpSolver::ReleaseState() { states_->Release(state_); }
135
136bool FeasibilityJumpSolver::Initialize() {
137 // For now we just disable or enable it.
138 // But in the future we might have more variation.
139 if (params_.feasibility_jump_linearization_level() == 0) {
140 evaluator_ = std::make_unique<LsEvaluator>(linear_model_->model_proto(),
141 params_, &time_limit_);
142 } else {
143 evaluator_ = std::make_unique<LsEvaluator>(
144 linear_model_->model_proto(), params_,
145 linear_model_->ignored_constraints(),
146 linear_model_->additional_constraints(), &time_limit_);
147 }
148
149 if (time_limit_.LimitReached()) {
150 evaluator_.reset();
151 return false;
152 }
153 is_initialized_ = true;
154 const int num_variables = linear_model_->model_proto().variables().size();
155 var_domains_.resize(num_variables);
156 for (int v = 0; v < num_variables; ++v) {
157 var_domains_.Set(
158 v, ReadDomainFromProto(linear_model_->model_proto().variables(v)));
159 }
160 var_domains_.InitializeObjective(linear_model_->model_proto());
161
162 vars_to_scan_.ClearAndReserve(num_variables);
163 var_occurs_in_non_linear_constraint_.resize(num_variables);
164 for (int c = 0; c < evaluator_->NumNonLinearConstraints(); ++c) {
165 for (int v : evaluator_->GeneralConstraintToVars(c)) {
166 var_occurs_in_non_linear_constraint_[v] = true;
167 }
168 }
169 return true;
170}
171
172namespace {
173
// Returns ceil(range * range_ratio) as an integer.
int64_t ComputeRange(int64_t range, double range_ratio) {
  const double scaled = static_cast<double>(range) * range_ratio;
  return static_cast<int64_t>(std::ceil(scaled));
}
178
179// TODO(user): Optimize and move to the Domain class.
180// TODO(user): Improve entropy on non continuous domains.
181int64_t RandomValueNearMin(const Domain& domain, double range_ratio,
182 absl::BitGenRef random) {
183 if (domain.Size() == 1) return domain.FixedValue();
184 if (domain.Size() == 2) {
185 return absl::Bernoulli(random, 1 - range_ratio) ? domain.Min()
186 : domain.Max();
187 }
188 const int64_t range = ComputeRange(domain.Max() - domain.Min(), range_ratio);
189 return domain.ValueAtOrBefore(domain.Min() +
190 absl::LogUniform<int64_t>(random, 0, range));
191}
192
193int64_t RandomValueNearMax(const Domain& domain, double range_ratio,
194 absl::BitGenRef random) {
195 if (domain.Size() == 1) return domain.FixedValue();
196 if (domain.Size() == 2) {
197 return absl::Bernoulli(random, 1 - range_ratio) ? domain.Max()
198 : domain.Min();
199 }
200 const int64_t range = ComputeRange(domain.Max() - domain.Min(), range_ratio);
201 return domain.ValueAtOrAfter(domain.Max() -
202 absl::LogUniform<int64_t>(random, 0, range));
203}
204
205int64_t RandomValueNearValue(const Domain& domain, int64_t value,
206 double range_ratio, absl::BitGenRef random) {
207 DCHECK(!domain.IsFixed());
208
209 if (domain.Min() >= value) {
210 return RandomValueNearMin(domain, range_ratio, random);
211 }
212
213 if (domain.Max() <= value) {
214 return RandomValueNearMax(domain, range_ratio, random);
215 }
216
217 // Split up or down, and choose value in split domain.
218 const Domain greater_domain =
219 domain.IntersectionWith({value + 1, domain.Max()});
220 const double choose_greater_probability =
221 static_cast<double>(greater_domain.Size()) /
222 static_cast<double>(domain.Size() - 1);
223 if (absl::Bernoulli(random, choose_greater_probability)) {
224 return RandomValueNearMin(greater_domain, range_ratio, random);
225 } else {
226 return RandomValueNearMax(
227 domain.IntersectionWith({domain.Min(), value - 1}), range_ratio,
228 random);
229 }
230}
231
232} // namespace
233
234void FeasibilityJumpSolver::ResetCurrentSolution(
235 bool use_hint, bool use_objective, double perturbation_probability) {
236 const int num_variables = linear_model_->model_proto().variables().size();
237 const double default_value_probability = 1.0 - perturbation_probability;
238 const double range_ratio =
239 params_.feasibility_jump_var_perburbation_range_ratio();
240 std::vector<int64_t>& solution = state_->solution;
241 state_->base_solution = nullptr;
242
243 // Resize the solution if needed.
244 solution.resize(num_variables);
245
246 // Starts with values closest to zero.
247 for (int var = 0; var < num_variables; ++var) {
248 if (var_domains_[var].IsFixed()) {
249 solution[var] = var_domains_[var].FixedValue();
250 continue;
251 }
252
253 if (absl::Bernoulli(random_, default_value_probability)) {
254 solution[var] = var_domains_[var].SmallestValue();
255 } else {
256 solution[var] =
257 RandomValueNearValue(var_domains_[var], 0, range_ratio, random_);
258 }
259 }
260
261 // Use objective half of the time (if the model has one).
262 if (use_objective && linear_model_->model_proto().has_objective()) {
263 const int num_terms =
264 linear_model_->model_proto().objective().vars().size();
265 for (int i = 0; i < num_terms; ++i) {
266 const int var = linear_model_->model_proto().objective().vars(i);
267 if (var_domains_[var].IsFixed()) continue;
268
269 if (linear_model_->model_proto().objective().coeffs(i) > 0) {
270 if (absl::Bernoulli(random_, default_value_probability)) {
271 solution[var] = var_domains_[var].Min();
272 } else {
273 solution[var] =
274 RandomValueNearMin(var_domains_[var], range_ratio, random_);
275 }
276 } else {
277 if (absl::Bernoulli(random_, default_value_probability)) {
278 solution[var] = var_domains_[var].Max();
279 } else {
280 solution[var] =
281 RandomValueNearMax(var_domains_[var], range_ratio, random_);
282 }
283 }
284 }
285 }
286
287 if (use_hint && linear_model_->model_proto().has_solution_hint()) {
288 const auto& hint = linear_model_->model_proto().solution_hint();
289 for (int i = 0; i < hint.vars().size(); ++i) {
290 solution[hint.vars(i)] = hint.values(i);
291 }
292 }
293}
294
295void FeasibilityJumpSolver::PerturbateCurrentSolution(
296 double perturbation_probability) {
297 if (perturbation_probability == 0.0) return;
298 const int num_variables = linear_model_->model_proto().variables().size();
299 const double perturbation_ratio =
300 params_.feasibility_jump_var_perburbation_range_ratio();
301 std::vector<int64_t>& solution = state_->solution;
302 for (int var = 0; var < num_variables; ++var) {
303 if (var_domains_[var].IsFixed()) continue;
304 if (absl::Bernoulli(random_, perturbation_probability)) {
305 solution[var] = RandomValueNearValue(var_domains_[var], solution[var],
306 perturbation_ratio, random_);
307 }
308 }
309}
310
// Returns a compact one-line summary of the current batch counters, used for
// throttled logging and to name pushed solutions. Empty sections (general,
// compound, infeasibility) are omitted entirely.
std::string FeasibilityJumpSolver::OneLineStats() const {
  // Moves and evaluations in the general iterations.
  const std::string general_str =
      state_->counters.num_general_evals == 0 &&
              state_->counters.num_general_moves == 0
          ? ""
          : absl::StrCat(
                " gen{mvs:", FormatCounter(state_->counters.num_general_moves),
                " evals:", FormatCounter(state_->counters.num_general_evals),
                "}");
  // Compound moves and backtracks, if that mode was active.
  const std::string compound_str =
      state_->counters.num_compound_moves == 0 &&
              state_->counters.num_backtracks == 0
          ? ""
          : absl::StrCat(" comp{mvs:",
                         FormatCounter(state_->counters.num_compound_moves),
                         " btracks:",
                         FormatCounter(state_->counters.num_backtracks), "}");

  // Improving jumps and infeasible constraints.
  const int num_infeasible_cts = evaluator_->NumInfeasibleConstraints();
  const std::string non_solution_str =
      num_infeasible_cts == 0
          ? ""
          : absl::StrCat(" #good_moves:", FormatCounter(vars_to_scan_.size()),
                         " #inf_cts:",
                         FormatCounter(evaluator_->NumInfeasibleConstraints()));

  return absl::StrCat(
      "batch:", state_->counters.num_batches,
      " lin{mvs:", FormatCounter(state_->counters.num_linear_moves),
      " evals:", FormatCounter(state_->counters.num_linear_evals), "}",
      general_str, compound_str, non_solution_str,
      " #w_updates:", FormatCounter(state_->counters.num_weight_updates),
      " #perturb:", FormatCounter(state_->counters.num_perturbations));
}
347
// Returns the closure that runs one batch of feasibility-jump search on the
// next shared state: it (re)initializes or perturbs the solution, syncs the
// shared objective bounds and variable domains, runs the linear then general
// iterations, and pushes any feasible solution found.
std::function<void()> FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) {
  task_generated_ = true; // Atomic.

  return [this] {
    // We delay initialization to the first task as it might be a bit slow
    // to scan the whole model, so we want to do this part in parallel.
    if (!is_initialized_) {
      if (!Initialize()) {
        return;
      }
    }

    // Load the next state to work on.
    ImportState();

    // If we found a new best solution, we will restart all violation ls (we
    // still finish each batch though). We will also reset the luby sequence.
    bool new_best_solution_was_found = false;
    if (type() == SubSolver::INCOMPLETE) {
      const int64_t best =
          shared_response_->SolutionsRepository().GetBestRank();
      if (best < state_->last_solution_rank) {
        states_->ResetLubyCounter();
        new_best_solution_was_found = true;
        state_->last_solution_rank = best;
      }
    }

    // Decide whether this batch starts fresh (restart/perturbation) or
    // continues from the previous batch of this state.
    bool reset_weights = false;
    if (new_best_solution_was_found || state_->num_batches_before_change <= 0) {
      reset_weights = true;
      if (state_->options.use_restart) {
        states_->CollectStatistics(*state_);
        state_->options.Randomize(params_, &random_);
        state_->counters = LsCounters(); // Reset.
      } else {
        state_->options.Randomize(params_, &random_);
      }
      if (type() == SubSolver::INCOMPLETE) {
        // This is not used once we have a solution, and setting it to false
        // allows to fix the logs.
        state_->options.use_objective = false;
      }

      const bool first_time = (state_->num_restarts == 0);
      if (state_->options.use_restart || first_time ||
          new_best_solution_was_found) {
        if (type() == SubSolver::INCOMPLETE) {
          // Choose a base solution for this neighborhood.
          std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution>
              solution = shared_response_->SolutionsRepository()
                             .GetRandomBiasedSolution(random_);
          state_->solution = solution->variable_values;
          state_->base_solution = solution;
          ++state_->num_solutions_imported;
        } else {
          if (!first_time) {
            // Register this solution before we reset the search.
            const int num_violations = evaluator_->ViolatedConstraints().size();
            shared_hints_->AddSolution(state_->solution, num_violations);
          }
          ResetCurrentSolution(/*use_hint=*/first_time,
                               state_->options.use_objective,
                               state_->options.perturbation_probability);
        }
      } else {
        PerturbateCurrentSolution(
            params_.feasibility_jump_var_randomization_probability());
      }

      if (state_->options.use_restart) {
        ++state_->num_restarts;
        states_->ConfigureNextLubyRestart(state_);
      } else {
        // TODO(user): Tune the improvement constant, maybe use luby.
        ++state_->counters.num_perturbations;
        state_->num_batches_before_change =
            params_.violation_ls_perturbation_period();
      }
    }

    // Between chunks, we synchronize bounds.
    bool recompute_compound_weights = false;
    if (linear_model_->model_proto().has_objective()) {
      const IntegerValue lb = shared_response_->GetInnerObjectiveLowerBound();
      const IntegerValue ub = shared_response_->GetInnerObjectiveUpperBound();
      if (lb != state_->saved_inner_objective_lb ||
          ub != state_->saved_inner_objective_ub) {
        recompute_compound_weights = true;
      }
      state_->saved_inner_objective_lb = lb;
      state_->saved_inner_objective_ub = ub;

      if (ub < lb) return; // Search is finished.
      if (evaluator_->ReduceObjectiveBounds(lb.value(), ub.value())) {
        recompute_compound_weights = true;
      }
    }

    // Update the variable domains with the last information.
    if (!var_domains_.UpdateFromSharedBounds()) return;

    // Checks the current solution is compatible with updated domains.
    {
      // Make sure the solution is within the potentially updated domain.
      // This also initializes var_domains_.CanIncrease()/CanDecrease().
      const int num_vars = state_->solution.size();
      for (int var = 0; var < num_vars; ++var) {
        const int64_t old_value = state_->solution[var];
        const int64_t new_value = var_domains_[var].ClosestValue(old_value);
        if (new_value != old_value) {
          state_->solution[var] = new_value;
          recompute_compound_weights = true;
        }
        var_domains_.OnValueChange(var, new_value);
      }
      // Check if compound move search might backtrack out of the new domains.
      if (!state_->move->StackValuesInDomains(var_domains_.AsSpan())) {
        recompute_compound_weights = true;
      }
    }

    // Search for feasible solution.
    // We always recompute that since we might have loaded from a different
    // state.
    evaluator_->ComputeAllViolations(state_->solution);

    if (reset_weights) {
      state_->bump_value = 1.0;
      state_->weights.assign(evaluator_->NumEvaluatorConstraints(), 1.0);
      recompute_compound_weights = true;
    }
    if (recompute_compound_weights) {
      state_->move->Clear();
      if (state_->options.use_compound_moves) {
        // Satisfied constraints get their weight discounted (see
        // kCompoundDiscount above).
        state_->compound_weights.assign(state_->weights.begin(),
                                        state_->weights.end());
        for (int c = 0; c < state_->weights.size(); ++c) {
          if (evaluator_->IsViolated(c)) continue;
          state_->compound_weights[c] *= kCompoundDiscount;
        }
        state_->compound_weight_changed.clear();
        state_->in_compound_weight_changed.assign(state_->weights.size(),
                                                  false);
        state_->compound_move_max_discrepancy = 0;
      }
    }

    if (!state_->options.use_compound_moves) {
      DCHECK_EQ(state_->move->Size(), 0);
    }

    ++state_->counters.num_batches;
    if (DoSomeLinearIterations() && DoSomeGeneralIterations()) {
      // Checks for infeasibility induced by the non supported constraints.
      if (SolutionIsFeasible(linear_model_->model_proto(), state_->solution)) {
        auto pointers = PushAndMaybeCombineSolution(
            shared_response_, linear_model_->model_proto(), state_->solution,
            absl::StrCat(name(), "_", state_->options.name(), "(",
                         OneLineStats(), ")"),
            state_->base_solution == nullptr
                ? absl::Span<const int64_t>()
                : state_->base_solution->variable_values,
            /*model=*/nullptr);
        // If we pushed a new solution, we use it as a new "base" so that we
        // will have a smaller delta on the next solution we find.
        state_->base_solution = pointers.pushed_solution;
      } else {
        shared_response_->LogMessage(name(), "infeasible solution. Aborting.");
        model_is_supported_ = false;
      }
    }

    // Update dtime.
    // Since we execute only one task at the time, this is safe.
    {
      // TODO(user): Find better names. DeterministicTime() is maintained by
      // this class while deterministic_time() is the one saved in the SubSolver
      // base class).
      const double current_dtime = DeterministicTime();
      const double delta = current_dtime - deterministic_time();

      // Because deterministic_time() is computed with a sum of difference, it
      // might be slightly different than DeterministicTime() and we don't want
      // to go backward, even by 1e-18.
      if (delta >= 0) {
        shared_time_limit_->AdvanceDeterministicTime(delta);
      }
    }

    ReleaseState();
    task_generated_ = false; // Atomic.
  };
}
543
// Returns the change in weighted constraint violation if `var` moved by
// `delta`, plus an epsilon-scaled objective term. `linear_only` restricts
// the evaluation to the linear constraints.
double FeasibilityJumpSolver::ComputeScore(absl::Span<const double> weights,
                                           int var, int64_t delta,
                                           bool linear_only) {
  double score = evaluator_->WeightedViolationDelta(
      linear_only, weights, var, delta, absl::MakeSpan(state_->solution));
  // kEpsilon is tiny enough that the objective only acts as a tie breaker
  // between moves with (near-)equal violation delta.
  constexpr double kEpsilon = 1.0 / std::numeric_limits<int64_t>::max();
  score += kEpsilon * delta * evaluator_->ObjectiveCoefficient(var);
  return score;
}
554
// Computes the best jump (delta from the current value, and its score) for
// `var` using only the linear constraints. Two-valued variables simply flip;
// otherwise we probe the two closest candidate values and, only when one of
// them improves, search the slope breakpoints on that side assuming the
// score is convex in the jump value.
std::pair<int64_t, double> FeasibilityJumpSolver::ComputeLinearJump(int var) {
  DCHECK(!var_domains_[var].IsFixed());
  const int64_t current_value = state_->solution[var];

  ++state_->counters.num_linear_evals;
  const LinearIncrementalEvaluator& linear_evaluator =
      evaluator_->LinearEvaluator();

  if (var_domains_.HasTwoValues(var)) {
    const int64_t min_value = var_domains_[var].Min();
    const int64_t max_value = var_domains_[var].Max();
    const int64_t delta = current_value == min_value ? max_value - min_value
                                                     : min_value - max_value;
    return std::make_pair(
        delta, ComputeScore(ScanWeights(), var, delta, /*linear_only=*/true));
  }

  // In practice, after a few iterations, the chance of finding an improving
  // move is slim, and we can test that fairly easily with at most two
  // queries!
  //
  // Tricky/Annoying: if the value is not in the domain, we return it.
  const int64_t p1 = var_domains_[var].ValueAtOrBefore(current_value - 1);
  const int64_t p2 = var_domains_[var].ValueAtOrAfter(current_value + 1);

  std::pair<int64_t, double> best_jump;
  const double v1 = var_domains_[var].Contains(p1)
                        ? ComputeScore(ScanWeights(), var, p1 - current_value,
                                       /*linear_only=*/true)
                        : std::numeric_limits<double>::infinity();
  if (v1 < 0.0) {
    // Point p1 is improving. Look for best before it.
    // Note that we can exclude all point after current_value since it is
    // worse and we assume convexity.
    const Domain dom = var_domains_[var].IntersectionWith(
        Domain(std::numeric_limits<int64_t>::min(), p1 - 1));
    if (dom.IsEmpty()) {
      best_jump = {p1, v1};
    } else {
      tmp_breakpoints_ =
          linear_evaluator.SlopeBreakpoints(var, current_value, dom);
          /*is_to_the_right=*/true, {p1, v1}, tmp_breakpoints_,
          [this, var, current_value](int64_t jump_value) {
            return ComputeScore(ScanWeights(), var, jump_value - current_value,
                                /*linear_only=*/true);
          });
    }
  } else {
    const double v2 = var_domains_[var].Contains(p2)
                          ? ComputeScore(ScanWeights(), var, p2 - current_value,
                                         /*linear_only=*/true)
                          : std::numeric_limits<double>::infinity();
    if (v2 < 0.0) {
      // Point p2 is improving. Look for best after it.
      // Similarly, we exclude the other points by convexity.
      const Domain dom = var_domains_[var].IntersectionWith(
          Domain(p2 + 1, std::numeric_limits<int64_t>::max()));
      if (dom.IsEmpty()) {
        best_jump = {p2, v2};
      } else {
        tmp_breakpoints_ =
            linear_evaluator.SlopeBreakpoints(var, current_value, dom);
            /*is_to_the_right=*/false, {p2, v2}, tmp_breakpoints_,
            [this, var, current_value](int64_t jump_value) {
              return ComputeScore(ScanWeights(), var,
                                  jump_value - current_value,
                                  /*linear_only=*/true);
            });
      }
    } else {
      // We have no improving point, result is either p1 or p2. This is the
      // most common scenario, and require no breakpoint computation!
      // Choose the direction which increases violation the least,
      // disambiguating by best objective.
      if (v1 < v2) {
        best_jump = {p1, v1};
      } else {
        best_jump = {p2, v2};
      }
    }
  }
  DCHECK_NE(best_jump.first, current_value);
  return std::make_pair(best_jump.first - current_value, best_jump.second);
}
641
642std::pair<int64_t, double> FeasibilityJumpSolver::ComputeGeneralJump(int var) {
643 if (!var_occurs_in_non_linear_constraint_[var]) {
644 return ComputeLinearJump(var);
645 }
646 Domain domain = var_domains_[var];
647 if (domain.IsFixed()) return std::make_pair(0, 0.0);
648
649 ++state_->counters.num_general_evals;
650 const int64_t current_value = state_->solution[var];
651 domain = domain.IntersectionWith(
652 Domain(current_value, current_value).Complement());
653 std::pair<int64_t, double> result;
654 for (int i = 0; i < domain.NumIntervals(); ++i) {
655 const int64_t min_delta = domain[i].start - current_value;
656 const int64_t max_delta = domain[i].end - current_value;
657 const auto& [delta, score] = RangeConvexMinimum<int64_t, double>(
658 min_delta, max_delta + 1, [&](int64_t delta) -> double {
659 return ComputeScore(ScanWeights(), var, delta, /*linear_only=*/false);
660 });
661 if (i == 0 || score < result.second) {
662 result = std::make_pair(delta, score);
663 }
664 }
665 DCHECK(domain.Contains(current_value + result.first))
666 << current_value << "+" << result.first << " not in domain "
667 << domain.ToString();
668 return result;
669}
670
// Bumps the weight of every currently violated constraint, then either
// rescales all weights (when they grow too large) and invalidates every jump,
// or incrementally patches the cached jump scores affected by the bump.
void FeasibilityJumpSolver::UpdateViolatedConstraintWeights() {
  ++state_->counters.num_weight_updates;

  // Because we update the weight incrementally, it is better to not have a
  // super high magnitude, otherwise doing +max_weight and then -max_weight
  // will just ignore any constraint with a small weight and our
  // DCHECK(JumpIsUpToDate(var)) will fail more often.
  const double kMaxWeight = 1e10;
  const double kBumpFactor = 1.0 / params_.feasibility_jump_decay();
  const int num_variables = var_domains_.size();
  if (state_->options.use_decay) {
    state_->bump_value *= kBumpFactor;
  }

  // Note that ViolatedConstraints() might contain only linear constraint
  // depending on how it was initialized and updated.
  bool rescale = false;
  num_ops_ += evaluator_->ViolatedConstraints().size();
  for (const int c : evaluator_->ViolatedConstraints()) {
    DCHECK(evaluator_->IsViolated(c));
    if (state_->options.use_compound_moves) {
      DCHECK_EQ(state_->compound_weights[c], state_->weights[c]);
    }
    state_->weights[c] += state_->bump_value;
    if (state_->options.use_compound_moves) {
      // Violated constraints are never discounted.
      state_->compound_weights[c] = state_->weights[c];
    }
    if (state_->weights[c] > kMaxWeight) {
      rescale = true;
    }
  }

  if (rescale) {
    // Scale everything down and recompute all jumps from scratch: an
    // incremental update would lose too much precision here.
    const double factor = 1.0 / kMaxWeight;
    state_->bump_value *= factor;
    for (int c = 0; c < state_->weights.size(); ++c) {
      state_->weights[c] *= factor;
      if (state_->options.use_compound_moves) {
        state_->compound_weights[c] *= factor;
      }
    }
    jumps_.RecomputeAll(num_variables);
    return;
  }

  // Update weight incrementally.
  //
  // To maximize floating point precision, we compute the change to jump value
  // first and then apply it in one go. Also, in most situation the change is
  // purely integer and should fit exactly on a double, so we don't depend on
  // the order in which constraint are listed.
  LinearIncrementalEvaluator* linear_evaluator =
      evaluator_->MutableLinearEvaluator();
  linear_evaluator->ClearAffectedVariables();
  for_weight_update_.resize(num_variables);
  num_ops_ += evaluator_->ViolatedConstraints().size();
  for (const int c : evaluator_->ViolatedConstraints()) {
    if (c < evaluator_->NumLinearConstraints()) {
      linear_evaluator->UpdateScoreOnWeightUpdate(
          c, jumps_.Deltas(), absl::MakeSpan(for_weight_update_));
    } else {
      // Non-linear constraints: just invalidate the jumps of their variables.
      for (const int v : evaluator_->ConstraintToVars(c)) {
        jumps_.Recompute(v);
        AddVarToScan(v);
      }
    }
  }

  // Recompute the affected jumps.
  // Note that the constraint violations are unaffected.
  absl::Span<double> scores = jumps_.MutableScores();
  for (const int var : linear_evaluator->VariablesAffectedByLastUpdate()) {
    // Apply the delta.
    //
    // TODO(user): We could compute the minimal bump that would lead to a
    // good move. That might change depending on the jump value though, so
    // we can only do that easily for Booleans.
    if (!var_domains_.HasTwoValues(var)) {
      jumps_.Recompute(var);
    } else {
      // We may know the correct score for binary vars.
      scores[var] += state_->bump_value * for_weight_update_[var];
    }
    AddVarToScan(var);
  }
}
757
// Runs one batch of local search on the linear constraints only: greedy
// descent over the cached jumps, bumping violated-constraint weights whenever
// no improving move remains.
//
// Returns true when all linear constraints are satisfied (or when the phase
// is skipped because compound moves are enabled); false when the batch dtime
// budget or the time limit stops the search first.
bool FeasibilityJumpSolver::DoSomeLinearIterations() {
  if (VLOG_IS_ON(1)) {
    shared_response_->LogMessageWithThrottling(name(), OneLineStats());
  }

  // TODO(user): It should be possible to support compound moves with
  // the specialized linear code, but lets keep it simpler for now.
  if (state_->options.use_compound_moves) return true;

  evaluator_->RecomputeViolatedList(/*linear_only=*/true);
  jumps_.SetComputeFunction(
      absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
  RecomputeVarsToScan();

  // Do a batch of a given dtime.
  // Outer loop: when no more greedy moves, update the weight.
  const double dtime_threshold =
      DeterministicTime() + params_.feasibility_jump_batch_dtime();
  while (DeterministicTime() < dtime_threshold) {
    // Inner loop: greedy descent.
    while (DeterministicTime() < dtime_threshold) {
      // Take the best jump score amongst some random candidates.
      // It is okay if we pick twice the same, we don't really care.
      int best_var;
      int64_t best_value;
      double best_score;
      if (!ScanRelevantVariables(/*num_to_scan=*/5, &best_var, &best_value,
                                 &best_score)) {
        break;
      }

      // Perform the move.
      ++state_->counters.num_linear_moves;
      const int64_t prev_value = state_->solution[best_var];
      state_->solution[best_var] = best_value;
      evaluator_->UpdateLinearScores(best_var, prev_value, best_value,
                                     state_->weights, jumps_.Deltas(),
                                     jumps_.MutableScores());
      evaluator_->UpdateViolatedList();
      var_domains_.OnValueChange(best_var, best_value);

      MarkJumpsThatNeedToBeRecomputed(best_var);
      if (var_domains_.HasTwoValues(best_var)) {
        // We already know the score of undoing the move we just did, and that
        // this is optimal.
        jumps_.SetJump(best_var, prev_value - best_value, -best_score);
      }
    }
    if (time_limit_crossed_) return false;

    // We will update the weight unless the queue is non-empty.
    if (vars_to_scan_.empty()) {
      // Note that we only count linear constraint as violated here.
      if (evaluator_->ViolatedConstraints().empty()) return true;
      UpdateViolatedConstraintWeights();
    }
  }
  return false;
}
817
818// Update the jump scores.
819//
820// We incrementally maintain the score (except for best_var).
821// However for non-Boolean, we still need to recompute the jump value.
822// We will do that in a lazy fashion.
823//
824// TODO(user): In the paper, they just recompute the scores and only
825// change the jump values when the constraint weight changes. Experiment?
826// Note however that the current code is quite fast.
827//
828// TODO(user): For non-Boolean, we could easily detect if a non-improving
829// score cannot become improving. We don't need to add such variable to
830// the queue.
831void FeasibilityJumpSolver::MarkJumpsThatNeedToBeRecomputed(int changed_var) {
832 // To keep DCHECKs happy. Note that we migh overwrite this afterwards with the
833 // known score/jump of undoing the move.
834 jumps_.Recompute(changed_var);
835
836 // Generic part.
837 // No optimization there, we just update all touched variables.
838 // We need to do this before the Linear part, so that the status is correct in
839 // AddVarToScan() for variable with two values.
840 num_ops_ += evaluator_->VarToGeneralConstraints(changed_var).size();
841 for (const int c : evaluator_->VarToGeneralConstraints(changed_var)) {
842 num_ops_ += evaluator_->GeneralConstraintToVars(c).size();
843 for (const int var : evaluator_->GeneralConstraintToVars(c)) {
844 jumps_.Recompute(var);
845 AddVarToScan(var);
846 }
847 }
848
849 // Linear part.
850 num_ops_ += evaluator_->VariablesAffectedByLastLinearUpdate().size();
851 for (const int var : evaluator_->VariablesAffectedByLastLinearUpdate()) {
852 if (!var_domains_.HasTwoValues(var)) {
853 jumps_.Recompute(var);
854 }
855 AddVarToScan(var);
856 }
857}
858
// Runs one batch of "general" local-search iterations. Unlike the linear-only
// phase, this one also evaluates non-linear constraints and, when enabled,
// builds compound moves. Returns true when this phase has nothing (left) to
// do: either it does not apply (no compound moves and no non-linear
// constraints), or no constraint is violated anymore. Returns false when the
// deterministic-time budget or the shared time limit is hit.
bool FeasibilityJumpSolver::DoSomeGeneralIterations() {
  if (!state_->options.use_compound_moves &&
      evaluator_->NumNonLinearConstraints() == 0) {
    return true;
  }
  // Non-linear constraints are not evaluated in the linear phase.
  evaluator_->ComputeAllNonLinearViolations(state_->solution);
  evaluator_->RecomputeViolatedList(/*linear_only=*/false);
  // Pick the cheapest jump-computation function that is still correct for the
  // model at hand.
  if (evaluator_->NumNonLinearConstraints() == 0) {
    jumps_.SetComputeFunction(
        absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
  } else {
    jumps_.SetComputeFunction(
        absl::bind_front(&FeasibilityJumpSolver::ComputeGeneralJump, this));
  }
  RecomputeVarsToScan();

  const double dtime_threshold =
      DeterministicTime() + params_.feasibility_jump_batch_dtime();
  while (DeterministicTime() < dtime_threshold) {
    int var;
    int64_t new_value;
    double score;
    // Try to find an improving move; failing that, try to backtrack the
    // current compound move (which also yields a (var, value, score) triple).
    const bool found_move = ScanRelevantVariables(
        /*num_to_scan=*/3, &var, &new_value, &score);
    const bool backtrack =
        !found_move && state_->move->Backtrack(&var, &new_value, &score);
    if (found_move || backtrack) {
      if (backtrack) ++state_->counters.num_backtracks;
      DCHECK_NE(var, -1) << var << " " << found_move << " " << backtrack;

      // Perform the move.
      ++state_->counters.num_general_moves;
      const int64_t prev_value = state_->solution[var];
      DCHECK_NE(prev_value, new_value);
      state_->solution[var] = new_value;

      // Update the linear part and non-linear part.
      evaluator_->UpdateLinearScores(var, prev_value, new_value, ScanWeights(),
                                     jumps_.Deltas(), jumps_.MutableScores());
      evaluator_->UpdateNonLinearViolations(var, prev_value, state_->solution);
      evaluator_->UpdateViolatedList();
      var_domains_.OnValueChange(var, new_value);

      if (state_->options.use_compound_moves && !backtrack) {
        // `!backtrack` is just an optimisation - we can never break any new
        // constraints on backtrack, so we can never change any
        // compound_weight_.
        for (const auto& c : evaluator_->last_update_violation_changes()) {
          if (evaluator_->IsViolated(c) &&
              state_->compound_weights[c] != state_->weights[c]) {
            // A constraint became violated: restore its full weight and
            // remember it so ResetChangedCompoundWeights() can discount it
            // again later.
            state_->compound_weights[c] = state_->weights[c];
            if (!state_->in_compound_weight_changed[c]) {
              state_->in_compound_weight_changed[c] = true;
              state_->compound_weight_changed.push_back(c);
            }
            for (const int v : evaluator_->ConstraintToVars(c)) {
              jumps_.Recompute(v);
              // Vars will be added in MarkJumpsThatNeedToBeRecomputed().
            }
          } else if (!evaluator_->IsViolated(c) &&
                     !state_->in_compound_weight_changed[c] &&
                     state_->compound_weights[c] == state_->weights[c]) {
            // A constraint at full weight became satisfied: queue it for a
            // (possible) discount in ResetChangedCompoundWeights().
            state_->in_compound_weight_changed[c] = true;
            state_->compound_weight_changed.push_back(c);
          }
        }
      }

      // Check that the score for undoing the move is -score with both the
      // default weights (which may be `state_->weights` or
      // `state_->compound_weights`), and with `weights` explicitly.
      // TODO(user): Re-enable DCHECK.
      if (/* DISABLES CODE */ false && !state_->options.use_decay) {
        DCHECK_EQ(-score, ComputeScore(state_->weights, var,
                                       prev_value - new_value, false));
        DCHECK_EQ(-score, ComputeScore(ScanWeights(), var,
                                       prev_value - new_value, false));
      }

      MarkJumpsThatNeedToBeRecomputed(var);
      if (var_domains_.HasTwoValues(var)) {
        // We already know the score of the only possible move (undoing what we
        // just did).
        jumps_.SetJump(var, prev_value - new_value, -score);
      }

      if (state_->options.use_compound_moves && !backtrack) {
        // Make sure we can undo the move.
        DCHECK_NE(prev_value, state_->solution[var]);
        state_->move->Push(var, prev_value, score);
        if (state_->move->Score() < 0) {
          // The compound move improves overall: commit it and start fresh.
          state_->counters.num_compound_moves += state_->move->Size();
          state_->move->Clear();
          state_->compound_move_max_discrepancy = 0;
        }
      }
      continue;
    } else if (time_limit_crossed_) {
      return false;
    }

    // No move and nothing to backtrack: we are at a local minimum.
    DCHECK_EQ(state_->move->Size(), 0);
    if (evaluator_->ViolatedConstraints().empty()) return true;
    if (state_->options.use_compound_moves) {
      ResetChangedCompoundWeights();
    }
    // Escape the local minimum by bumping violated-constraint weights, unless
    // compound moves still have discrepancy budget to explore.
    if (!state_->options.use_compound_moves ||
        ++state_->compound_move_max_discrepancy > 2) {
      state_->compound_move_max_discrepancy = 0;
      UpdateViolatedConstraintWeights();
    }
  }
  return false;
}
974
975void FeasibilityJumpSolver::ResetChangedCompoundWeights() {
976 if (!state_->options.use_compound_moves) return;
977 DCHECK_EQ(state_->move->Size(), 0);
978 num_ops_ += state_->compound_weight_changed.size();
979 for (const int c : state_->compound_weight_changed) {
980 state_->in_compound_weight_changed[c] = false;
981 const double expected_weight =
982 (evaluator_->IsViolated(c) ? 1.0 : kCompoundDiscount) *
983 state_->weights[c];
984 if (state_->compound_weights[c] == expected_weight) continue;
985 state_->compound_weights[c] = expected_weight;
986 num_ops_ += evaluator_->ConstraintToVars(c).size();
987 for (const int var : evaluator_->ConstraintToVars(c)) {
988 jumps_.Recompute(var);
989 AddVarToScan(var);
990 }
991 }
992 state_->compound_weight_changed.clear();
993}
994
995bool FeasibilityJumpSolver::ShouldExtendCompoundMove(double score,
996 double novelty) {
997 if (state_->move->Score() + score - std::max(novelty, 0.0) < 0) {
998 return true;
999 }
1000 return score < state_->move->BestChildScore();
1001}
1002
// Samples up to `num_to_scan` candidate variables, uniformly at random from
// vars_to_scan_, and reports the one with the best (most negative) scan score
// through the output parameters. Returns false when no candidate was found,
// when the time limit was crossed, or when the compound-move discrepancy
// budget is exhausted.
bool FeasibilityJumpSolver::ScanRelevantVariables(int num_to_scan,
                                                  int* best_var,
                                                  int64_t* best_value,
                                                  double* best_score) {
  if (time_limit_crossed_) return false;
  // Stop extending the compound move once it has backtracked too often.
  if (state_->move->Discrepancy() > state_->compound_move_max_discrepancy) {
    return false;
  }
  double best_scan_score = 0.0;
  int num_good = 0;
  int best_index = -1;
  *best_var = -1;
  *best_score = 0.0;

  // Swap-with-back removal from vars_to_scan_. If the element moved into
  // `index` is the current best candidate (i.e. it was the back element),
  // fix up best_index so it keeps pointing at that variable.
  auto remove_var_to_scan_at_index = [&](int index) {
    in_vars_to_scan_[vars_to_scan_[index]] = false;
    vars_to_scan_[index] = vars_to_scan_.back();
    vars_to_scan_.pop_back();
    if (best_index == vars_to_scan_.size()) {
      best_index = index;
    }
  };
  while (!vars_to_scan_.empty() && num_good < num_to_scan) {
    num_ops_ += 6;  // We are slow here.
    // Sample a candidate uniformly among the variables still worth scanning.
    const int index = absl::Uniform<int>(random_, 0, vars_to_scan_.size());
    const int var = vars_to_scan_[index];
    DCHECK_GE(var, 0);
    DCHECK(in_vars_to_scan_[var]);

    if (!ShouldScan(var)) {
      remove_var_to_scan_at_index(index);
      continue;
    }
    const auto [delta, scan_score] = jumps_.GetJump(var);
    // Only poll the shared time limit once every 100 evaluations to keep the
    // check cheap.
    if ((state_->counters.num_general_evals +
         state_->counters.num_linear_evals) %
                100 ==
            0 &&
        shared_time_limit_ != nullptr && shared_time_limit_->LimitReached()) {
      time_limit_crossed_ = true;
      return false;
    }
    const int64_t current_value = state_->solution[var];
    DCHECK(var_domains_[var].Contains(current_value + delta))
        << var << " " << current_value << "+" << delta << " not in "
        << var_domains_[var].ToString();
    DCHECK(!var_domains_[var].IsFixed());
    // A non-negative score cannot improve anything: drop the variable.
    if (scan_score >= 0) {
      remove_var_to_scan_at_index(index);
      continue;
    }
    double score = scan_score;
    if (state_->options.use_compound_moves) {
      // We only use compound moves in general iterations. Recompute the score
      // with the base weights (the scan score may be based on the compound
      // weights instead).
      score = ComputeScore(
          state_->weights, var, delta,
          /*linear_only=*/!var_occurs_in_non_linear_constraint_[var]);
      if (!ShouldExtendCompoundMove(score, score - scan_score)) {
        remove_var_to_scan_at_index(index);
        continue;
      }
    }

    ++num_good;
    // Keep the candidate with the most negative scan score seen so far.
    if (scan_score < best_scan_score) {
      DCHECK_NE(delta, 0) << score;
      *best_var = var;
      *best_value = current_value + delta;
      *best_score = score;
      best_scan_score = scan_score;
      best_index = index;
    }
  }
  if (best_index != -1) {
    DCHECK_GT(num_good, 0);
    DCHECK_GE(*best_var, 0);
    DCHECK_EQ(*best_var, vars_to_scan_[best_index]);
    // The chosen variable leaves the pool; it will be re-added when one of
    // its constraints or its jump changes.
    remove_var_to_scan_at_index(best_index);
    return true;
  }
  return false;
}
1085
1086void FeasibilityJumpSolver::AddVarToScan(int var) {
1087 DCHECK_GE(var, 0);
1088 if (in_vars_to_scan_[var]) return;
1089 if (!ShouldScan(var)) return;
1090 vars_to_scan_.push_back(var);
1091 in_vars_to_scan_[var] = true;
1092}
1093
1094bool FeasibilityJumpSolver::ShouldScan(int var) const {
1095 DCHECK_GE(var, 0);
1096
1097 if (state_->move->OnStack(var)) return false;
1098
1099 if (!jumps_.NeedRecomputation(var)) {
1100 // We already have the score/jump of that variable.
1101 const double score = jumps_.Score(var);
1102 return score < 0.0;
1103 }
1104
1105 // See RecomputeVarsToScan(), we shouldn't have any fixed variable here.
1106 DCHECK(!var_domains_.IsFixed(var));
1107
1108 // Return true iff var is has a better objective value in its domain.
1109 if (var_domains_.HasBetterObjectiveValue(var)) return true;
1110
1111 // We will need to recompute the score. Lets skip variable for which we known
1112 // in advance that there will be no good score.
1113 //
1114 // For the objective, we don't care if it is violated or not, we only want
1115 // to scan variable that might improve it (and thus reduce its violation if it
1116 // is violated).
1117 //
1118 // TODO(user): We should generalize the objective logic to all constraint.
1119 // There is no point scanning a variable of a violated constraint if it is at
1120 // the wrong bound and cannot improve the violation!
1121 return evaluator_->NumViolatedConstraintsForVarIgnoringObjective(var) > 0;
1122}
1123
1124void FeasibilityJumpSolver::RecomputeVarsToScan() {
1125 const int num_variables = var_domains_.size();
1126 jumps_.RecomputeAll(num_variables);
1127 DCHECK(SlowCheckNumViolatedConstraints());
1128
1129 in_vars_to_scan_.assign(num_variables, false);
1130 vars_to_scan_.clear();
1131
1132 // Since the fixed status never changes during one batch, we marks such
1133 // variable as "in_vars_to_scan_" even if we don't add them here. This allow
1134 // to skip them without any extra lookup.
1135 for (const int var : var_domains_.FixedVariables()) {
1136 in_vars_to_scan_[var] = true;
1137 }
1138
1139 num_ops_ += evaluator_->ViolatedConstraints().size();
1140 for (const int c : evaluator_->ViolatedConstraints()) {
1141 num_ops_ += evaluator_->ConstraintToVars(c).size();
1142 for (const int v : evaluator_->ConstraintToVars(c)) {
1143 AddVarToScan(v);
1144 }
1145 }
1146}
1147
1148bool FeasibilityJumpSolver::SlowCheckNumViolatedConstraints() const {
1149 std::vector<int> result;
1150 result.assign(var_domains_.size(), 0);
1151 for (const int c : evaluator_->ViolatedConstraints()) {
1152 if (evaluator_->IsObjectiveConstraint(c)) continue;
1153 for (const int v : evaluator_->ConstraintToVars(c)) {
1154 ++result[v];
1155 }
1156 }
1157 for (int v = 0; v < result.size(); ++v) {
1158 CHECK_EQ(result[v],
1159 evaluator_->NumViolatedConstraintsForVarIgnoringObjective(v));
1160 }
1161 return true;
1162}
1163
  // Reset the per-variable membership flag for every move still on the stack,
  // then drop the moves themselves.
  for (const UnitMove& move : stack_) {
    var_on_stack_[move.var] = false;
  }
  stack_.clear();
}
1170
1171bool CompoundMoveBuilder::OnStack(int var) const {
1172 return !stack_.empty() && var_on_stack_[var];
1173}
1174
1175bool CompoundMoveBuilder::Backtrack(int* var, int64_t* value, double* score) {
1176 if (stack_.empty()) return false;
1177 *var = stack_.back().var;
1178 *value = stack_.back().prev_value;
1179 *score = stack_.back().score;
1180 var_on_stack_[*var] = false;
1181 stack_.pop_back();
1182 if (!stack_.empty()) {
1183 ++stack_.back().discrepancy;
1184 }
1185 return true;
1186}
1187
1188void CompoundMoveBuilder::Push(int var, int64_t prev_value, double score) {
1189 DCHECK(!var_on_stack_[var]);
1190 if (!stack_.empty()) {
1191 stack_.back().best_child_score =
1192 std::min(stack_.back().best_child_score, score);
1193 }
1194 var_on_stack_[var] = true;
1195 stack_.push_back({
1196 .var = var,
1197 .prev_value = prev_value,
1198 .score = -score,
1199 .cumulative_score = Score() + score,
1200 .discrepancy = Discrepancy(),
1201 });
1202}
1203
    absl::Span<const Domain> var_domains) const {
  // Returns true iff every prev_value saved on the stack is still contained
  // in the corresponding variable's current domain.
  for (const UnitMove& move : stack_) {
    if (!var_domains[move.var].Contains(move.prev_value)) return false;
  }
  return true;
}
1211
1212} // namespace operations_research::sat
bool StackValuesInDomains(absl::Span< const Domain > var_domains) const
Returns true if all prev_values on the stack are in the appropriate domain.
double Score() const
Returns the sum of scores of atomic moved pushed to this compound move.
void Clear()
Removes all moves on the stack.
int Discrepancy() const
Returns the number of backtracks to any ancestor of the current leaf.
bool OnStack(int var) const
Returns true if this var has been set in this move already,.
bool Backtrack(int *var, int64_t *value, double *score)
void Push(int var, int64_t prev_value, double score)
~FeasibilityJumpSolver() override
If VLOG_IS_ON(1), it will export a bunch of statistics.
std::function< void()> GenerateTask(int64_t) final
void Recompute(int var)
Recompute the jump for var when GetJump(var) is next called.
std::pair< int64_t, double > GetJump(int var)
Gets the current jump delta and score, recomputing if necessary.
void SetComputeFunction(absl::AnyInvocable< std::pair< int64_t, double >(int) const > compute_jump)
void SetJump(int var, int64_t delta, double score)
void CollectStatistics(const LsState &state)
Accumulate in the relevant bucket the counters of the given states.
void AddTaskDeterministicDuration(double deterministic_duration)
Definition subsolver.h:114
SubsolverType type() const
Returns the type of the subsolver.
Definition subsolver.h:98
std::string name() const
Returns the name of this SubSolver. Used in logs.
Definition subsolver.h:95
bool SolutionIsFeasible(const CpModelProto &model, absl::Span< const int64_t > variable_values, const CpModelProto *mapping_proto, const std::vector< int > *postsolve_mapping)
std::function< bool(const Model &)> IsFixed(IntegerVariable v)
Definition integer.h:1593
std::string FormatCounter(int64_t num)
Prints a positive number with separators for easier reading (ex: 1'348'065).
Definition util.cc:44
Domain ReadDomainFromProto(const ProtoWithDomain &proto)
Reads a Domain from the domain field of a proto.
PushedSolutionPointers PushAndMaybeCombineSolution(SharedResponseManager *response_manager, const CpModelProto &model_proto, absl::Span< const int64_t > new_solution, const std::string &solution_info, absl::Span< const int64_t > base_solution, Model *model)
Select next search node to expand Select next item_i to add this new search node to the search Generate a new search node where item_i is not in the knapsack Check validity of this new partial solution(using propagators) - If valid
std::pair< Point, Value > RangeConvexMinimum(Point begin, Point end, absl::FunctionRef< Value(Point)> f)
Searches in the range [begin, end), where Point supports basic arithmetic.
std::pair< Point, Value > ConvexMinimum(absl::Span< const Point > sorted_points, std::function< Value(Point)> f)
STL namespace.
Definition model.h:51
int64_t Max() const
Returns the max of the domain.
Definition model.cc:346
int64_t Min() const
Returns the min of the domain.
Definition model.cc:340
bool Contains(int64_t value) const
Various inclusion tests on a domain.
Definition model.cc:363
LsCounters counters
Counters for a "non-restarted" run.
std::unique_ptr< CompoundMoveBuilder > move