Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
feasibility_jump.cc
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/feasibility_jump.h"
15
16#include <stdlib.h>
17
18#include <algorithm>
19#include <atomic>
20#include <cmath>
21#include <cstdint>
22#include <functional>
23#include <limits>
24#include <memory>
25#include <string>
26#include <tuple>
27#include <utility>
28#include <vector>
29
30#include "absl/functional/any_invocable.h"
31#include "absl/functional/bind_front.h"
32#include "absl/functional/function_ref.h"
33#include "absl/log/check.h"
34#include "absl/random/bit_gen_ref.h"
35#include "absl/random/distributions.h"
36#include "absl/strings/str_cat.h"
37#include "absl/types/span.h"
41#include "ortools/sat/cp_model.pb.h"
44#include "ortools/sat/integer.h"
46#include "ortools/sat/sat_parameters.pb.h"
49#include "ortools/sat/util.h"
52
53namespace operations_research::sat {
54
55namespace {
56// How much do we discount moves we might fix later.
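// Roughly: while building compound moves, a constraint that is currently
// satisfied only counts for kCompoundDiscount times its weight in the
// scanning scores, so the search is willing to break it temporarily in the
// hope of repairing it later; its full weight is restored as soon as it
// actually becomes violated (see ResetChangedCompoundWeights() below).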
57constexpr double kCompoundDiscount = 1. / 1024;
58} // namespace
59
60void JumpTable::SetComputeFunction(
61 absl::AnyInvocable<std::pair<int64_t, double>(int) const> compute_jump) {
62 compute_jump_ = std::move(compute_jump);
63}
64
65void JumpTable::RecomputeAll(int num_variables) {
66 deltas_.resize(num_variables);
67 scores_.resize(num_variables);
68 needs_recomputation_.assign(num_variables, true);
69}
70
71void JumpTable::SetJump(int var, int64_t delta, double score) {
72 deltas_[var] = delta;
73 scores_[var] = score;
74 needs_recomputation_[var] = false;
75}
76
77void JumpTable::Recompute(int var) { needs_recomputation_[var] = true; }
78
80 const auto& [delta, score] = compute_jump_(var);
81 if (delta != deltas_[var]) {
82 LOG(ERROR) << "Incorrect delta for var " << var << ": " << deltas_[var]
83 << " (should be " << delta << ")";
84 }
85 bool score_ok = true;
86 if (std::abs(score - scores_[var]) / std::max(std::abs(score), 1.0) > 1e-2) {
87 score_ok = false;
88 LOG(ERROR) << "Incorrect score for var " << var << ": " << scores_[var]
89 << " (should be " << score << ") " << " delta = " << delta;
90 }
91 return delta == deltas_[var] && score_ok;
92}
93
94std::pair<int64_t, double> JumpTable::GetJump(int var) {
95 if (needs_recomputation_[var]) {
96 needs_recomputation_[var] = false;
97 std::tie(deltas_[var], scores_[var]) = compute_jump_(var);
98 }
99 return std::make_pair(deltas_[var], scores_[var]);
100}
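// Illustrative usage sketch (names as used later in this file): the solver
// wires a compute callback and then reads jumps lazily, e.g.
//   jumps_.SetComputeFunction(
//       absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
//   jumps_.RecomputeAll(num_variables);   // mark every cached jump dirty
//   const auto [delta, score] = jumps_.GetJump(var);  // recomputed on demand
// Only variables flagged with Recompute(var) pay the recomputation cost on
// their next GetJump(var); all other cached (delta, score) pairs are reused.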
101
102SharedLsStates::~SharedLsStates() {
103 // Do a final collection.
104 for (int i = 0; i < states_.size(); ++i) {
105 CollectStatistics(*states_[i].get());
106 }
107
108 // Display the aggregated statistics.
109 for (const auto& [options, counters] : options_to_stats_) {
110 stat_tables_->AddLsStat(
111 absl::StrCat(name_, "_", options.name()), counters.num_batches,
112 options_to_num_restarts_[options] + counters.num_perturbations,
113 counters.num_linear_moves, counters.num_general_moves,
114 counters.num_compound_moves, counters.num_backtracks,
115 counters.num_weight_updates, counters.num_scores_computed);
116 }
117}
118
119FeasibilityJumpSolver::~FeasibilityJumpSolver() {
120 stat_tables_->AddTimingStat(*this);
121}
122
123void FeasibilityJumpSolver::ImportState() {
124 state_ = states_->GetNextState();
125 if (state_->move == nullptr) {
126 const int num_variables = var_domains_.size();
127 state_->move = std::make_unique<CompoundMoveBuilder>(num_variables);
128 }
129}
130
131void FeasibilityJumpSolver::ReleaseState() { states_->Release(state_); }
132
133void FeasibilityJumpSolver::Initialize() {
134 is_initialized_ = true;
135
136 // For now we just disable or enable it.
137 // But in the future we might have more variation.
138 if (params_.feasibility_jump_linearization_level() == 0) {
139 evaluator_ =
140 std::make_unique<LsEvaluator>(linear_model_->model_proto(), params_);
141 } else {
142 evaluator_ =
143 std::make_unique<LsEvaluator>(linear_model_->model_proto(), params_,
144 linear_model_->ignored_constraints(),
145 linear_model_->additional_constraints());
146 }
147
148 const int num_variables = linear_model_->model_proto().variables().size();
149 var_domains_.resize(num_variables);
150 for (int v = 0; v < num_variables; ++v) {
151 var_domains_.Set(
152 v, ReadDomainFromProto(linear_model_->model_proto().variables(v)));
153 }
154 var_domains_.InitializeObjective(linear_model_->model_proto());
155
156 vars_to_scan_.ClearAndReserve(num_variables);
157 var_occurs_in_non_linear_constraint_.resize(num_variables);
158 for (int c = 0; c < evaluator_->NumNonLinearConstraints(); ++c) {
159 for (int v : evaluator_->GeneralConstraintToVars(c)) {
160 var_occurs_in_non_linear_constraint_[v] = true;
161 }
162 }
163}
164
165namespace {
166
167int64_t ComputeRange(int64_t range, double range_ratio) {
168 return static_cast<int64_t>(
169 std::ceil(static_cast<double>(range) * range_ratio));
170}
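// For intuition (hypothetical numbers): with a domain [0, 1000] and
// range_ratio = 0.05, ComputeRange() returns ceil(1000 * 0.05) = 50, and
// RandomValueNearMin() below draws LogUniform in [0, 50], so values within a
// few units of the bound are much more likely than values ~50 away.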
171
172// TODO(user): Optimize and move to the Domain class.
173// TODO(user): Improve entropy on non continuous domains.
174int64_t RandomValueNearMin(const Domain& domain, double range_ratio,
175 absl::BitGenRef random) {
176 if (domain.Size() == 1) return domain.FixedValue();
177 if (domain.Size() == 2) {
178 return absl::Bernoulli(random, 1 - range_ratio) ? domain.Min()
179 : domain.Max();
180 }
181 const int64_t range = ComputeRange(domain.Max() - domain.Min(), range_ratio);
182 return domain.ValueAtOrBefore(domain.Min() +
183 absl::LogUniform<int64_t>(random, 0, range));
184}
185
186int64_t RandomValueNearMax(const Domain& domain, double range_ratio,
187 absl::BitGenRef random) {
188 if (domain.Size() == 1) return domain.FixedValue();
189 if (domain.Size() == 2) {
190 return absl::Bernoulli(random, 1 - range_ratio) ? domain.Max()
191 : domain.Min();
192 }
193 const int64_t range = ComputeRange(domain.Max() - domain.Min(), range_ratio);
194 return domain.ValueAtOrAfter(domain.Max() -
195 absl::LogUniform<int64_t>(random, 0, range));
196}
197
198int64_t RandomValueNearValue(const Domain& domain, int64_t value,
199 double range_ratio, absl::BitGenRef random) {
200 DCHECK(!domain.IsFixed());
201
202 if (domain.Min() >= value) {
203 return RandomValueNearMin(domain, range_ratio, random);
204 }
205
206 if (domain.Max() <= value) {
207 return RandomValueNearMax(domain, range_ratio, random);
208 }
209
210 // Split up or down, and choose value in split domain.
211 const Domain greater_domain =
212 domain.IntersectionWith({value + 1, domain.Max()});
213 const double choose_greater_probability =
214 static_cast<double>(greater_domain.Size()) /
215 static_cast<double>(domain.Size() - 1);
216 if (absl::Bernoulli(random, choose_greater_probability)) {
217 return RandomValueNearMin(greater_domain, range_ratio, random);
218 } else {
219 return RandomValueNearMax(
220 domain.IntersectionWith({domain.Min(), value - 1}), range_ratio,
221 random);
222 }
223}
224
225} // namespace
226
227void FeasibilityJumpSolver::ResetCurrentSolution(
228 bool use_hint, bool use_objective, double perturbation_probability) {
229 const int num_variables = linear_model_->model_proto().variables().size();
230 const double default_value_probability = 1.0 - perturbation_probability;
231 const double range_ratio =
232 params_.feasibility_jump_var_perburbation_range_ratio();
233 std::vector<int64_t>& solution = state_->solution;
234
235 // Resize the solution if needed.
236 solution.resize(num_variables);
237
238 // Starts with values closest to zero.
239 for (int var = 0; var < num_variables; ++var) {
240 if (var_domains_[var].IsFixed()) {
241 solution[var] = var_domains_[var].FixedValue();
242 continue;
243 }
244
245 if (absl::Bernoulli(random_, default_value_probability)) {
246 solution[var] = var_domains_[var].SmallestValue();
247 } else {
248 solution[var] =
249 RandomValueNearValue(var_domains_[var], 0, range_ratio, random_);
250 }
251 }
252
253 // Use objective half of the time (if the model has one).
254 if (use_objective && linear_model_->model_proto().has_objective()) {
255 const int num_terms =
256 linear_model_->model_proto().objective().vars().size();
257 for (int i = 0; i < num_terms; ++i) {
258 const int var = linear_model_->model_proto().objective().vars(i);
259 if (var_domains_[var].IsFixed()) continue;
260
261 if (linear_model_->model_proto().objective().coeffs(i) > 0) {
262 if (absl::Bernoulli(random_, default_value_probability)) {
263 solution[var] = var_domains_[var].Min();
264 } else {
265 solution[var] =
266 RandomValueNearMin(var_domains_[var], range_ratio, random_);
267 }
268 } else {
269 if (absl::Bernoulli(random_, default_value_probability)) {
270 solution[var] = var_domains_[var].Max();
271 } else {
272 solution[var] =
273 RandomValueNearMax(var_domains_[var], range_ratio, random_);
274 }
275 }
276 }
277 }
278
279 if (use_hint && linear_model_->model_proto().has_solution_hint()) {
280 const auto& hint = linear_model_->model_proto().solution_hint();
281 for (int i = 0; i < hint.vars().size(); ++i) {
282 solution[hint.vars(i)] = hint.values(i);
283 }
284 }
285}
286
287void FeasibilityJumpSolver::PerturbateCurrentSolution(
288 double perturbation_probability) {
289 if (perturbation_probability == 0.0) return;
290 const int num_variables = linear_model_->model_proto().variables().size();
291 const double perturbation_ratio =
292 params_.feasibility_jump_var_perburbation_range_ratio();
293 std::vector<int64_t>& solution = state_->solution;
294 for (int var = 0; var < num_variables; ++var) {
295 if (var_domains_[var].IsFixed()) continue;
296 if (absl::Bernoulli(random_, perturbation_probability)) {
297 solution[var] = RandomValueNearValue(var_domains_[var], solution[var],
298 perturbation_ratio, random_);
299 }
300 }
301}
302
303std::string FeasibilityJumpSolver::OneLineStats() const {
304 // Moves and evaluations in the general iterations.
305 const std::string general_str =
306 state_->counters.num_general_evals == 0 &&
307 state_->counters.num_general_moves == 0
308 ? ""
309 : absl::StrCat(
310 " gen{mvs:", FormatCounter(state_->counters.num_general_moves),
311 " evals:", FormatCounter(state_->counters.num_general_evals),
312 "}");
313 const std::string compound_str =
314 state_->counters.num_compound_moves == 0 &&
315 state_->counters.num_backtracks == 0
316 ? ""
317 : absl::StrCat(" comp{mvs:",
318 FormatCounter(state_->counters.num_compound_moves),
319 " btracks:",
320 FormatCounter(state_->counters.num_backtracks), "}");
321
322 // Improving jumps and infeasible constraints.
323 const int num_infeasible_cts = evaluator_->NumInfeasibleConstraints();
324 const std::string non_solution_str =
325 num_infeasible_cts == 0
326 ? ""
327 : absl::StrCat(" #good_moves:", FormatCounter(vars_to_scan_.size()),
328 " #inf_cts:",
329 FormatCounter(evaluator_->NumInfeasibleConstraints()));
330
331 return absl::StrCat(
332 "batch:", state_->counters.num_batches,
333 " lin{mvs:", FormatCounter(state_->counters.num_linear_moves),
334 " evals:", FormatCounter(state_->counters.num_linear_evals), "}",
335 general_str, compound_str, non_solution_str,
336 " #w_updates:", FormatCounter(state_->counters.num_weight_updates),
337 " #perturb:", FormatCounter(state_->counters.num_perturbations));
338}
339
340std::function<void()> FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) {
341 task_generated_ = true; // Atomic.
342
343 return [this] {
344 // We delay initialization to the first task as it might be a bit slow
345 // to scan the whole model, so we want to do this part in parallel.
346 if (!is_initialized_) Initialize();
347
348 // Load the next state to work on.
349 ImportState();
350
351 // If we found a new best solution, we will restart all violation LS (we
352 // still finish each batch though). We will also reset the Luby sequence.
353 bool new_best_solution_was_found = false;
354 if (type() == SubSolver::INCOMPLETE) {
355 const int64_t best =
356 shared_response_->SolutionsRepository().GetBestRank();
357 if (best < state_->last_solution_rank) {
358 states_->ResetLubyCounter();
359 new_best_solution_was_found = true;
360 state_->last_solution_rank = best;
361 }
362 }
363
364 bool reset_weights = false;
365 if (new_best_solution_was_found || state_->num_batches_before_change <= 0) {
366 reset_weights = true;
367 if (state_->options.use_restart) {
368 states_->CollectStatistics(*state_);
369 state_->options.Randomize(params_, &random_);
370 state_->counters = LsCounters(); // Reset.
371 } else {
372 state_->options.Randomize(params_, &random_);
373 }
374 if (type() == SubSolver::INCOMPLETE) {
375 // This is not used once we have a solution, and setting it to false
376 // allows us to keep the logs accurate.
377 state_->options.use_objective = false;
378 }
379
380 const bool first_time = (state_->num_restarts == 0);
381 if (state_->options.use_restart || first_time ||
382 new_best_solution_was_found) {
383 if (type() == SubSolver::INCOMPLETE) {
384 // Choose a base solution for this neighborhood.
385 const SharedSolutionRepository<int64_t>::Solution solution =
386 shared_response_->SolutionsRepository().GetRandomBiasedSolution(
387 random_);
388 state_->solution = solution.variable_values;
389 ++state_->num_solutions_imported;
390 } else {
391 ResetCurrentSolution(/*use_hint=*/first_time,
392 state_->options.use_objective,
393 params_.feasibility_jump_var_randomization_probability());
394 }
395 } else {
396 PerturbateCurrentSolution(
397 params_.feasibility_jump_var_randomization_probability());
398 }
399
400 if (state_->options.use_restart) {
401 ++state_->num_restarts;
402 states_->ConfigureNextLubyRestart(state_);
403 } else {
404 // TODO(user): Tune the improvement constant, maybe use luby.
405 ++state_->counters.num_perturbations;
406 state_->num_batches_before_change =
407 params_.violation_ls_perturbation_period();
408 }
409 }
410
411 // Between chunks, we synchronize bounds.
412 bool recompute_compound_weights = false;
413 if (linear_model_->model_proto().has_objective()) {
414 const IntegerValue lb = shared_response_->GetInnerObjectiveLowerBound();
415 const IntegerValue ub = shared_response_->GetInnerObjectiveUpperBound();
416 if (lb != state_->saved_inner_objective_lb ||
417 ub != state_->saved_inner_objective_ub) {
418 recompute_compound_weights = true;
419 }
420 state_->saved_inner_objective_lb = lb;
421 state_->saved_inner_objective_ub = ub;
422
423 if (ub < lb) return; // Search is finished.
424 if (evaluator_->ReduceObjectiveBounds(lb.value(), ub.value())) {
425 recompute_compound_weights = true;
426 }
427 }
428
429 // Update the variable domains with the last information.
430 if (!var_domains_.UpdateFromSharedBounds()) return;
431
432 // Checks that the current solution is compatible with the updated domains.
433 {
434 // Make sure the solution is within the potentially updated domain.
435 // This also initializes var_domains_.CanIncrease()/CanDecrease().
436 const int num_vars = state_->solution.size();
437 for (int var = 0; var < num_vars; ++var) {
438 const int64_t old_value = state_->solution[var];
439 const int64_t new_value = var_domains_[var].ClosestValue(old_value);
440 if (new_value != old_value) {
441 state_->solution[var] = new_value;
442 recompute_compound_weights = true;
443 }
444 var_domains_.OnValueChange(var, new_value);
445 }
446 // Check if compound move search might backtrack out of the new domains.
447 if (!state_->move->StackValuesInDomains(var_domains_.AsSpan())) {
448 recompute_compound_weights = true;
449 }
450 }
451
452 // Search for feasible solution.
453 // We always recompute that since we might have loaded from a different
454 // state.
455 evaluator_->ComputeAllViolations(state_->solution);
456
457 if (reset_weights) {
458 state_->bump_value = 1.0;
459 state_->weights.assign(evaluator_->NumEvaluatorConstraints(), 1.0);
460 recompute_compound_weights = true;
461 }
462 if (recompute_compound_weights) {
463 state_->move->Clear();
464 if (state_->options.use_compound_moves) {
465 state_->compound_weights.assign(state_->weights.begin(),
466 state_->weights.end());
467 for (int c = 0; c < state_->weights.size(); ++c) {
468 if (evaluator_->IsViolated(c)) continue;
469 state_->compound_weights[c] *= kCompoundDiscount;
470 }
471 state_->compound_weight_changed.clear();
472 state_->in_compound_weight_changed.assign(state_->weights.size(),
473 false);
474 state_->compound_move_max_discrepancy = 0;
475
476 }
477
478 if (!state_->options.use_compound_moves) {
479 DCHECK_EQ(state_->move->Size(), 0);
480 }
481
482 ++state_->counters.num_batches;
483 if (DoSomeLinearIterations() && DoSomeGeneralIterations()) {
484 // Checks for infeasibility induced by the non-supported constraints.
485 if (SolutionIsFeasible(linear_model_->model_proto(), state_->solution)) {
486 shared_response_->NewSolution(
487 state_->solution, absl::StrCat(name(), "_", state_->options.name(),
488 "(", OneLineStats(), ")"));
489 } else {
490 shared_response_->LogMessage(name(), "infeasible solution. Aborting.");
491 model_is_supported_ = false;
492 }
493 }
494
495 // Update dtime.
496 // Since we execute only one task at a time, this is safe.
497 {
498 // TODO(user): Find better names. DeterministicTime() is maintained by
499 // this class, while deterministic_time() is the one saved in the SubSolver
500 // base class.
501 const double current_dtime = DeterministicTime();
502 const double delta = current_dtime - deterministic_time();
503
504 // Because deterministic_time() is computed with a sum of differences, it
505 // might be slightly different from DeterministicTime() and we don't want
506 // to go backward, even by 1e-18.
507 if (delta >= 0) {
508 AddTaskDeterministicDuration(delta);
509 shared_time_limit_->AdvanceDeterministicTime(delta);
510 }
511 }
512
513 ReleaseState();
514 task_generated_ = false; // Atomic.
515 };
516}
517
518double FeasibilityJumpSolver::ComputeScore(absl::Span<const double> weights,
519 int var, int64_t delta,
520 bool linear_only) {
521 ++state_->counters.num_scores_computed;
522 double score = evaluator_->WeightedViolationDelta(
523 linear_only, weights, var, delta, absl::MakeSpan(state_->solution));
524 constexpr double kEpsilon = 1.0 / std::numeric_limits<int64_t>::max();
525 score += kEpsilon * delta * evaluator_->ObjectiveCoefficient(var);
526 return score;
527}
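// For intuition (hypothetical numbers): kEpsilon is about 1.1e-19, so an
// objective coefficient of 1e6 combined with a delta of 1e6 only adds about
// 1e-7 to the score. The objective term can thus break ties between moves
// with equal weighted violation delta, but it essentially never outweighs a
// violation change of 1.0 or more.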
528
529std::pair<int64_t, double> FeasibilityJumpSolver::ComputeLinearJump(int var) {
530 DCHECK(!var_domains_[var].IsFixed());
531 const int64_t current_value = state_->solution[var];
532
533 ++state_->counters.num_linear_evals;
534 const LinearIncrementalEvaluator& linear_evaluator =
535 evaluator_->LinearEvaluator();
536
537 if (var_domains_.HasTwoValues(var)) {
538 const int64_t min_value = var_domains_[var].Min();
539 const int64_t max_value = var_domains_[var].Max();
540 const int64_t delta = current_value == min_value ? max_value - min_value
541 : min_value - max_value;
542 return std::make_pair(
543 delta, ComputeScore(ScanWeights(), var, delta, /*linear_only=*/true));
544 }
545
546 // In practice, after a few iterations, the chance of finding an improving
547 // move is slim, and we can test that fairly easily with at most two
548 // queries!
549 //
550 // Tricky/Annoying: if the value is not in the domain, we still return it.
551 const int64_t p1 = var_domains_[var].ValueAtOrBefore(current_value - 1);
552 const int64_t p2 = var_domains_[var].ValueAtOrAfter(current_value + 1);
553
554 std::pair<int64_t, double> best_jump;
555 const double v1 = var_domains_[var].Contains(p1)
556 ? ComputeScore(ScanWeights(), var, p1 - current_value,
557 /*linear_only=*/true)
558 : std::numeric_limits<double>::infinity();
559 if (v1 < 0.0) {
560 // Point p1 is improving. Look for best before it.
561 // Note that we can exclude all point after current_value since it is
562 // worse and we assume convexity.
563 const Domain dom = var_domains_[var].IntersectionWith(
564 Domain(std::numeric_limits<int64_t>::min(), p1 - 1));
565 if (dom.IsEmpty()) {
566 best_jump = {p1, v1};
567 } else {
568 tmp_breakpoints_ =
569 linear_evaluator.SlopeBreakpoints(var, current_value, dom);
570 best_jump = ConvexMinimum<int64_t, double>(
571 /*is_to_the_right=*/true, {p1, v1}, tmp_breakpoints_,
572 [this, var, current_value](int64_t jump_value) {
573 return ComputeScore(ScanWeights(), var, jump_value - current_value,
574 /*linear_only=*/true);
575 });
576 }
577 } else {
578 const double v2 = var_domains_[var].Contains(p2)
579 ? ComputeScore(ScanWeights(), var, p2 - current_value,
580 /*linear_only=*/true)
581 : std::numeric_limits<double>::infinity();
582 if (v2 < 0.0) {
583 // Point p2 is improving. Look for best after it.
584 // Similarly, we exclude the other points by convexity.
585 const Domain dom = var_domains_[var].IntersectionWith(
586 Domain(p2 + 1, std::numeric_limits<int64_t>::max()));
587 if (dom.IsEmpty()) {
588 best_jump = {p2, v2};
589 } else {
590 tmp_breakpoints_ =
591 linear_evaluator.SlopeBreakpoints(var, current_value, dom);
592 best_jump = ConvexMinimum<int64_t, double>(
593 /*is_to_the_right=*/false, {p2, v2}, tmp_breakpoints_,
594 [this, var, current_value](int64_t jump_value) {
595 return ComputeScore(ScanWeights(), var,
596 jump_value - current_value,
597 /*linear_only=*/true);
598 });
599 }
600 } else {
601 // We have no improving point; the result is either p1 or p2. This is the
602 // most common scenario, and requires no breakpoint computation!
603 // Choose the direction which increases violation the least,
604 // disambiguating by best objective.
605 if (v1 < v2) {
606 best_jump = {p1, v1};
607 } else {
608 best_jump = {p2, v2};
609 }
610 }
611 }
612 DCHECK_NE(best_jump.first, current_value);
613 return std::make_pair(best_jump.first - current_value, best_jump.second);
614}
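// Sketch of the reasoning above, assuming the weighted violation is convex
// in the variable's value (true for the linear part): probe the closest
// feasible point on each side of the current value. If neither probe has a
// negative score, no farther point can improve either, so at most two
// evaluations suffice. If one side improves, the minimum lies further on
// that side and is found by a convex search over the slope breakpoints.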
615
616std::pair<int64_t, double> FeasibilityJumpSolver::ComputeGeneralJump(int var) {
617 if (!var_occurs_in_non_linear_constraint_[var]) {
618 return ComputeLinearJump(var);
619 }
620 Domain domain = var_domains_[var];
621 if (domain.IsFixed()) return std::make_pair(0, 0.0);
622
623 ++state_->counters.num_general_evals;
624 const int64_t current_value = state_->solution[var];
625 domain = domain.IntersectionWith(
626 Domain(current_value, current_value).Complement());
627 std::pair<int64_t, double> result;
628 for (int i = 0; i < domain.NumIntervals(); ++i) {
629 const int64_t min_delta = domain[i].start - current_value;
630 const int64_t max_delta = domain[i].end - current_value;
631 const auto& [delta, score] = RangeConvexMinimum<int64_t, double>(
632 min_delta, max_delta + 1, [&](int64_t delta) -> double {
633 return ComputeScore(ScanWeights(), var, delta, /*linear_only=*/false);
634 });
635 if (i == 0 || score < result.second) {
636 result = std::make_pair(delta, score);
637 }
638 }
639 DCHECK(domain.Contains(current_value + result.first))
640 << current_value << "+" << result.first << " not in domain "
641 << domain.ToString();
642 return result;
643}
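// Note (illustrative): for general constraints the score need not be convex
// over a domain with holes, so the code above runs RangeConvexMinimum()
// separately on each interval of the domain (minus the current value) and
// keeps the best per-interval result. This is exact when the score is convex
// on each interval and a reasonable heuristic otherwise.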
644
645void FeasibilityJumpSolver::UpdateViolatedConstraintWeights() {
646 ++state_->counters.num_weight_updates;
647
648 // Because we update the weights incrementally, it is better not to have a
649 // super high magnitude, otherwise doing +max_weight and then -max_weight
650 // will just ignore any constraint with a small weight and our
651 // DCHECK(JumpIsUpToDate(var)) will fail more often.
652 const double kMaxWeight = 1e10;
653 const double kBumpFactor = 1.0 / params_.feasibility_jump_decay();
654 const int num_variables = var_domains_.size();
655 if (state_->options.use_decay) {
656 state_->bump_value *= kBumpFactor;
657 }
658
659 // Note that ViolatedConstraints() might contain only linear constraints
660 // depending on how it was initialized and updated.
661 bool rescale = false;
662 num_ops_ += evaluator_->ViolatedConstraints().size();
663 for (const int c : evaluator_->ViolatedConstraints()) {
664 DCHECK(evaluator_->IsViolated(c));
665 if (state_->options.use_compound_moves) {
666 DCHECK_EQ(state_->compound_weights[c], state_->weights[c]);
667 }
668 state_->weights[c] += state_->bump_value;
669 if (state_->options.use_compound_moves) {
670 state_->compound_weights[c] = state_->weights[c];
671 }
672 if (state_->weights[c] > kMaxWeight) {
673 rescale = true;
674 }
675 }
676
677 if (rescale) {
678 const double factor = 1.0 / kMaxWeight;
679 state_->bump_value *= factor;
680 for (int c = 0; c < state_->weights.size(); ++c) {
681 state_->weights[c] *= factor;
682 if (state_->options.use_compound_moves) {
683 state_->compound_weights[c] *= factor;
684 }
685 }
686 jumps_.RecomputeAll(num_variables);
687 return;
688 }
689
690 // Update weight incrementally.
691 //
692 // To maximize floating point precision, we compute the change to the jump
693 // value first and then apply it in one go. Also, in most situations the
694 // change is purely integer and should fit exactly on a double, so we don't
695 // depend on the order in which constraints are listed.
696 LinearIncrementalEvaluator* linear_evaluator =
697 evaluator_->MutableLinearEvaluator();
698 linear_evaluator->ClearAffectedVariables();
699 for_weight_update_.resize(num_variables);
700 num_ops_ += evaluator_->ViolatedConstraints().size();
701 for (const int c : evaluator_->ViolatedConstraints()) {
702 if (c < evaluator_->NumLinearConstraints()) {
703 linear_evaluator->UpdateScoreOnWeightUpdate(
704 c, jumps_.Deltas(), absl::MakeSpan(for_weight_update_));
705 } else {
706 for (const int v : evaluator_->ConstraintToVars(c)) {
707 jumps_.Recompute(v);
708 AddVarToScan(v);
709 }
710 }
711 }
712
713 // Recompute the affected jumps.
714 // Note that the constraint violations are unaffected.
715 absl::Span<double> scores = jumps_.MutableScores();
716 for (const int var : linear_evaluator->VariablesAffectedByLastUpdate()) {
717 // Apply the delta.
718 //
719 // TODO(user): We could compute the minimal bump that would lead to a
720 // good move. That might change depending on the jump value though, so
721 // we can only do that easily for Booleans.
722 if (!var_domains_.HasTwoValues(var)) {
723 jumps_.Recompute(var);
724 } else {
725 // We may know the correct score for binary vars.
726 scores[var] += state_->bump_value * for_weight_update_[var];
727 }
728 AddVarToScan(var);
729 }
730}
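// For intuition (hypothetical numbers): with feasibility_jump_decay = 0.95,
// kBumpFactor is ~1.053, so bump_value grows geometrically and each new bump
// is worth about 5% more than the previous one. Growing the increment this
// way is equivalent to decaying all existing weights, but only the violated
// constraints need to be touched; the rescale by 1/kMaxWeight above keeps
// all magnitudes bounded.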
731
732bool FeasibilityJumpSolver::DoSomeLinearIterations() {
733 if (VLOG_IS_ON(1)) {
734 shared_response_->LogMessageWithThrottling(name(), OneLineStats());
735 }
736
737 // TODO(user): It should be possible to support compound moves with
738 // the specialized linear code, but let's keep it simpler for now.
739 if (state_->options.use_compound_moves) return true;
740
741 evaluator_->RecomputeViolatedList(/*linear_only=*/true);
742 jumps_.SetComputeFunction(
743 absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
744 RecomputeVarsToScan();
745
746 // Do a batch of a given dtime.
747 // Outer loop: when no more greedy moves, update the weight.
748 const double dtime_threshold =
749 DeterministicTime() + params_.feasibility_jump_batch_dtime();
750 while (DeterministicTime() < dtime_threshold) {
751 // Inner loop: greedy descent.
752 while (DeterministicTime() < dtime_threshold) {
753 // Take the best jump score amongst some random candidates.
754 // It is okay if we pick twice the same, we don't really care.
755 int best_var;
756 int64_t best_value;
757 double best_score;
758 if (!ScanRelevantVariables(/*num_to_scan=*/5, &best_var, &best_value,
759 &best_score)) {
760 break;
761 }
762
763 // Perform the move.
764 ++state_->counters.num_linear_moves;
765 const int64_t prev_value = state_->solution[best_var];
766 state_->solution[best_var] = best_value;
767 evaluator_->UpdateLinearScores(best_var, prev_value, best_value,
768 state_->weights, jumps_.Deltas(),
769 jumps_.MutableScores());
770 evaluator_->UpdateViolatedList();
771 var_domains_.OnValueChange(best_var, best_value);
772
773 MarkJumpsThatNeedToBeRecomputed(best_var);
774 if (var_domains_.HasTwoValues(best_var)) {
775 // We already know the score of undoing the move we just did, and that
776 // this is optimal.
777 jumps_.SetJump(best_var, prev_value - best_value, -best_score);
778 }
779 }
780 if (time_limit_crossed_) return false;
781
782 // We will update the weights if the queue is empty.
783 if (vars_to_scan_.empty()) {
784 // Note that we only count linear constraints as violated here.
785 if (evaluator_->ViolatedConstraints().empty()) return true;
786 UpdateViolatedConstraintWeights();
787 }
788 }
789 return false;
790}
791
792// Update the jump scores.
793//
794// We incrementally maintain the score (except for best_var).
795 // However, for non-Booleans, we still need to recompute the jump value.
796// We will do that in a lazy fashion.
797//
798// TODO(user): In the paper, they just recompute the scores and only
799// change the jump values when the constraint weight changes. Experiment?
800// Note however that the current code is quite fast.
801//
802// TODO(user): For non-Boolean, we could easily detect if a non-improving
803// score cannot become improving. We don't need to add such variable to
804// the queue.
805void FeasibilityJumpSolver::MarkJumpsThatNeedToBeRecomputed(int changed_var) {
806 // To keep DCHECKs happy. Note that we might overwrite this afterwards with the
807 // known score/jump of undoing the move.
808 jumps_.Recompute(changed_var);
809
810 // Generic part.
811 // No optimization there, we just update all touched variables.
812 // We need to do this before the Linear part, so that the status is correct in
813 // AddVarToScan() for variables with two values.
814 num_ops_ += evaluator_->VarToGeneralConstraints(changed_var).size();
815 for (const int c : evaluator_->VarToGeneralConstraints(changed_var)) {
816 num_ops_ += evaluator_->GeneralConstraintToVars(c).size();
817 for (const int var : evaluator_->GeneralConstraintToVars(c)) {
818 jumps_.Recompute(var);
819 AddVarToScan(var);
820 }
821 }
822
823 // Linear part.
824 num_ops_ += evaluator_->VariablesAffectedByLastLinearUpdate().size();
825 for (const int var : evaluator_->VariablesAffectedByLastLinearUpdate()) {
826 if (!var_domains_.HasTwoValues(var)) {
827 jumps_.Recompute(var);
828 }
829 AddVarToScan(var);
830 }
831}
832
833bool FeasibilityJumpSolver::DoSomeGeneralIterations() {
834 if (!state_->options.use_compound_moves &&
835 evaluator_->NumNonLinearConstraints() == 0) {
836 return true;
837 }
838 // Non-linear constraints are not evaluated in the linear phase.
839 evaluator_->ComputeAllNonLinearViolations(state_->solution);
840 evaluator_->RecomputeViolatedList(/*linear_only=*/false);
841 if (evaluator_->NumNonLinearConstraints() == 0) {
842 jumps_.SetComputeFunction(
843 absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
844 } else {
845 jumps_.SetComputeFunction(
846 absl::bind_front(&FeasibilityJumpSolver::ComputeGeneralJump, this));
847 }
848 RecomputeVarsToScan();
849
850 const double dtime_threshold =
851 DeterministicTime() + params_.feasibility_jump_batch_dtime();
852 while (DeterministicTime() < dtime_threshold) {
853 int var;
854 int64_t new_value;
855 double score;
856 const bool found_move = ScanRelevantVariables(
857 /*num_to_scan=*/3, &var, &new_value, &score);
858 const bool backtrack =
859 !found_move && state_->move->Backtrack(&var, &new_value, &score);
860 if (found_move || backtrack) {
861 if (backtrack) ++state_->counters.num_backtracks;
862 DCHECK_NE(var, -1) << var << " " << found_move << " " << backtrack;
863
864 // Perform the move.
865 ++state_->counters.num_general_moves;
866 const int64_t prev_value = state_->solution[var];
867 DCHECK_NE(prev_value, new_value);
868 state_->solution[var] = new_value;
869
870 // Update the linear part and non-linear part.
871 evaluator_->UpdateLinearScores(var, prev_value, new_value, ScanWeights(),
872 jumps_.Deltas(), jumps_.MutableScores());
873 evaluator_->UpdateNonLinearViolations(var, prev_value, state_->solution);
874 evaluator_->UpdateViolatedList();
875 var_domains_.OnValueChange(var, new_value);
876
877 if (state_->options.use_compound_moves && !backtrack) {
878 // `!backtrack` is just an optimisation - we can never break any new
879 // constraints on backtrack, so we can never change any
880 // compound_weight_.
881 for (const auto& c : evaluator_->last_update_violation_changes()) {
882 if (evaluator_->IsViolated(c) &&
883 state_->compound_weights[c] != state_->weights[c]) {
884 state_->compound_weights[c] = state_->weights[c];
885 if (!state_->in_compound_weight_changed[c]) {
886 state_->in_compound_weight_changed[c] = true;
887 state_->compound_weight_changed.push_back(c);
888 }
889 for (const int v : evaluator_->ConstraintToVars(c)) {
890 jumps_.Recompute(v);
891 // Vars will be added in MarkJumpsThatNeedToBeRecomputed().
892 }
893 } else if (!evaluator_->IsViolated(c) &&
894 !state_->in_compound_weight_changed[c] &&
895 state_->compound_weights[c] == state_->weights[c]) {
896 state_->in_compound_weight_changed[c] = true;
897 state_->compound_weight_changed.push_back(c);
898 }
899 }
900 }
901
902 // Check that the score for undoing the move is -score with both the
903 // default weights (which may be `state_->weights` or
904 // `state_->compound_weights`), and with `weights` explicitly.
905 // TODO(user): Re-enable DCHECK.
906 if (/* DISABLES CODE */ false && !state_->options.use_decay) {
907 DCHECK_EQ(-score, ComputeScore(state_->weights, var,
908 prev_value - new_value, false));
909 DCHECK_EQ(-score, ComputeScore(ScanWeights(), var,
910 prev_value - new_value, false));
911 }
912
913 MarkJumpsThatNeedToBeRecomputed(var);
914 if (var_domains_.HasTwoValues(var)) {
915 // We already know the score of the only possible move (undoing what we
916 // just did).
917 jumps_.SetJump(var, prev_value - new_value, -score);
918 }
919
920 if (state_->options.use_compound_moves && !backtrack) {
921 // Make sure we can undo the move.
922 DCHECK_NE(prev_value, state_->solution[var]);
923 state_->move->Push(var, prev_value, score);
924 if (state_->move->Score() < 0) {
925 state_->counters.num_compound_moves += state_->move->Size();
926 state_->move->Clear();
927 state_->compound_move_max_discrepancy = 0;
928
929 }
930 continue;
931 } else if (time_limit_crossed_) {
932 return false;
933 }
934
935 DCHECK_EQ(state_->move->Size(), 0);
936 if (evaluator_->ViolatedConstraints().empty()) return true;
937 if (state_->options.use_compound_moves) {
938 ResetChangedCompoundWeights();
939 }
940 if (!state_->options.use_compound_moves ||
941 ++state_->compound_move_max_discrepancy > 2) {
942 state_->compound_move_max_discrepancy = 0;
943 UpdateViolatedConstraintWeights();
944 }
945 }
946 return false;
947}
948
949void FeasibilityJumpSolver::ResetChangedCompoundWeights() {
950 if (!state_->options.use_compound_moves) return;
951 DCHECK_EQ(state_->move->Size(), 0);
952 num_ops_ += state_->compound_weight_changed.size();
953 for (const int c : state_->compound_weight_changed) {
954 state_->in_compound_weight_changed[c] = false;
955 const double expected_weight =
956 (evaluator_->IsViolated(c) ? 1.0 : kCompoundDiscount) *
957 state_->weights[c];
958 if (state_->compound_weights[c] == expected_weight) continue;
959 state_->compound_weights[c] = expected_weight;
960 num_ops_ += evaluator_->ConstraintToVars(c).size();
961 for (const int var : evaluator_->ConstraintToVars(c)) {
962 jumps_.Recompute(var);
963 AddVarToScan(var);
964 }
965 }
966 state_->compound_weight_changed.clear();
967}
968
969bool FeasibilityJumpSolver::ShouldExtendCompoundMove(double score,
970 double novelty) {
971 if (state_->move->Score() + score - std::max(novelty, 0.0) < 0) {
972 return true;
973 }
974 return score < state_->move->BestChildScore();
975}
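// For intuition (hypothetical numbers): if the compound move built so far
// has Score() = -3.0 and a candidate unit move has score = +2.5 with
// novelty = 1.0, then -3.0 + 2.5 - 1.0 = -1.5 < 0 and we extend the move.
// Otherwise we may still extend when the candidate beats the best sibling
// already tried at this depth (BestChildScore()).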
976
977bool FeasibilityJumpSolver::ScanRelevantVariables(int num_to_scan,
978 int* best_var,
979 int64_t* best_value,
980 double* best_score) {
981 if (time_limit_crossed_) return false;
982 if (state_->move->Discrepancy() > state_->compound_move_max_discrepancy) {
983 return false;
984 }
985 double best_scan_score = 0.0;
986 int num_good = 0;
987 int best_index = -1;
988 *best_var = -1;
989 *best_score = 0.0;
990
991 auto remove_var_to_scan_at_index = [&](int index) {
992 in_vars_to_scan_[vars_to_scan_[index]] = false;
993 vars_to_scan_[index] = vars_to_scan_.back();
994 vars_to_scan_.pop_back();
995 if (best_index == vars_to_scan_.size()) {
996 best_index = index;
997 }
998 };
999 while (!vars_to_scan_.empty() && num_good < num_to_scan) {
1000 num_ops_ += 6; // We are slow here.
1001 const int index = absl::Uniform<int>(random_, 0, vars_to_scan_.size());
1002 const int var = vars_to_scan_[index];
1003 DCHECK_GE(var, 0);
1004 DCHECK(in_vars_to_scan_[var]);
1005
1006 if (!ShouldScan(var)) {
1007 remove_var_to_scan_at_index(index);
1008 continue;
1009 }
1010 const auto [delta, scan_score] = jumps_.GetJump(var);
1011 if ((state_->counters.num_general_evals +
1012 state_->counters.num_linear_evals) %
1013 100 ==
1014 0 &&
1015 shared_time_limit_ != nullptr && shared_time_limit_->LimitReached()) {
1016 time_limit_crossed_ = true;
1017 return false;
1018 }
1019 const int64_t current_value = state_->solution[var];
1020 DCHECK(var_domains_[var].Contains(current_value + delta))
1021 << var << " " << current_value << "+" << delta << " not in "
1022 << var_domains_[var].ToString();
1023 DCHECK(!var_domains_[var].IsFixed());
1024 if (scan_score >= 0) {
1025 remove_var_to_scan_at_index(index);
1026 continue;
1027 }
1028 double score = scan_score;
1029 if (state_->options.use_compound_moves) {
1030 // We only use compound moves in general iterations.
1031 score = ComputeScore(
1032 state_->weights, var, delta,
1033 /*linear_only=*/!var_occurs_in_non_linear_constraint_[var]);
1034 if (!ShouldExtendCompoundMove(score, score - scan_score)) {
1035 remove_var_to_scan_at_index(index);
1036 continue;
1037 }
1038 }
1039
1040 ++num_good;
1041 if (scan_score < best_scan_score) {
1042 DCHECK_NE(delta, 0) << score;
1043 *best_var = var;
1044 *best_value = current_value + delta;
1045 *best_score = score;
1046 best_scan_score = scan_score;
1047 best_index = index;
1048 }
1049 }
1050 if (best_index != -1) {
1051 DCHECK_GT(num_good, 0);
1052 DCHECK_GE(*best_var, 0);
1053 DCHECK_EQ(*best_var, vars_to_scan_[best_index]);
1054 remove_var_to_scan_at_index(best_index);
1055 return true;
1056 }
1057 return false;
1058}
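// Note (illustrative): vars_to_scan_ behaves like a sampled candidate pool:
// each round draws a uniformly random entry, lazily evicts entries whose
// cached score is no longer negative, and stops after num_to_scan "good"
// candidates, keeping the one with the best scan score. The swap-with-back
// trick in remove_var_to_scan_at_index() makes each removal O(1).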
1059
1060void FeasibilityJumpSolver::AddVarToScan(int var) {
1061 DCHECK_GE(var, 0);
1062 if (in_vars_to_scan_[var]) return;
1063 if (!ShouldScan(var)) return;
1064 vars_to_scan_.push_back(var);
1065 in_vars_to_scan_[var] = true;
1066}
1067
1068bool FeasibilityJumpSolver::ShouldScan(int var) const {
1069 DCHECK_GE(var, 0);
1070
1071 if (state_->move->OnStack(var)) return false;
1072
1073 if (!jumps_.NeedRecomputation(var)) {
1074 // We already have the score/jump of that variable.
1075 const double score = jumps_.Score(var);
1076 return score < 0.0;
1077 }
1078
1079 // See RecomputeVarsToScan(); we shouldn't have any fixed variables here.
1080 DCHECK(!var_domains_.IsFixed(var));
1081
1082 // Return true iff var has a better objective value in its domain.
1083 if (var_domains_.HasBetterObjectiveValue(var)) return true;
1084
1085 // We will need to recompute the score. Let's skip variables for which we
1086 // know in advance that there will be no good score.
1087 //
1088 // For the objective, we don't care if it is violated or not, we only want
1089 // to scan variables that might improve it (and thus reduce its violation if
1090 // it is violated).
1091 //
1092 // TODO(user): We should generalize the objective logic to all constraints.
1093 // There is no point scanning a variable of a violated constraint if it is at
1094 // the wrong bound and cannot improve the violation!
1095 return evaluator_->NumViolatedConstraintsForVarIgnoringObjective(var) > 0;
1096}
1097
1098void FeasibilityJumpSolver::RecomputeVarsToScan() {
1099 const int num_variables = var_domains_.size();
1100 jumps_.RecomputeAll(num_variables);
1101 DCHECK(SlowCheckNumViolatedConstraints());
1102
1103 in_vars_to_scan_.assign(num_variables, false);
1104 vars_to_scan_.clear();
1105
1106 // Since the fixed status never changes during one batch, we mark such
1107 // variables as "in_vars_to_scan_" even if we don't add them here. This
1108 // allows us to skip them without any extra lookup.
1109 for (const int var : var_domains_.FixedVariables()) {
1110 in_vars_to_scan_[var] = true;
1111 }
1112
1113 num_ops_ += evaluator_->ViolatedConstraints().size();
1114 for (const int c : evaluator_->ViolatedConstraints()) {
1115 num_ops_ += evaluator_->ConstraintToVars(c).size();
1116 for (const int v : evaluator_->ConstraintToVars(c)) {
1117 AddVarToScan(v);
1118 }
1119 }
1120}
1121
1122bool FeasibilityJumpSolver::SlowCheckNumViolatedConstraints() const {
1123 std::vector<int> result;
1124 result.assign(var_domains_.size(), 0);
1125 for (const int c : evaluator_->ViolatedConstraints()) {
1126 if (evaluator_->IsObjectiveConstraint(c)) continue;
1127 for (const int v : evaluator_->ConstraintToVars(c)) {
1128 ++result[v];
1129 }
1130 }
1131 for (int v = 0; v < result.size(); ++v) {
1132 CHECK_EQ(result[v],
1133 evaluator_->NumViolatedConstraintsForVarIgnoringObjective(v));
1134 }
1135 return true;
1136}
1137
1138void CompoundMoveBuilder::Clear() {
1139 for (const UnitMove& move : stack_) {
1140 var_on_stack_[move.var] = false;
1141 }
1142 stack_.clear();
1143}
1144
1145bool CompoundMoveBuilder::OnStack(int var) const {
1146 return !stack_.empty() && var_on_stack_[var];
1147}
1148
1149bool CompoundMoveBuilder::Backtrack(int* var, int64_t* value, double* score) {
1150 if (stack_.empty()) return false;
1151 *var = stack_.back().var;
1152 *value = stack_.back().prev_value;
1153 *score = stack_.back().score;
1154 var_on_stack_[*var] = false;
1155 stack_.pop_back();
1156 if (!stack_.empty()) {
1157 ++stack_.back().discrepancy;
1158 }
1159 return true;
1160}
1161
1162void CompoundMoveBuilder::Push(int var, int64_t prev_value, double score) {
1163 DCHECK(!var_on_stack_[var]);
1164 if (!stack_.empty()) {
1165 stack_.back().best_child_score =
1166 std::min(stack_.back().best_child_score, score);
1167 }
1168 var_on_stack_[var] = true;
1169 stack_.push_back({
1170 .var = var,
1171 .prev_value = prev_value,
1172 .score = -score,
1173 .cumulative_score = Score() + score,
1174 .discrepancy = Discrepancy(),
1175 });
1176}
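// Note (illustrative): each stored UnitMove keeps prev_value and the negated
// score, so Backtrack() can return exactly the value and score of undoing
// the move; cumulative_score tracks the running Score() of the whole stack,
// and discrepancy records how many backtracks already happened among the
// ancestors of this move.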
1177
1178bool CompoundMoveBuilder::StackValuesInDomains(
1179 absl::Span<const Domain> var_domains) const {
1180 for (const UnitMove& move : stack_) {
1181 if (!var_domains[move.var].Contains(move.prev_value)) return false;
1182 }
1183 return true;
1184}
1185
1186} // namespace operations_research::sat