Google OR-Tools v9.15
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
feasibility_jump.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <stdlib.h>
17
18#include <algorithm>
19#include <atomic>
20#include <cmath>
21#include <cstdint>
22#include <functional>
23#include <limits>
24#include <memory>
25#include <string>
26#include <tuple>
27#include <utility>
28#include <vector>
29
30#include "absl/functional/any_invocable.h"
31#include "absl/functional/bind_front.h"
32#include "absl/functional/function_ref.h"
33#include "absl/log/check.h"
34#include "absl/log/log.h"
35#include "absl/log/vlog_is_on.h"
36#include "absl/random/bit_gen_ref.h"
37#include "absl/random/distributions.h"
38#include "absl/strings/str_cat.h"
39#include "absl/types/span.h"
51#include "ortools/sat/util.h"
54
56
namespace {
// Scaling factor applied to the compound weight of constraints that are not
// currently violated: moves touching them are discounted because we might
// have to fix them later.
constexpr double kCompoundDiscount = 1. / 1024;
}  // namespace
61
63 absl::AnyInvocable<std::pair<int64_t, double>(int) const> compute_jump) {
64 compute_jump_ = std::move(compute_jump);
65}
66
67void JumpTable::RecomputeAll(int num_variables) {
68 deltas_.resize(num_variables);
69 scores_.resize(num_variables);
70 needs_recomputation_.assign(num_variables, true);
71}
72
// Caches a known-good (delta, score) pair for `var` and marks the entry as
// fresh so GetJump() will not recompute it.
void JumpTable::SetJump(int var, int64_t delta, double score) {
  deltas_[var] = delta;
  scores_[var] = score;
  needs_recomputation_[var] = false;
}
78
79void JumpTable::Recompute(int var) { needs_recomputation_[var] = true; }
80
81bool JumpTable::JumpIsUpToDate(int var) const {
82 const auto& [delta, score] = compute_jump_(var);
83 if (delta != deltas_[var]) {
84 LOG(ERROR) << "Incorrect delta for var " << var << ": " << deltas_[var]
85 << " (should be " << delta << ")";
86 }
87 bool score_ok = true;
88 if (abs(score - scores_[var]) / std::max(abs(score), 1.0) > 1e-2) {
89 score_ok = false;
90 LOG(ERROR) << "Incorrect score for var " << var << ": " << scores_[var]
91 << " (should be " << score << ") "
92 << " delta = " << delta;
93 }
94 return delta == deltas_[var] && score_ok;
95}
96
97std::pair<int64_t, double> JumpTable::GetJump(int var) {
98 if (needs_recomputation_[var]) {
99 needs_recomputation_[var] = false;
100 std::tie(deltas_[var], scores_[var]) = compute_jump_(var);
101 }
102 return std::make_pair(deltas_[var], scores_[var]);
103}
104
106 // Do a final collection.
107 for (int i = 0; i < states_.size(); ++i) {
108 CollectStatistics(*states_[i].get());
109 }
110
111 // Display aggregated states
112 for (const auto& [options, counters] : options_to_stats_) {
113 stat_tables_->AddLsStat(
114 absl::StrCat(name_, "_", options.name()), counters.num_batches,
115 options_to_num_restarts_[options] + counters.num_perturbations,
116 counters.num_linear_moves, counters.num_general_moves,
117 counters.num_compound_moves, counters.num_backtracks,
118 counters.num_weight_updates, counters.num_scores_computed);
119 }
120}
121
123 stat_tables_->AddTimingStat(*this);
124}
125
126void FeasibilityJumpSolver::ImportState() {
127 state_ = states_->GetNextState();
128 if (state_->move == nullptr) {
129 const int num_variables = var_domains_.size();
130 state_->move = std::make_unique<CompoundMoveBuilder>(num_variables);
131 }
132}
133
134void FeasibilityJumpSolver::ReleaseState() { states_->Release(state_); }
135
136bool FeasibilityJumpSolver::Initialize() {
137 // For now we just disable or enable it.
138 // But in the future we might have more variation.
139 if (params_.feasibility_jump_linearization_level() == 0) {
140 evaluator_ = std::make_unique<LsEvaluator>(linear_model_->model_proto(),
141 params_, &time_limit_);
142 } else {
143 evaluator_ = std::make_unique<LsEvaluator>(
144 linear_model_->model_proto(), params_,
145 linear_model_->ignored_constraints(),
146 linear_model_->additional_constraints(), &time_limit_);
147 }
148
149 if (time_limit_.LimitReached()) {
150 evaluator_.reset();
151 return false;
152 }
153 is_initialized_ = true;
154 const int num_variables = linear_model_->model_proto().variables().size();
155 var_domains_.resize(num_variables);
156 for (int v = 0; v < num_variables; ++v) {
157 var_domains_.Set(
158 v, ReadDomainFromProto(linear_model_->model_proto().variables(v)));
159 }
160 var_domains_.InitializeObjective(linear_model_->model_proto());
161
162 vars_to_scan_.ClearAndReserve(num_variables);
163 var_occurs_in_non_linear_constraint_.resize(num_variables);
164 for (int c = 0; c < evaluator_->NumNonLinearConstraints(); ++c) {
165 for (int v : evaluator_->GeneralConstraintToVars(c)) {
166 var_occurs_in_non_linear_constraint_[v] = true;
167 }
168 }
169 return true;
170}
171
172namespace {
173
// Scales `range` by `range_ratio`, rounding up to the next integer.
int64_t ComputeRange(int64_t range, double range_ratio) {
  const double scaled = static_cast<double>(range) * range_ratio;
  return static_cast<int64_t>(std::ceil(scaled));
}
178
179// TODO(user): Optimize and move to the Domain class.
180// TODO(user): Improve entropy on non continuous domains.
181int64_t RandomValueNearMin(const Domain& domain, double range_ratio,
182 absl::BitGenRef random) {
183 if (domain.Size() == 1) return domain.FixedValue();
184 if (domain.Size() == 2) {
185 return absl::Bernoulli(random, 1 - range_ratio) ? domain.Min()
186 : domain.Max();
187 }
188 const int64_t range = ComputeRange(domain.Max() - domain.Min(), range_ratio);
189 return domain.ValueAtOrBefore(domain.Min() +
190 absl::LogUniform<int64_t>(random, 0, range));
191}
192
193int64_t RandomValueNearMax(const Domain& domain, double range_ratio,
194 absl::BitGenRef random) {
195 if (domain.Size() == 1) return domain.FixedValue();
196 if (domain.Size() == 2) {
197 return absl::Bernoulli(random, 1 - range_ratio) ? domain.Max()
198 : domain.Min();
199 }
200 const int64_t range = ComputeRange(domain.Max() - domain.Min(), range_ratio);
201 return domain.ValueAtOrAfter(domain.Max() -
202 absl::LogUniform<int64_t>(random, 0, range));
203}
204
205int64_t RandomValueNearValue(const Domain& domain, int64_t value,
206 double range_ratio, absl::BitGenRef random) {
207 DCHECK(!domain.IsFixed());
208
209 if (domain.Min() >= value) {
210 return RandomValueNearMin(domain, range_ratio, random);
211 }
212
213 if (domain.Max() <= value) {
214 return RandomValueNearMax(domain, range_ratio, random);
215 }
216
217 // Split up or down, and choose value in split domain.
218 const Domain greater_domain =
219 domain.IntersectionWith({value + 1, domain.Max()});
220 const double choose_greater_probability =
221 static_cast<double>(greater_domain.Size()) /
222 static_cast<double>(domain.Size() - 1);
223 if (absl::Bernoulli(random, choose_greater_probability)) {
224 return RandomValueNearMin(greater_domain, range_ratio, random);
225 } else {
226 return RandomValueNearMax(
227 domain.IntersectionWith({domain.Min(), value - 1}), range_ratio,
228 random);
229 }
230}
231
232} // namespace
233
// Resets the current solution:
//  - fixed variables take their fixed value;
//  - other variables take their value closest to zero, or (with probability
//    `perturbation_probability`) a random value near zero;
//  - objective variables are optionally pushed toward their best bound;
//  - the solution hint, when present and requested, overrides everything.
void FeasibilityJumpSolver::ResetCurrentSolution(
    bool use_hint, bool use_objective, double perturbation_probability) {
  const int num_variables = linear_model_->model_proto().variables().size();
  const double default_value_probability = 1.0 - perturbation_probability;
  const double range_ratio =
      params_.feasibility_jump_var_perburbation_range_ratio();
  std::vector<int64_t>& solution = state_->solution;
  state_->base_solution = nullptr;

  // Resize the solution if needed.
  solution.resize(num_variables);

  // Starts with values closest to zero.
  for (int var = 0; var < num_variables; ++var) {
    if (var_domains_[var].IsFixed()) {
      solution[var] = var_domains_[var].FixedValue();
      continue;
    }

    if (absl::Bernoulli(random_, default_value_probability)) {
      solution[var] = var_domains_[var].SmallestValue();
    } else {
      solution[var] =
          RandomValueNearValue(var_domains_[var], 0, range_ratio, random_);
    }
  }

  // Use objective half of the time (if the model has one).
  if (use_objective && linear_model_->model_proto().has_objective()) {
    const int num_terms =
        linear_model_->model_proto().objective().vars().size();
    for (int i = 0; i < num_terms; ++i) {
      const int var = linear_model_->model_proto().objective().vars(i);
      if (var_domains_[var].IsFixed()) continue;

      // Push toward the bound that improves the objective term.
      if (linear_model_->model_proto().objective().coeffs(i) > 0) {
        if (absl::Bernoulli(random_, default_value_probability)) {
          solution[var] = var_domains_[var].Min();
        } else {
          solution[var] =
              RandomValueNearMin(var_domains_[var], range_ratio, random_);
        }
      } else {
        if (absl::Bernoulli(random_, default_value_probability)) {
          solution[var] = var_domains_[var].Max();
        } else {
          solution[var] =
              RandomValueNearMax(var_domains_[var], range_ratio, random_);
        }
      }
    }
  }

  // The hint, when requested, takes precedence over everything above.
  if (use_hint && linear_model_->model_proto().has_solution_hint()) {
    const auto& hint = linear_model_->model_proto().solution_hint();
    for (int i = 0; i < hint.vars().size(); ++i) {
      solution[hint.vars(i)] = hint.values(i);
    }
  }
}
294
295void FeasibilityJumpSolver::PerturbateCurrentSolution(
296 double perturbation_probability) {
297 if (perturbation_probability == 0.0) return;
298 const int num_variables = linear_model_->model_proto().variables().size();
299 const double perturbation_ratio =
300 params_.feasibility_jump_var_perburbation_range_ratio();
301 std::vector<int64_t>& solution = state_->solution;
302 for (int var = 0; var < num_variables; ++var) {
303 if (var_domains_[var].IsFixed()) continue;
304 if (absl::Bernoulli(random_, perturbation_probability)) {
305 solution[var] = RandomValueNearValue(var_domains_[var], solution[var],
306 perturbation_ratio, random_);
307 }
308 }
309}
310
311std::string FeasibilityJumpSolver::OneLineStats() const {
312 // Moves and evaluations in the general iterations.
313 const std::string general_str =
314 state_->counters.num_general_evals == 0 &&
315 state_->counters.num_general_moves == 0
316 ? ""
317 : absl::StrCat(
318 " gen{mvs:", FormatCounter(state_->counters.num_general_moves),
319 " evals:", FormatCounter(state_->counters.num_general_evals),
320 "}");
321 const std::string compound_str =
322 state_->counters.num_compound_moves == 0 &&
323 state_->counters.num_backtracks == 0
324 ? ""
325 : absl::StrCat(" comp{mvs:",
326 FormatCounter(state_->counters.num_compound_moves),
327 " btracks:",
328 FormatCounter(state_->counters.num_backtracks), "}");
329
330 // Improving jumps and infeasible constraints.
331 const int num_infeasible_cts = evaluator_->NumInfeasibleConstraints();
332 const std::string non_solution_str =
333 num_infeasible_cts == 0
334 ? ""
335 : absl::StrCat(" #good_moves:", FormatCounter(vars_to_scan_.size()),
336 " #inf_cts:",
337 FormatCounter(evaluator_->NumInfeasibleConstraints()));
338
339 return absl::StrCat(
340 "batch:", state_->counters.num_batches,
341 " lin{mvs:", FormatCounter(state_->counters.num_linear_moves),
342 " evals:", FormatCounter(state_->counters.num_linear_evals), "}",
343 general_str, compound_str, non_solution_str,
344 " #w_updates:", FormatCounter(state_->counters.num_weight_updates),
345 " #perturb:", FormatCounter(state_->counters.num_perturbations));
346}
347
// Returns the closure executed by the worker for one "batch" of local search:
// lazy initialization, state import / restart / perturbation, shared-bound
// synchronization, one round of linear then general iterations, and solution
// push plus statistics updates.
std::function<void()> FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) {
  task_generated_ = true;  // Atomic.

  return [this] {
    // We delay initialization to the first task as it might be a bit slow
    // to scan the whole model, so we want to do this part in parallel.
    if (!is_initialized_) {
      if (!Initialize()) {
        return;
      }
    }

    // Load the next state to work on.
    ImportState();

    // If we found a new best solution, we will restart all violation ls (we
    // still finish each batch though). We will also reset the luby sequence.
    bool new_best_solution_was_found = false;
    if (type() == SubSolver::INCOMPLETE) {
      const int64_t best = shared_response_->GetBestSolutionObjective().value();
      if (best < state_->last_solution_rank) {
        states_->ResetLubyCounter();
        new_best_solution_was_found = true;
        state_->last_solution_rank = best;
      }
    }

    bool reset_weights = false;
    if (new_best_solution_was_found || state_->num_batches_before_change <= 0) {
      reset_weights = true;
      if (state_->options.use_restart) {
        states_->CollectStatistics(*state_);
        state_->options.Randomize(params_, &random_);
        state_->counters = LsCounters();  // Reset.
      } else {
        state_->options.Randomize(params_, &random_);
      }
      if (type() == SubSolver::INCOMPLETE) {
        // This is not used once we have a solution, and setting it to false
        // allows fixing the logs.
        state_->options.use_objective = false;
      }

      const bool first_time = (state_->num_restarts == 0);
      if (state_->options.use_restart || first_time ||
          new_best_solution_was_found) {
        if (type() == SubSolver::INCOMPLETE) {
          // Choose a base solution for this neighborhood.
          state_->base_solution =
              shared_response_->SolutionPool().GetSolutionToImprove(random_);
          state_->solution = state_->base_solution->variable_values;
          ++state_->num_solutions_imported;
        } else {
          if (!first_time) {
            // Register this solution before we reset the search.
            const int num_violations = evaluator_->ViolatedConstraints().size();
            shared_hints_->AddSolution(state_->solution, num_violations);
          }
          ResetCurrentSolution(/*use_hint=*/first_time,
                               state_->options.use_objective,
                               state_->options.perturbation_probability);
        }
      } else {
        PerturbateCurrentSolution(
            params_.feasibility_jump_var_randomization_probability());
      }

      if (state_->options.use_restart) {
        ++state_->num_restarts;
        states_->ConfigureNextLubyRestart(state_);
      } else {
        // TODO(user): Tune the improvement constant, maybe use luby.
        ++state_->counters.num_perturbations;
        state_->num_batches_before_change =
            params_.violation_ls_perturbation_period();
      }
    }

    // Between chunk, we synchronize bounds.
    //
    // TODO(user): This does not play well with optimizing solutions whose
    // objective lags behind... Basically, we can run LS on an old solution but
    // will only consider it feasible if it improves the best known solution.
    bool recompute_compound_weights = false;
    if (linear_model_->model_proto().has_objective()) {
      const IntegerValue lb = shared_response_->GetInnerObjectiveLowerBound();
      const IntegerValue ub = shared_response_->GetInnerObjectiveUpperBound();
      if (lb != state_->saved_inner_objective_lb ||
          ub != state_->saved_inner_objective_ub) {
        recompute_compound_weights = true;
      }
      state_->saved_inner_objective_lb = lb;
      state_->saved_inner_objective_ub = ub;

      if (ub < lb) return;  // Search is finished.
      if (evaluator_->ReduceObjectiveBounds(lb.value(), ub.value())) {
        recompute_compound_weights = true;
      }
    }

    // Update the variable domains with the last information.
    if (!var_domains_.UpdateFromSharedBounds()) return;

    // Checks the current solution is compatible with updated domains.
    {
      // Make sure the solution is within the potentially updated domain.
      // This also initialize var_domains_.CanIncrease()/CanDecrease().
      const int num_vars = state_->solution.size();
      for (int var = 0; var < num_vars; ++var) {
        const int64_t old_value = state_->solution[var];
        const int64_t new_value = var_domains_[var].ClosestValue(old_value);
        if (new_value != old_value) {
          state_->solution[var] = new_value;
          recompute_compound_weights = true;
        }
        var_domains_.OnValueChange(var, new_value);
      }
      // Check if compound move search might backtrack out of the new domains.
      if (!state_->move->StackValuesInDomains(var_domains_.AsSpan())) {
        recompute_compound_weights = true;
      }
    }

    // Search for feasible solution.
    // We always recompute that since we might have loaded from a different
    // state.
    evaluator_->ComputeAllViolations(state_->solution);

    if (reset_weights) {
      state_->bump_value = 1.0;
      state_->weights.assign(evaluator_->NumEvaluatorConstraints(), 1.0);
      recompute_compound_weights = true;
    }
    if (recompute_compound_weights) {
      state_->move->Clear();
      if (state_->options.use_compound_moves) {
        // Non-violated constraints get a discounted compound weight.
        state_->compound_weights.assign(state_->weights.begin(),
                                        state_->weights.end());
        for (int c = 0; c < state_->weights.size(); ++c) {
          if (evaluator_->IsViolated(c)) continue;
          state_->compound_weights[c] *= kCompoundDiscount;
        }
        state_->compound_weight_changed.clear();
        state_->in_compound_weight_changed.assign(state_->weights.size(),
                                                  false);
        state_->compound_move_max_discrepancy = 0;
      }
    }

    if (!state_->options.use_compound_moves) {
      DCHECK_EQ(state_->move->Size(), 0);
    }

    ++state_->counters.num_batches;
    if (DoSomeLinearIterations() && DoSomeGeneralIterations()) {
      // Checks for infeasibility induced by the non supported constraints.
      //
      // TODO(user): Checking the objective is faster and we could avoid to
      // check feasibility if we are not going to keep the solution anyway.
      if (SolutionIsFeasible(linear_model_->model_proto(), state_->solution)) {
        auto pointers = PushAndMaybeCombineSolution(
            shared_response_, linear_model_->model_proto(), state_->solution,
            absl::StrCat(name(), "_", state_->options.name(), "(",
                         OneLineStats(), ")"),
            state_->base_solution);
        // If we pushed a new solution, we use it as a new "base" so that we
        // will have a smaller delta on the next solution we find.
        state_->base_solution = pointers.pushed_solution;
      } else {
        shared_response_->LogMessage(name(), "infeasible solution. Aborting.");
        model_is_supported_ = false;
      }
    }

    // Update dtime.
    // Since we execute only one task at the time, this is safe.
    {
      // TODO(user): Find better names. DeterministicTime() is maintained by
      // this class while deterministic_time() is the one saved in the SubSolver
      // base class).
      const double current_dtime = DeterministicTime();
      const double delta = current_dtime - deterministic_time();

      // Because deterministic_time() is computed with a sum of difference, it
      // might be slightly different than DeterministicTime() and we don't want
      // to go backward, even by 1e-18.
      if (delta >= 0) {
        shared_time_limit_->AdvanceDeterministicTime(delta);
      }
    }

    ReleaseState();
    task_generated_ = false;  // Atomic.
  };
}
544
// Score of moving `var` by `delta`: the weighted change in constraint
// violations, plus a tiny objective term (scaled by kEpsilon) used to break
// ties between moves with similar violation impact. Negative scores are
// improving moves.
double FeasibilityJumpSolver::ComputeScore(absl::Span<const double> weights,
                                           int var, int64_t delta,
                                           bool linear_only) {
  double score = evaluator_->WeightedViolationDelta(
      linear_only, weights, var, delta, absl::MakeSpan(state_->solution));
  constexpr double kEpsilon = 1.0 / std::numeric_limits<int64_t>::max();
  score += kEpsilon * delta * evaluator_->ObjectiveCoefficient(var);
  return score;
}
555
// Computes the best (delta, score) jump for `var` considering only the linear
// part of the model, assuming the score is convex in the new value.
std::pair<int64_t, double> FeasibilityJumpSolver::ComputeLinearJump(int var) {
  DCHECK(!var_domains_[var].IsFixed());
  const int64_t current_value = state_->solution[var];

  ++state_->counters.num_linear_evals;
  const LinearIncrementalEvaluator& linear_evaluator =
      evaluator_->LinearEvaluator();

  // Fast path for two-value domains: the only possible jump is flipping to
  // the other value.
  if (var_domains_.HasTwoValues(var)) {
    const int64_t min_value = var_domains_[var].Min();
    const int64_t max_value = var_domains_[var].Max();
    const int64_t delta = current_value == min_value ? max_value - min_value
                                                     : min_value - max_value;
    return std::make_pair(
        delta, ComputeScore(ScanWeights(), var, delta, /*linear_only=*/true));
  }

  // In practice, after a few iterations, the chance of finding an improving
  // move is slim, and we can test that fairly easily with at most two
  // queries!
  //
  // Tricky/Annoying: if the value is not in the domain, we returns it.
  const int64_t p1 = var_domains_[var].ValueAtOrBefore(current_value - 1);
  const int64_t p2 = var_domains_[var].ValueAtOrAfter(current_value + 1);

  std::pair<int64_t, double> best_jump;
  const double v1 = var_domains_[var].Contains(p1)
                        ? ComputeScore(ScanWeights(), var, p1 - current_value,
                                       /*linear_only=*/true)
                        : std::numeric_limits<double>::infinity();
  if (v1 < 0.0) {
    // Point p1 is improving. Look for best before it.
    // Note that we can exclude all point after current_value since it is
    // worse and we assume convexity.
    const Domain dom = var_domains_[var].IntersectionWith(
        Domain(std::numeric_limits<int64_t>::min(), p1 - 1));
    if (dom.IsEmpty()) {
      best_jump = {p1, v1};
    } else {
      tmp_breakpoints_ =
          linear_evaluator.SlopeBreakpoints(var, current_value, dom);
          // NOTE(review): a `best_jump = ConvexMinimum<int64_t, double>(`
          // call head appears truncated in this view of the file — confirm
          // against the full source.
          /*is_to_the_right=*/true, {p1, v1}, tmp_breakpoints_,
          [this, var, current_value](int64_t jump_value) {
            return ComputeScore(ScanWeights(), var, jump_value - current_value,
                                /*linear_only=*/true);
          });
    }
  } else {
    const double v2 = var_domains_[var].Contains(p2)
                          ? ComputeScore(ScanWeights(), var, p2 - current_value,
                                         /*linear_only=*/true)
                          : std::numeric_limits<double>::infinity();
    if (v2 < 0.0) {
      // Point p2 is improving. Look for best after it.
      // Similarly, we exclude the other points by convexity.
      const Domain dom = var_domains_[var].IntersectionWith(
          Domain(p2 + 1, std::numeric_limits<int64_t>::max()));
      if (dom.IsEmpty()) {
        best_jump = {p2, v2};
      } else {
        tmp_breakpoints_ =
            linear_evaluator.SlopeBreakpoints(var, current_value, dom);
            // NOTE(review): same truncated `ConvexMinimum(` call head here.
            /*is_to_the_right=*/false, {p2, v2}, tmp_breakpoints_,
            [this, var, current_value](int64_t jump_value) {
              return ComputeScore(ScanWeights(), var,
                                  jump_value - current_value,
                                  /*linear_only=*/true);
            });
      }
    } else {
      // We have no improving point, result is either p1 or p2. This is the
      // most common scenario, and require no breakpoint computation!
      // Choose the direction which increases violation the least,
      // disambiguating by best objective.
      if (v1 < v2) {
        best_jump = {p1, v1};
      } else {
        best_jump = {p2, v2};
      }
    }
  }
  DCHECK_NE(best_jump.first, current_value);
  return std::make_pair(best_jump.first - current_value, best_jump.second);
}
642
643std::pair<int64_t, double> FeasibilityJumpSolver::ComputeGeneralJump(int var) {
644 if (!var_occurs_in_non_linear_constraint_[var]) {
645 return ComputeLinearJump(var);
646 }
647 Domain domain = var_domains_[var];
648 if (domain.IsFixed()) return std::make_pair(0, 0.0);
649
650 ++state_->counters.num_general_evals;
651 const int64_t current_value = state_->solution[var];
652 domain = domain.IntersectionWith(
653 Domain(current_value, current_value).Complement());
654 std::pair<int64_t, double> result;
655 for (int i = 0; i < domain.NumIntervals(); ++i) {
656 const int64_t min_delta = domain[i].start - current_value;
657 const int64_t max_delta = domain[i].end - current_value;
658 const auto& [delta, score] = RangeConvexMinimum<int64_t, double>(
659 min_delta, max_delta + 1, [&](int64_t delta) -> double {
660 return ComputeScore(ScanWeights(), var, delta, /*linear_only=*/false);
661 });
662 if (i == 0 || score < result.second) {
663 result = std::make_pair(delta, score);
664 }
665 }
666 DCHECK(domain.Contains(current_value + result.first))
667 << current_value << "+" << result.first << " not in domain "
668 << domain.ToString();
669 return result;
670}
671
// Bumps the weight of every currently-violated constraint and incrementally
// patches the cached jump scores that this changes. If a weight would exceed
// kMaxWeight, everything is rescaled and all jumps are recomputed instead.
void FeasibilityJumpSolver::UpdateViolatedConstraintWeights() {
  ++state_->counters.num_weight_updates;

  // Because we update the weight incrementally, it is better to not have a
  // super high magnitude, otherwise doing +max_weight and then -max_weight
  // will just ignore any constraint with a small weight and our
  // DCHECK(JumpIsUpToDate(var)) will fail more often.
  const double kMaxWeight = 1e10;
  const double kBumpFactor = 1.0 / params_.feasibility_jump_decay();
  const int num_variables = var_domains_.size();
  if (state_->options.use_decay) {
    state_->bump_value *= kBumpFactor;
  }

  // Note that ViolatedConstraints() might contain only linear constraint
  // depending on how it was initialized and updated.
  bool rescale = false;
  num_ops_ += evaluator_->ViolatedConstraints().size();
  for (const int c : evaluator_->ViolatedConstraints()) {
    DCHECK(evaluator_->IsViolated(c));
    if (state_->options.use_compound_moves) {
      DCHECK_EQ(state_->compound_weights[c], state_->weights[c]);
    }
    state_->weights[c] += state_->bump_value;
    if (state_->options.use_compound_moves) {
      // Violated constraints carry their full (undiscounted) weight.
      state_->compound_weights[c] = state_->weights[c];
    }
    if (state_->weights[c] > kMaxWeight) {
      rescale = true;
    }
  }

  // A weight overflowed kMaxWeight: rescale all weights and recompute every
  // jump from scratch since incremental score updates are no longer valid.
  if (rescale) {
    const double factor = 1.0 / kMaxWeight;
    state_->bump_value *= factor;
    for (int c = 0; c < state_->weights.size(); ++c) {
      state_->weights[c] *= factor;
      if (state_->options.use_compound_moves) {
        state_->compound_weights[c] *= factor;
      }
    }
    jumps_.RecomputeAll(num_variables);
    return;
  }

  // Update weight incrementally.
  //
  // To maximize floating point precision, we compute the change to jump value
  // first and then apply it in one go. Also, in most situation the change is
  // purely integer and should fit exactly on a double, so we don't depend on
  // the order in which constraint are listed.
  LinearIncrementalEvaluator* linear_evaluator =
      evaluator_->MutableLinearEvaluator();
  linear_evaluator->ClearAffectedVariables();
  for_weight_update_.resize(num_variables);
  num_ops_ += evaluator_->ViolatedConstraints().size();
  for (const int c : evaluator_->ViolatedConstraints()) {
    if (c < evaluator_->NumLinearConstraints()) {
      linear_evaluator->UpdateScoreOnWeightUpdate(
          c, jumps_.Deltas(), absl::MakeSpan(for_weight_update_));
    } else {
      // Non-linear constraints: just invalidate the jumps of their variables.
      for (const int v : evaluator_->ConstraintToVars(c)) {
        jumps_.Recompute(v);
        AddVarToScan(v);
      }
    }
  }

  // Recompute the affected jumps.
  // Note that the constraint violations are unaffected.
  absl::Span<double> scores = jumps_.MutableScores();
  for (const int var : linear_evaluator->VariablesAffectedByLastUpdate()) {
    // Apply the delta.
    //
    // TODO(user): We could compute the minimal bump that would lead to a
    // good move. That might change depending on the jump value though, so
    // we can only do that easily for Booleans.
    if (!var_domains_.HasTwoValues(var)) {
      jumps_.Recompute(var);
    } else {
      // We may know the correct score for binary vars.
      scores[var] += state_->bump_value * for_weight_update_[var];
    }
    AddVarToScan(var);
  }
}
758
// Runs one batch of greedy descent on the linear part of the model. Returns
// true when no linear constraint remains violated (or immediately when
// compound moves are enabled, since this phase is skipped then), and false
// when the batch dtime budget or the time limit was exhausted first.
bool FeasibilityJumpSolver::DoSomeLinearIterations() {
  if (VLOG_IS_ON(1)) {
    shared_response_->LogMessageWithThrottling(name(), OneLineStats());
  }

  // TODO(user): It should be possible to support compound moves with
  // the specialized linear code, but lets keep it simpler for now.
  if (state_->options.use_compound_moves) return true;

  evaluator_->RecomputeViolatedList(/*linear_only=*/true);
  jumps_.SetComputeFunction(
      absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
  RecomputeVarsToScan();

  // Do a batch of a given dtime.
  // Outer loop: when no more greedy moves, update the weight.
  const double dtime_threshold =
      DeterministicTime() + params_.feasibility_jump_batch_dtime();
  while (DeterministicTime() < dtime_threshold) {
    // Inner loop: greedy descent.
    while (DeterministicTime() < dtime_threshold) {
      // Take the best jump score amongst some random candidates.
      // It is okay if we pick twice the same, we don't really care.
      int best_var;
      int64_t best_value;
      double best_score;
      if (!ScanRelevantVariables(/*num_to_scan=*/5, &best_var, &best_value,
                                 &best_score)) {
        break;
      }

      // Perform the move.
      ++state_->counters.num_linear_moves;
      const int64_t prev_value = state_->solution[best_var];
      state_->solution[best_var] = best_value;
      evaluator_->UpdateLinearScores(best_var, prev_value, best_value,
                                     state_->weights, jumps_.Deltas(),
                                     jumps_.MutableScores());
      evaluator_->UpdateViolatedList();
      var_domains_.OnValueChange(best_var, best_value);

      MarkJumpsThatNeedToBeRecomputed(best_var);
      if (var_domains_.HasTwoValues(best_var)) {
        // We already know the score of undoing the move we just did, and that
        // this is optimal.
        jumps_.SetJump(best_var, prev_value - best_value, -best_score);
      }
    }
    if (time_limit_crossed_) return false;

    // We will update the weight unless the queue is non-empty.
    if (vars_to_scan_.empty()) {
      // Note that we only count linear constraint as violated here.
      if (evaluator_->ViolatedConstraints().empty()) return true;
      UpdateViolatedConstraintWeights();
    }
  }
  return false;
}
818
819// Update the jump scores.
820//
821// We incrementally maintain the score (except for best_var).
822// However for non-Boolean, we still need to recompute the jump value.
823// We will do that in a lazy fashion.
824//
825// TODO(user): In the paper, they just recompute the scores and only
826// change the jump values when the constraint weight changes. Experiment?
827// Note however that the current code is quite fast.
828//
829// TODO(user): For non-Boolean, we could easily detect if a non-improving
830// score cannot become improving. We don't need to add such variable to
831// the queue.
// Invalidates (and queues for scanning) the jumps of all variables whose score
// may have changed after `changed_var` moved.
void FeasibilityJumpSolver::MarkJumpsThatNeedToBeRecomputed(int changed_var) {
  // To keep DCHECKs happy. Note that we might overwrite this afterwards with
  // the known score/jump of undoing the move.
  jumps_.Recompute(changed_var);

  // Generic part.
  // No optimization there, we just update all touched variables.
  // We need to do this before the Linear part, so that the status is correct in
  // AddVarToScan() for variable with two values.
  num_ops_ += evaluator_->VarToGeneralConstraints(changed_var).size();
  for (const int c : evaluator_->VarToGeneralConstraints(changed_var)) {
    num_ops_ += evaluator_->GeneralConstraintToVars(c).size();
    for (const int var : evaluator_->GeneralConstraintToVars(c)) {
      jumps_.Recompute(var);
      AddVarToScan(var);
    }
  }

  // Linear part.
  // Two-value variables keep an incrementally-maintained score, so only
  // non-binary variables need a full jump recomputation here.
  num_ops_ += evaluator_->VariablesAffectedByLastLinearUpdate().size();
  for (const int var : evaluator_->VariablesAffectedByLastLinearUpdate()) {
    if (!var_domains_.HasTwoValues(var)) {
      jumps_.Recompute(var);
    }
    AddVarToScan(var);
  }
}
859
// Runs general (non-linear aware) local-search iterations until the batch
// deterministic-time budget is exhausted.
//
// Returns true if all constraints are satisfied (or if there is nothing to do
// in this phase), false if the dtime budget or the time limit was hit first.
bool FeasibilityJumpSolver::DoSomeGeneralIterations() {
  // Nothing to do: no general constraints and no compound moves means the
  // linear phase already covers everything.
  if (!state_->options.use_compound_moves &&
      evaluator_->NumNonLinearConstraints() == 0) {
    return true;
  }
  // Non-linear constraints are not evaluated in the linear phase.
  evaluator_->ComputeAllNonLinearViolations(state_->solution);
  evaluator_->RecomputeViolatedList(/*linear_only=*/false);
  // Use the cheaper jump computation when the model is purely linear.
  if (evaluator_->NumNonLinearConstraints() == 0) {
    jumps_.SetComputeFunction(
        absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this));
  } else {
    jumps_.SetComputeFunction(
        absl::bind_front(&FeasibilityJumpSolver::ComputeGeneralJump, this));
  }
  RecomputeVarsToScan();

  const double dtime_threshold =
      DeterministicTime() + params_.feasibility_jump_batch_dtime();
  while (DeterministicTime() < dtime_threshold) {
    int var;
    int64_t new_value;
    double score;
    // Look for an improving move among a few randomly scanned variables; if
    // none is found, try undoing the last unit move of the current compound
    // move instead.
    const bool found_move = ScanRelevantVariables(
        /*num_to_scan=*/3, &var, &new_value, &score);
    const bool backtrack =
        !found_move && state_->move->Backtrack(&var, &new_value, &score);
    if (found_move || backtrack) {
      if (backtrack) ++state_->counters.num_backtracks;
      DCHECK_NE(var, -1) << var << " " << found_move << " " << backtrack;

      // Perform the move.
      ++state_->counters.num_general_moves;
      const int64_t prev_value = state_->solution[var];
      DCHECK_NE(prev_value, new_value);
      state_->solution[var] = new_value;

      // Update the linear part and non-linear part.
      evaluator_->UpdateLinearScores(var, prev_value, new_value, ScanWeights(),
                                     jumps_.Deltas(), jumps_.MutableScores());
      evaluator_->UpdateNonLinearViolations(var, prev_value, state_->solution);
      evaluator_->UpdateViolatedList();
      var_domains_.OnValueChange(var, new_value);

      if (state_->options.use_compound_moves && !backtrack) {
        // `!backtrack` is just an optimization - we can never break any new
        // constraints on backtrack, so we can never change any
        // compound_weight_.
        for (const auto& c : evaluator_->last_update_violation_changes()) {
          if (evaluator_->IsViolated(c) &&
              state_->compound_weights[c] != state_->weights[c]) {
            // A constraint became violated while discounted: restore its full
            // weight and invalidate the jumps of all its variables.
            state_->compound_weights[c] = state_->weights[c];
            if (!state_->in_compound_weight_changed[c]) {
              state_->in_compound_weight_changed[c] = true;
              state_->compound_weight_changed.push_back(c);
            }
            for (const int v : evaluator_->ConstraintToVars(c)) {
              jumps_.Recompute(v);
              // Vars will be added in MarkJumpsThatNeedToBeRecomputed().
            }
          } else if (!evaluator_->IsViolated(c) &&
                     !state_->in_compound_weight_changed[c] &&
                     state_->compound_weights[c] == state_->weights[c]) {
            // A full-weight constraint became satisfied: remember it so
            // ResetChangedCompoundWeights() can discount it later.
            state_->in_compound_weight_changed[c] = true;
            state_->compound_weight_changed.push_back(c);
          }
        }
      }

      // Check that the score for undoing the move is -score with both the
      // default weights (which may be `state_->weights` or
      // `state_->compound_weights`), and with `weights` explicitly.
      // TODO(user): Re-enable DCHECK.
      if (/* DISABLES CODE */ false && !state_->options.use_decay) {
        DCHECK_EQ(-score, ComputeScore(state_->weights, var,
                                       prev_value - new_value, false));
        DCHECK_EQ(-score, ComputeScore(ScanWeights(), var,
                                       prev_value - new_value, false));
      }

      MarkJumpsThatNeedToBeRecomputed(var);
      if (var_domains_.HasTwoValues(var)) {
        // We already know the score of the only possible move (undoing what we
        // just did).
        jumps_.SetJump(var, prev_value - new_value, -score);
      }

      if (state_->options.use_compound_moves && !backtrack) {
        // Make sure we can undo the move.
        DCHECK_NE(prev_value, state_->solution[var]);
        state_->move->Push(var, prev_value, score);
        if (state_->move->Score() < 0) {
          // The compound move strictly improves overall: commit it by
          // clearing the stack so it can no longer be backtracked.
          state_->counters.num_compound_moves += state_->move->Size();
          state_->move->Clear();
          state_->compound_move_max_discrepancy = 0;
        }
      }
      continue;
    } else if (time_limit_crossed_) {
      return false;
    }

    // No move found and nothing to backtrack: we are at a local minimum.
    DCHECK_EQ(state_->move->Size(), 0);
    if (evaluator_->ViolatedConstraints().empty()) return true;
    if (state_->options.use_compound_moves) {
      ResetChangedCompoundWeights();
    }
    // Escape the local minimum, either by allowing one more discrepancy in
    // compound moves, or by bumping the violated constraints' weights.
    if (!state_->options.use_compound_moves ||
        ++state_->compound_move_max_discrepancy > 2) {
      state_->compound_move_max_discrepancy = 0;
      UpdateViolatedConstraintWeights();
    }
  }
  return false;
}
975
976void FeasibilityJumpSolver::ResetChangedCompoundWeights() {
977 if (!state_->options.use_compound_moves) return;
978 DCHECK_EQ(state_->move->Size(), 0);
979 num_ops_ += state_->compound_weight_changed.size();
980 for (const int c : state_->compound_weight_changed) {
981 state_->in_compound_weight_changed[c] = false;
982 const double expected_weight =
983 (evaluator_->IsViolated(c) ? 1.0 : kCompoundDiscount) *
984 state_->weights[c];
985 if (state_->compound_weights[c] == expected_weight) continue;
986 state_->compound_weights[c] = expected_weight;
987 num_ops_ += evaluator_->ConstraintToVars(c).size();
988 for (const int var : evaluator_->ConstraintToVars(c)) {
989 jumps_.Recompute(var);
990 AddVarToScan(var);
991 }
992 }
993 state_->compound_weight_changed.clear();
994}
995
996bool FeasibilityJumpSolver::ShouldExtendCompoundMove(double score,
997 double novelty) {
998 if (state_->move->Score() + score - std::max(novelty, 0.0) < 0) {
999 return true;
1000 }
1001 return score < state_->move->BestChildScore();
1002}
1003
// Scans up to `num_to_scan` candidate variables picked uniformly at random
// from `vars_to_scan_` and reports the one with the best (most negative)
// scan score.
//
// On success, fills `best_var`, `best_value` (current value + jump delta) and
// `best_score`, then returns true. Returns false if no improving move was
// found, if the time limit was crossed, or if the current compound move
// already exceeds the allowed discrepancy.
bool FeasibilityJumpSolver::ScanRelevantVariables(int num_to_scan,
                                                  int* best_var,
                                                  int64_t* best_value,
                                                  double* best_score) {
  if (time_limit_crossed_) return false;
  if (state_->move->Discrepancy() > state_->compound_move_max_discrepancy) {
    return false;
  }
  double best_scan_score = 0.0;
  int num_good = 0;
  int best_index = -1;
  *best_var = -1;
  *best_score = 0.0;

  // Removes vars_to_scan_[index] via swap-with-last. If the current best
  // candidate was the last element (which just moved into slot `index`),
  // fix `best_index` so it keeps pointing at that candidate.
  auto remove_var_to_scan_at_index = [&](int index) {
    in_vars_to_scan_[vars_to_scan_[index]] = false;
    vars_to_scan_[index] = vars_to_scan_.back();
    vars_to_scan_.pop_back();
    if (best_index == vars_to_scan_.size()) {
      best_index = index;
    }
  };
  while (!vars_to_scan_.empty() && num_good < num_to_scan) {
    num_ops_ += 6;  // We are slow here.
    const int index = absl::Uniform<int>(random_, 0, vars_to_scan_.size());
    const int var = vars_to_scan_[index];
    DCHECK_GE(var, 0);
    DCHECK(in_vars_to_scan_[var]);

    if (!ShouldScan(var)) {
      remove_var_to_scan_at_index(index);
      continue;
    }
    const auto [delta, scan_score] = jumps_.GetJump(var);
    // Only consult the shared time limit every 100 evaluations: checking it
    // is comparatively expensive.
    if ((state_->counters.num_general_evals +
         state_->counters.num_linear_evals) %
                100 ==
            0 &&
        shared_time_limit_ != nullptr && shared_time_limit_->LimitReached()) {
      time_limit_crossed_ = true;
      return false;
    }
    const int64_t current_value = state_->solution[var];
    DCHECK(var_domains_[var].Contains(current_value + delta))
        << var << " " << current_value << "+" << delta << " not in "
        << var_domains_[var].ToString();
    DCHECK(!var_domains_[var].IsFixed());
    // Non-improving variables are dropped from the scan list; they get
    // re-added by AddVarToScan() when one of their constraints is touched.
    if (scan_score >= 0) {
      remove_var_to_scan_at_index(index);
      continue;
    }
    double score = scan_score;
    if (state_->options.use_compound_moves) {
      // We only use compound moves in general iterations.
      // The scan score uses compound weights; recompute the "real" score with
      // the base weights to decide whether to extend the compound move.
      score = ComputeScore(
          state_->weights, var, delta,
          /*linear_only=*/!var_occurs_in_non_linear_constraint_[var]);
      if (!ShouldExtendCompoundMove(score, score - scan_score)) {
        remove_var_to_scan_at_index(index);
        continue;
      }
    }

    ++num_good;
    if (scan_score < best_scan_score) {
      DCHECK_NE(delta, 0) << score;
      *best_var = var;
      *best_value = current_value + delta;
      *best_score = score;
      best_scan_score = scan_score;
      best_index = index;
    }
  }
  if (best_index != -1) {
    DCHECK_GT(num_good, 0);
    DCHECK_GE(*best_var, 0);
    DCHECK_EQ(*best_var, vars_to_scan_[best_index]);
    // The chosen variable is removed from the scan list; it will only come
    // back once one of its constraints is updated.
    remove_var_to_scan_at_index(best_index);
    return true;
  }
  return false;
}
1086
1087void FeasibilityJumpSolver::AddVarToScan(int var) {
1088 DCHECK_GE(var, 0);
1089 if (in_vars_to_scan_[var]) return;
1090 if (!ShouldScan(var)) return;
1091 vars_to_scan_.push_back(var);
1092 in_vars_to_scan_[var] = true;
1093}
1094
1095bool FeasibilityJumpSolver::ShouldScan(int var) const {
1096 DCHECK_GE(var, 0);
1097
1098 if (state_->move->OnStack(var)) return false;
1099
1100 if (!jumps_.NeedRecomputation(var)) {
1101 // We already have the score/jump of that variable.
1102 const double score = jumps_.Score(var);
1103 return score < 0.0;
1104 }
1105
1106 // See RecomputeVarsToScan(), we shouldn't have any fixed variable here.
1107 DCHECK(!var_domains_.IsFixed(var));
1108
1109 // Return true iff var is has a better objective value in its domain.
1110 if (var_domains_.HasBetterObjectiveValue(var)) return true;
1111
1112 // We will need to recompute the score. Lets skip variable for which we known
1113 // in advance that there will be no good score.
1114 //
1115 // For the objective, we don't care if it is violated or not, we only want
1116 // to scan variable that might improve it (and thus reduce its violation if it
1117 // is violated).
1118 //
1119 // TODO(user): We should generalize the objective logic to all constraint.
1120 // There is no point scanning a variable of a violated constraint if it is at
1121 // the wrong bound and cannot improve the violation!
1122 return evaluator_->NumViolatedConstraintsForVarIgnoringObjective(var) > 0;
1123}
1124
1125void FeasibilityJumpSolver::RecomputeVarsToScan() {
1126 const int num_variables = var_domains_.size();
1127 jumps_.RecomputeAll(num_variables);
1128 DCHECK(SlowCheckNumViolatedConstraints());
1129
1130 in_vars_to_scan_.assign(num_variables, false);
1131 vars_to_scan_.clear();
1132
1133 // Since the fixed status never changes during one batch, we marks such
1134 // variable as "in_vars_to_scan_" even if we don't add them here. This allow
1135 // to skip them without any extra lookup.
1136 for (const int var : var_domains_.FixedVariables()) {
1137 in_vars_to_scan_[var] = true;
1138 }
1139
1140 num_ops_ += evaluator_->ViolatedConstraints().size();
1141 for (const int c : evaluator_->ViolatedConstraints()) {
1142 num_ops_ += evaluator_->ConstraintToVars(c).size();
1143 for (const int v : evaluator_->ConstraintToVars(c)) {
1144 AddVarToScan(v);
1145 }
1146 }
1147}
1148
1149bool FeasibilityJumpSolver::SlowCheckNumViolatedConstraints() const {
1150 std::vector<int> result;
1151 result.assign(var_domains_.size(), 0);
1152 for (const int c : evaluator_->ViolatedConstraints()) {
1153 if (evaluator_->IsObjectiveConstraint(c)) continue;
1154 for (const int v : evaluator_->ConstraintToVars(c)) {
1155 ++result[v];
1156 }
1157 }
1158 for (int v = 0; v < result.size(); ++v) {
1159 CHECK_EQ(result[v],
1160 evaluator_->NumViolatedConstraintsForVarIgnoringObjective(v));
1161 }
1162 return true;
1163}
1164
1166 for (const UnitMove& move : stack_) {
1167 var_on_stack_[move.var] = false;
1168 }
1169 stack_.clear();
1170}
1171
1172bool CompoundMoveBuilder::OnStack(int var) const {
1173 return !stack_.empty() && var_on_stack_[var];
1174}
1175
1176bool CompoundMoveBuilder::Backtrack(int* var, int64_t* value, double* score) {
1177 if (stack_.empty()) return false;
1178 *var = stack_.back().var;
1179 *value = stack_.back().prev_value;
1180 *score = stack_.back().score;
1181 var_on_stack_[*var] = false;
1182 stack_.pop_back();
1183 if (!stack_.empty()) {
1184 ++stack_.back().discrepancy;
1185 }
1186 return true;
1187}
1188
1189void CompoundMoveBuilder::Push(int var, int64_t prev_value, double score) {
1190 DCHECK(!var_on_stack_[var]);
1191 if (!stack_.empty()) {
1192 stack_.back().best_child_score =
1193 std::min(stack_.back().best_child_score, score);
1194 }
1195 var_on_stack_[var] = true;
1196 stack_.push_back({
1197 .var = var,
1198 .prev_value = prev_value,
1199 .score = -score,
1200 .cumulative_score = Score() + score,
1201 .discrepancy = Discrepancy(),
1202 });
1203}
1204
1206 absl::Span<const Domain> var_domains) const {
1207 for (const UnitMove& move : stack_) {
1208 if (!var_domains[move.var].Contains(move.prev_value)) return false;
1209 }
1210 return true;
1211}
1212
1213} // namespace operations_research::sat
bool StackValuesInDomains(absl::Span< const Domain > var_domains) const
bool Backtrack(int *var, int64_t *value, double *score)
void Push(int var, int64_t prev_value, double score)
std::function< void()> GenerateTask(int64_t) final
std::pair< int64_t, double > GetJump(int var)
void SetComputeFunction(absl::AnyInvocable< std::pair< int64_t, double >(int) const > compute_jump)
void SetJump(int var, int64_t delta, double score)
void CollectStatistics(const LsState &state)
void AddTaskDeterministicDuration(double deterministic_duration)
Definition subsolver.h:115
SubsolverType type() const
Definition subsolver.h:98
bool SolutionIsFeasible(const CpModelProto &model, absl::Span< const int64_t > variable_values, const CpModelProto *mapping_proto, const std::vector< int > *postsolve_mapping)
PushedSolutionPointers PushAndMaybeCombineSolution(SharedResponseManager *response_manager, const CpModelProto &model_proto, absl::Span< const int64_t > new_solution, absl::string_view solution_info, std::shared_ptr< const SharedSolutionRepository< int64_t >::Solution > base_solution)
std::function< bool(const Model &)> IsFixed(IntegerVariable v)
Definition integer.h:1831
Domain ReadDomainFromProto(const ProtoWithDomain &proto)
Select next search node to expand Select next item_i to add this new search node to the search Generate a new search node where item_i is not in the knapsack Check validity of this new partial solution(using propagators) - If valid
std::pair< Point, Value > RangeConvexMinimum(Point begin, Point end, absl::FunctionRef< Value(Point)> f)
std::string FormatCounter(int64_t num)
Definition logging.cc:30
std::pair< Point, Value > ConvexMinimum(absl::Span< const Point > sorted_points, std::function< Value(Point)> f)
STL namespace.
Definition model.h:50
int64_t Max() const
Definition model.cc:346
int64_t Min() const
Definition model.cc:340
bool Contains(int64_t value) const
Definition model.cc:363
std::unique_ptr< CompoundMoveBuilder > move