// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package operations_research.sat;

option java_package = "com.google.ortools.sat";
option java_multiple_files = true;
option csharp_namespace = "Google.OrTools.Sat";

// Contains the definitions for all the sat algorithm parameters and their
// default values.
message SatParameters {
  // In some contexts, like in a portfolio of searches, it makes sense to name
  // a given parameter set for logging purposes.
  optional string name = 171 [default = ""];

  // ==========================================================================
  // Branching and polarity
  // ==========================================================================

  // Variables without activity (i.e. at the beginning of the search) will be
  // tried in this preferred order.
  enum VariableOrder {
    IN_ORDER = 0;  // As specified by the problem.
    IN_REVERSE_ORDER = 1;
    IN_RANDOM_ORDER = 2;
  }
  optional VariableOrder preferred_variable_order = 1 [default = IN_ORDER];

  // Specifies the initial polarity (true/false) when the solver branches on a
  // variable. This can be modified later by the user, or the phase saving
  // heuristic.
  //
  // Note(user): POLARITY_FALSE is usually a good choice because of the
  // "natural" way to express a linear boolean problem.
  enum Polarity {
    POLARITY_TRUE = 0;
    POLARITY_FALSE = 1;
    POLARITY_RANDOM = 2;
  }
  optional Polarity initial_polarity = 2 [default = POLARITY_FALSE];

  // If this is true, then the polarity of a variable will be the last value it
  // was assigned to, or its default polarity if it was never assigned since
  // the call to ResetDecisionHeuristic().
  //
  // Actually, we use a newer version where we follow the last value in the
  // longest non-conflicting partial assignment in the current phase.
  //
  // This is called 'literal phase saving'. For details see 'A Lightweight
  // Component Caching Scheme for Satisfiability Solvers' K. Pipatsrisawat and
  // A. Darwiche, In 10th International Conference on Theory and Applications
  // of Satisfiability Testing, 2007.
  optional bool use_phase_saving = 44 [default = true];

  // If non-zero, then we change the polarity heuristic after that many
  // conflicts, in an arithmetically increasing fashion. So x the first time,
  // 2 * x the second time, etc...
  optional int32 polarity_rephase_increment = 168 [default = 1000];

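  // As an illustration (added note, not from the original comments): with the
  // default of 1000, the polarity heuristic would change after 1000 conflicts,
  // then after 2000 more (3000 total), then after 3000 more (6000 total), and
  // so on.
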
  // The proportion of polarity chosen at random. Note that this takes
  // precedence over the phase saving heuristic. This is different from
  // initial_polarity:POLARITY_RANDOM because it will select a new random
  // polarity each time the variable is branched upon instead of selecting one
  // initially and then always taking this choice.
  optional double random_polarity_ratio = 45 [default = 0.0];

  // A number between 0 and 1 that indicates the proportion of branching
  // variables that are selected randomly instead of choosing the first
  // variable from the given variable_ordering strategy.
  optional double random_branches_ratio = 32 [default = 0.0];

  // Whether we use the ERWA (Exponential Recency Weighted Average) heuristic
  // as described in "Learning Rate Based Branching Heuristic for SAT solvers",
  // J.H. Liang, V. Ganesh, P. Poupart, K. Czarnecki, SAT 2016.
  optional bool use_erwa_heuristic = 75 [default = false];

  // The initial value of the variables activity. A non-zero value only makes
  // sense when use_erwa_heuristic is true. Experiments with a value of 1e-2
  // together with the ERWA heuristic showed slightly better results than
  // simply using zero. The idea is that when the "learning rate" of a variable
  // becomes lower than this value, then we prefer to branch on variables that
  // have never been explored before. This is not in the ERWA paper.
  optional double initial_variables_activity = 76 [default = 0.0];

  // When this is true, then the variables that appear in any of the reasons of
  // the variables in a conflict have their activity bumped. This is in
  // addition to the variables in the conflict, and the ones that were used
  // during conflict resolution.
  optional bool also_bump_variables_in_conflict_reasons = 77 [default = false];

  // ==========================================================================
  // Conflict analysis
  // ==========================================================================

  // Do we try to minimize conflicts (greedily) when creating them.
  enum ConflictMinimizationAlgorithm {
    NONE = 0;
    SIMPLE = 1;
    RECURSIVE = 2;
    EXPERIMENTAL = 3;
  }
  optional ConflictMinimizationAlgorithm minimization_algorithm = 4
      [default = RECURSIVE];

  // Whether to exploit the binary clauses to minimize learned clauses further.
  enum BinaryMinizationAlgorithm {
    NO_BINARY_MINIMIZATION = 0;
    BINARY_MINIMIZATION_FIRST = 1;
    BINARY_MINIMIZATION_FIRST_WITH_TRANSITIVE_REDUCTION = 4;
    BINARY_MINIMIZATION_WITH_REACHABILITY = 2;
    EXPERIMENTAL_BINARY_MINIMIZATION = 3;
  }
  optional BinaryMinizationAlgorithm binary_minimization_algorithm = 34
      [default = BINARY_MINIMIZATION_FIRST];

  // At a really low cost, during the 1-UIP conflict computation, it is easy to
  // detect if some of the involved reasons are subsumed by the current
  // conflict. When this is true, such clauses are detached and later removed
  // from the problem.
  optional bool subsumption_during_conflict_analysis = 56 [default = true];

  // ==========================================================================
  // Clause database management
  // ==========================================================================

  // Trigger a cleanup when this number of "deletable" clauses is learned.
  optional int32 clause_cleanup_period = 11 [default = 10000];

  // During a cleanup, we will always keep that number of "deletable" clauses.
  // Note that this doesn't include the "protected" clauses.
  optional int32 clause_cleanup_target = 13 [default = 0];

  // During a cleanup, if clause_cleanup_target is 0, we will delete the
  // clause_cleanup_ratio of "deletable" clauses instead of aiming for a fixed
  // target of clauses to keep.
  optional double clause_cleanup_ratio = 190 [default = 0.5];

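  // For a concrete reading of the defaults (added illustration, not from the
  // original comments): a cleanup is triggered every 10000 learned "deletable"
  // clauses, and since clause_cleanup_target is 0, roughly half of them
  // (clause_cleanup_ratio = 0.5, i.e. about 5000 clauses) are deleted at each
  // cleanup.
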
  // Each time a clause activity is bumped, the clause has a chance to be
  // protected during the next cleanup phase. Note that clauses used as a
  // reason are always protected.
  enum ClauseProtection {
    PROTECTION_NONE = 0;    // No protection.
    PROTECTION_ALWAYS = 1;  // Protect all clauses whose activity is bumped.
    PROTECTION_LBD = 2;     // Only protect clauses with a better LBD.
  }
  optional ClauseProtection clause_cleanup_protection = 58
      [default = PROTECTION_NONE];

  // All the clauses with an LBD (literal blocks distance) lower than or equal
  // to this parameter will always be kept.
  optional int32 clause_cleanup_lbd_bound = 59 [default = 5];

  // The clauses that will be kept during a cleanup are the ones that come
  // first under this order. We always keep or exclude ties together.
  enum ClauseOrdering {
    // Order clauses by decreasing activity, then by increasing LBD.
    CLAUSE_ACTIVITY = 0;
    // Order clauses by increasing LBD, then by decreasing activity.
    CLAUSE_LBD = 1;
  }
  optional ClauseOrdering clause_cleanup_ordering = 60
      [default = CLAUSE_ACTIVITY];

  // Same as for the clauses, but for the learned pseudo-Boolean constraints.
  optional int32 pb_cleanup_increment = 46 [default = 200];
  optional double pb_cleanup_ratio = 47 [default = 0.5];

  // ==========================================================================
  // Variable and clause activities
  // ==========================================================================

  // Each time a conflict is found, the activities of some variables are
  // increased by one. Then, the activity of all variables is multiplied by
  // variable_activity_decay.
  //
  // To implement this efficiently, the activity of all the variables is not
  // decayed at each conflict. Instead, the activity increment is multiplied by
  // 1 / decay. When an activity reaches max_variable_activity_value, all
  // activities are multiplied by 1 / max_variable_activity_value.
  optional double variable_activity_decay = 15 [default = 0.8];
  optional double max_variable_activity_value = 16 [default = 1e100];

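  // Worked example (added illustration, not from the original comments): with
  // variable_activity_decay = 0.8, instead of multiplying every activity by
  // 0.8 after each conflict, the bump amount is multiplied by 1 / 0.8 = 1.25,
  // which preserves the same relative ordering of activities. Once some
  // activity reaches 1e100, all activities are multiplied by
  // 1 / max_variable_activity_value (i.e. by 1e-100) to avoid overflow.
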
  // The activity decay starts at 0.8 and is incremented by 0.01 every 5000
  // conflicts until it reaches 0.95. This "hack" seems to work well and comes
  // from:
  //
  // Glucose 2.3 in the SAT 2013 Competition - SAT Competition 2013
  // http://edacc4.informatik.uni-ulm.de/SC13/solver-description-download/136
  optional double glucose_max_decay = 22 [default = 0.95];
  optional double glucose_decay_increment = 23 [default = 0.01];
  optional int32 glucose_decay_increment_period = 24 [default = 5000];

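  // With the defaults above (added illustration, not from the original
  // comments): the decay follows 0.80, 0.81, 0.82, ... and reaches the
  // glucose_max_decay of 0.95 after 15 increments, i.e. after roughly 75000
  // conflicts.
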
  // Clause activity parameters (same effect as the one on the variables).
  optional double clause_activity_decay = 17 [default = 0.999];
  optional double max_clause_activity_value = 18 [default = 1e20];

  // ==========================================================================
  // Restart
  // ==========================================================================

  // Restart algorithms.
  //
  // A reference for the more advanced ones is:
  // Gilles Audemard, Laurent Simon, "Refining Restarts Strategies for SAT
  // and UNSAT", Principles and Practice of Constraint Programming Lecture
  // Notes in Computer Science 2012, pp 118-126
  enum RestartAlgorithm {
    NO_RESTART = 0;

    // Just follow a Luby sequence times restart_period.
    LUBY_RESTART = 1;

    // Moving average restart based on the decision level of conflicts.
    DL_MOVING_AVERAGE_RESTART = 2;

    // Moving average restart based on the LBD of conflicts.
    LBD_MOVING_AVERAGE_RESTART = 3;

    // Fixed period restart every restart_period conflicts.
    FIXED_RESTART = 4;
  }

  // The restart strategies will change each time the strategy_counter is
  // increased. The current strategy will simply be the one at index
  // strategy_counter modulo the number of strategies. Note that if this list
  // includes a NO_RESTART, nothing will change when it is reached because the
  // strategy_counter will only increment after a restart.
  //
  // The idea of switching between search strategies tailored for SAT/UNSAT
  // comes from Chanseok Oh with his COMiniSatPS solver, see
  // http://cs.nyu.edu/~chanseok/. But more generally, it seems really
  // beneficial to try different strategies.
  repeated RestartAlgorithm restart_algorithms = 61;
  optional string default_restart_algorithms = 70
      [default =
           "LUBY_RESTART,LBD_MOVING_AVERAGE_RESTART,DL_MOVING_AVERAGE_RESTART"];

  // Restart period for the FIXED_RESTART strategy. This is also the multiplier
  // used by the LUBY_RESTART strategy.
  optional int32 restart_period = 30 [default = 50];

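  // For illustration (added note, not from the original comments): the Luby
  // sequence is 1, 1, 2, 1, 1, 2, 4, ..., so with restart_period = 50 the
  // LUBY_RESTART strategy restarts after 50, 50, 100, 50, 50, 100, 200, ...
  // conflicts.
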
  // Size of the window for the moving average restarts.
  optional int32 restart_running_window_size = 62 [default = 50];

  // In the moving average restart algorithms, a restart is triggered if the
  // window average times this ratio is greater than the global average.
  optional double restart_dl_average_ratio = 63 [default = 1.0];
  optional double restart_lbd_average_ratio = 71 [default = 1.0];

  // Block a moving restart algorithm if the trail size of the current conflict
  // is greater than the multiplier times the moving average of the trail size
  // at the previous conflicts.
  optional bool use_blocking_restart = 64 [default = false];
  optional int32 blocking_restart_window_size = 65 [default = 5000];
  optional double blocking_restart_multiplier = 66 [default = 1.4];

  // After each restart, if the number of conflicts since the last strategy
  // change is greater than this, then we increment a "strategy_counter" that
  // can be used to change the search strategy used by the following restarts.
  optional int32 num_conflicts_before_strategy_changes = 68 [default = 0];

  // The parameter num_conflicts_before_strategy_changes is increased by that
  // much after each strategy change.
  optional double strategy_change_increase_ratio = 69 [default = 0.0];

  // ==========================================================================
  // Limits
  // ==========================================================================

  // Maximum time allowed in seconds to solve a problem.
  // The counter will start at the beginning of the Solve() call.
  optional double max_time_in_seconds = 36 [default = inf];

  // Maximum time allowed in deterministic time to solve a problem.
  // The deterministic time should be correlated with the real time used by the
  // solver, the time unit being as close as possible to a second.
  optional double max_deterministic_time = 67 [default = inf];

  // Stops after that number of batches has been scheduled. This only makes
  // sense when interleave_search is true.
  optional int32 max_num_deterministic_batches = 291 [default = 0];

  // Maximum number of conflicts allowed to solve a problem.
  //
  // TODO(user): Maybe change the way the conflict limit is enforced?
  // Currently it is enforced on each independent internal SAT solve, rather
  // than on the overall number of conflicts across all solves. So in the
  // context of an optimization problem, this is not really usable directly by
  // a client.
  optional int64 max_number_of_conflicts = 37
      [default = 0x7FFFFFFFFFFFFFFF];  // kint64max

  // Maximum memory allowed for the whole thread containing the solver. The
  // solver will abort as soon as it detects that this limit is crossed. As a
  // result, this limit is approximate, but usually the solver will not go too
  // much over it.
  //
  // TODO(user): This is only used by the pure SAT solver, generalize to
  // CP-SAT.
  optional int64 max_memory_in_mb = 40 [default = 10000];

  // Stop the search when the gap between the best feasible objective (O) and
  // our best objective bound (B) is smaller than a limit.
  // The exact definition is:
  // - Absolute: abs(O - B)
  // - Relative: abs(O - B) / max(1, abs(O)).
  //
  // Important: The relative gap depends on the objective offset! If you
  // artificially shift the objective, you will get widely different values of
  // the relative gap.
  //
  // Note that if the gap is reached, the search status will be OPTIMAL. But
  // one can check the best objective bound to see the actual gap.
  //
  // If the objective is integer, then any absolute gap < 1 will lead to a true
  // optimum. If the objective is floating point, a gap of zero makes little
  // sense, which is why we use a non-zero default value. At the end of the
  // search, we will display a warning if OPTIMAL is reported yet the gap is
  // greater than this absolute gap.
  optional double absolute_gap_limit = 159 [default = 1e-4];
  optional double relative_gap_limit = 160 [default = 0.0];

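  // Worked example (added illustration, not from the original comments): with
  // a best feasible objective O = 100.5 and a best bound B = 100.0, the
  // absolute gap is abs(100.5 - 100.0) = 0.5 and the relative gap is
  // 0.5 / max(1, 100.5) ~= 0.005. A relative_gap_limit of 0.01 would therefore
  // stop the search, while the default absolute_gap_limit of 1e-4 would not.
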
  // ==========================================================================
  // Other parameters
  // ==========================================================================

  // At the beginning of each solve, the random number generator used in some
  // part of the solver is reinitialized to this seed. If you change the random
  // seed, the solver may make different choices during the solving process.
  //
  // For some problems, the running time may vary a lot depending on small
  // changes in the solving algorithm. Running the solver with different seeds
  // enables to have more robust benchmarks when evaluating new features.
  optional int32 random_seed = 31 [default = 1];

  // This is mainly here to test the solver variability. Note that in tests, if
  // not explicitly set to false, all 3 options will be set to true so that
  // clients do not rely on the solver returning a specific solution if there
  // are many equivalent optimal solutions.
  optional bool permute_variable_randomly = 178 [default = false];
  optional bool permute_presolve_constraint_order = 179 [default = false];
  optional bool use_absl_random = 180 [default = false];

  // Whether the solver should log the search progress. This is the main
  // logging parameter and if this is false, none of the logging (callbacks,
  // log_to_stdout, log_to_response, ...) will do anything.
  optional bool log_search_progress = 41 [default = false];

  // Whether the solver should display per sub-solver search statistics.
  // This is only useful if log_search_progress is set to true, and if the
  // number of search workers is > 1. Note that in all cases we display a bit
  // of stats with one line per subsolver.
  optional bool log_subsolver_statistics = 189 [default = false];

  // Add a prefix to all logs.
  optional string log_prefix = 185 [default = ""];

  // Log to stdout.
  optional bool log_to_stdout = 186 [default = true];

  // Log to response proto.
  optional bool log_to_response = 187 [default = false];

  // Whether to use pseudo-Boolean resolution to analyze a conflict. Note that
  // this option only makes sense if your problem is modeled using
  // pseudo-Boolean constraints. If you only have clauses, this shouldn't
  // change anything (except slow the solver down).
  optional bool use_pb_resolution = 43 [default = false];

  // A different algorithm during PB resolution. It minimizes the number of
  // calls to ReduceCoefficients() which can be time consuming. However, the
  // search space will be different and if the coefficients are large, this may
  // lead to integer overflows that could otherwise be prevented.
  optional bool minimize_reduction_during_pb_resolution = 48 [default = false];

  // Whether or not the assumption levels are taken into account during the LBD
  // computation. According to the reference below, not counting them improves
  // the solver in some situations. Note that this only impacts solves under
  // assumptions.
  //
  // Gilles Audemard, Jean-Marie Lagniez, Laurent Simon, "Improving Glucose for
  // Incremental SAT Solving with Assumptions: Application to MUS Extraction"
  // Theory and Applications of Satisfiability Testing - SAT 2013, Lecture
  // Notes in Computer Science Volume 7962, 2013, pp 309-317.
  optional bool count_assumption_levels_in_lbd = 49 [default = true];

  // ==========================================================================
  // Presolve
  // ==========================================================================

  // During presolve, only try to perform the bounded variable elimination
  // (BVE) of a variable x if the number of occurrences of x times the number
  // of occurrences of not(x) is not greater than this parameter.
  optional int32 presolve_bve_threshold = 54 [default = 500];

  // During presolve, we apply BVE only if this weight times the number of
  // clauses plus the number of clause literals is not increased.
  optional int32 presolve_bve_clause_weight = 55 [default = 3];

  // The maximum "deterministic" time limit to spend in probing. A value of
  // zero will disable the probing.
  //
  // TODO(user): Clean up. The first one is used in CP-SAT, the other in pure
  // SAT presolve.
  optional double probing_deterministic_time_limit = 226 [default = 1.0];
  optional double presolve_probing_deterministic_time_limit = 57
      [default = 30.0];

  // Whether we use a heuristic to detect some basic cases of blocked clauses
  // in the SAT presolve.
  optional bool presolve_blocked_clause = 88 [default = true];

  // Whether or not we use Bounded Variable Addition (BVA) in the presolve.
  optional bool presolve_use_bva = 72 [default = true];

  // Apply Bounded Variable Addition (BVA) if the number of clauses is reduced
  // by strictly more than this threshold. The algorithm described in the paper
  // uses 0, but quick experiments showed that 1 is a good value. It may not be
  // worth it to add a new variable just to remove one clause.
  optional int32 presolve_bva_threshold = 73 [default = 1];

  // In case of a large reduction in a presolve iteration, we perform multiple
  // presolve iterations. This parameter controls the maximum number of such
  // presolve iterations.
  optional int32 max_presolve_iterations = 138 [default = 3];

  // Whether we presolve the cp_model before solving it.
  optional bool cp_model_presolve = 86 [default = true];

  // How much effort do we spend on probing. 0 disables it completely.
  optional int32 cp_model_probing_level = 110 [default = 2];

  // Whether we also use the sat presolve when cp_model_presolve is true.
  optional bool cp_model_use_sat_presolve = 93 [default = true];

  // If true, we detect variables that are unique to a table constraint and
  // only there to encode a cost on each tuple. This is usually the case when a
  // WCSP (weighted constraint program) is encoded into CP-SAT format.
  //
  // This can lead to a dramatic speed-up for such problems but is still
  // experimental at this point.
  optional bool detect_table_with_cost = 216 [default = false];

  // How much we try to "compress" a table constraint. Compressing more leads
  // to fewer Booleans and faster propagation but can reduce the quality of the
  // lp relaxation. Values go from 0 to 3, where at 3 we always try to fully
  // compress a table. At 2, we try to automatically decide if it is worth it.
  optional int32 table_compression_level = 217 [default = 2];

  // If true, expand all_different constraints that are not permutations.
  // Permutations (#Variables = #Values) are always expanded.
  optional bool expand_alldiff_constraints = 170 [default = false];

  // If true, expand the reservoir constraints by creating booleans for all
  // possible precedences between events and encoding the constraint.
  optional bool expand_reservoir_constraints = 182 [default = true];

  // Mainly useful for testing.
  //
  // If this and expand_reservoir_constraints are true, we use a different
  // encoding of the reservoir constraint using circuit instead of precedences.
  // Note that this is usually slower, but can exercise different parts of the
  // solver. Note that contrary to the precedence encoding, this easily
  // supports
  //
  // WARNING: with this encoding, the constraint takes a slightly different
  // meaning. The level must be within the reservoir for any permutation of the
  // events. So we cannot have +100 and -100 at the same time if the maximum
  // level is 10 (as authorized by the reservoir constraint).
  optional bool expand_reservoir_using_circuit = 288 [default = false];

  // Encode a cumulative with fixed demands and capacity as a reservoir
  // constraint. The only reason you might want to do that is to test the
  // reservoir propagation code!
  optional bool encode_cumulative_as_reservoir = 287 [default = false];

  // If the number of expressions in the lin_max is less than the max size
  // parameter, model expansion replaces target = max(xi) by a linear
  // constraint with the introduction of new booleans bi such that
  // bi => target == xi.
  //
  // This is mainly for experimenting compared to a custom lin_max propagator.
  optional int32 max_lin_max_size_for_expansion = 280 [default = 0];

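  // A sketch of this expansion for two expressions (added illustration, not
  // from the original comments), for target = max(x1, x2):
  //   target >= x1, target >= x2
  //   b1 => target == x1
  //   b2 => target == x2
  //   b1 + b2 >= 1
  // so that target is forced to equal at least one of the expressions.
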
  // If true, it disables all constraint expansion.
  // This should only be used to test the presolve of expanded constraints.
  optional bool disable_constraint_expansion = 181 [default = false];

  // Linear constraints with a complex right hand side (more than a single
  // interval) need to be expanded; there are a couple of ways to do that.
  optional bool encode_complex_linear_constraint_with_integer = 223
      [default = false];

  // During presolve, we use a maximum clique heuristic to merge together
  // no-overlap constraints or at most one constraints. This code can be slow,
  // so we have a limit in place on the number of explored nodes in the
  // underlying graph. The internal limit is an int64, but we use double here
  // to simplify manual input.
  optional double merge_no_overlap_work_limit = 145 [default = 1e12];
  optional double merge_at_most_one_work_limit = 146 [default = 1e8];

  // How much substitution (also called free variable aggregation in MIP
  // literature) should we perform at presolve. This currently only concerns
  // variables appearing only in linear constraints. For now the value 0 turns
  // it off and any positive value performs substitution.
  optional int32 presolve_substitution_level = 147 [default = 1];

  // If true, we will extract from linear constraints enforcement literals of
  // the form "integer variable at bound => simplified constraint". This should
  // always be beneficial except that we don't always handle them as
  // efficiently as we could for now. This causes problems on manna81.mps (LP
  // relaxation not as tight, it seems) and on neos-3354841-apure.mps.gz (too
  // many literals created this way).
  optional bool presolve_extract_integer_enforcement = 174 [default = false];

  // A few presolve operations involve detecting constraints included in other
  // constraints. Since there can be a quadratic number of such pairs, and
  // processing them usually involves scanning them, the complexity of these
  // operations can be big. This enforces a local deterministic limit on the
  // number of entries scanned. Default is 1e8.
  //
  // A value of zero will disable these presolve rules completely.
  optional int64 presolve_inclusion_work_limit = 201 [default = 100000000];

  // If true, we don't keep names in our internal copy of the user given model.
  optional bool ignore_names = 202 [default = true];

  // Run a max-clique code amongst all the x != y we can find and try to infer
  // sets of variables that are all different. This allows us to close
  // neos16.mps for instance. Note that we only run this code if there is no
  // all_diff already in the model so that if a user wants to add some
  // all_diff, we assume it is well done and do not try to add more.
  //
  // This will also detect and add no_overlap constraints, if all the relations
  // x != y have "offsets" between them, i.e. x > y + offset.
  optional bool infer_all_diffs = 233 [default = true];

  // Try to find large "rectangles" in the linear constraint matrix with
  // identical lines. If such a rectangle is big enough, we can introduce a new
  // integer variable corresponding to the common expression and greatly reduce
  // the number of non-zeros.
  optional bool find_big_linear_overlap = 234 [default = true];

  // ==========================================================================
  // Inprocessing
  // ==========================================================================

  // Enable or disable "inprocessing" which is some SAT presolving done at
  // each restart to the root level.
  optional bool use_sat_inprocessing = 163 [default = true];

  // Proportion of deterministic time we should spend on inprocessing.
  // At each "restart", if the proportion is below this ratio, we will do some
  // inprocessing, otherwise, we skip it for this restart.
  optional double inprocessing_dtime_ratio = 273 [default = 0.2];

  // The amount of dtime we should spend on probing for each inprocessing
  // round.
  optional double inprocessing_probing_dtime = 274 [default = 1.0];

  // Parameters for a heuristic similar to the one described in "An effective
  // learnt clause minimization approach for CDCL Sat Solvers",
  // https://www.ijcai.org/proceedings/2017/0098.pdf
  //
  // This is the amount of dtime we should spend on this technique during each
  // inprocessing phase.
  //
  // The minimization technique is the same as the one used to minimize cores
  // in max-sat. We also minimize problem clauses and not just the learned
  // clauses that we keep forever like in the paper.
  optional double inprocessing_minimization_dtime = 275 [default = 1.0];

  // ==========================================================================
  // Multithread
  // ==========================================================================

  // Specify the number of parallel workers (i.e. threads) to use during
  // search. This should usually be lower than your number of available cpus +
  // hyperthreads in your machine.
  //
  // A value of 0 means the solver will try to use all cores on the machine.
  // A value of 1 means no parallelism.
  //
  // Note that 'num_workers' is the preferred name, but if it is set to zero,
  // we will still read the deprecated 'num_search_workers'.
  //
  // As of 2020-04-10, if you're using SAT via MPSolver (to solve integer
  // programs) this field is overridden with a value of 8, if the field is not
  // set *explicitly*. Thus, always set this field explicitly or via
  // MPSolver::SetNumThreads().
  optional int32 num_workers = 206 [default = 0];
  optional int32 num_search_workers = 100 [default = 0];

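  // For illustration (added note, not from the original comments), a minimal
  // Python sketch, assuming the ortools package is installed, showing how
  // these parameters are typically set through the CP-SAT API:
  //
  //   from ortools.sat.python import cp_model
  //
  //   model = cp_model.CpModel()
  //   solver = cp_model.CpSolver()
  //   # solver.parameters is a SatParameters message as defined in this file.
  //   solver.parameters.num_workers = 8
  //   solver.parameters.max_time_in_seconds = 10.0
  //   status = solver.Solve(model)
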
  // We distinguish subsolvers that consume a full thread, and the ones that
  // are always interleaved. If left at zero, we will fix this with a default
  // formula that depends on num_workers. But if you start modifying what runs,
  // you might want to fix that to a given value depending on the num_workers
  // you use.
  optional int32 num_full_subsolvers = 294 [default = 0];

  // In multi-thread, the solver can be mainly seen as a portfolio of solvers
  // with different parameters. This field indicates the names of the
  // parameters that are used in multithread. This only applies to "full"
  // subsolvers.
  //
  // See cp_model_search.cc to see a list of the names and the default value
  // (if left empty) that looks like:
  //   - default_lp (linearization_level:1)
  //   - fixed (only if fixed search specified or scheduling)
  //   - no_lp (linearization_level:0)
  //   - max_lp (linearization_level:2)
  //   - pseudo_costs (only if objective, change search heuristic)
  //   - reduced_costs (only if objective, change search heuristic)
  //   - quick_restart (kind of probing)
  //   - quick_restart_no_lp (kind of probing with linearization_level:0)
  //   - lb_tree_search (to improve lower bound, MIP like tree search)
  //   - probing (continuous probing and shaving)
  //
  // Also, note that some sets of parameters will be ignored if they do not
  // make sense. For instance if there is no objective, pseudo_cost or
  // reduced_cost search will be ignored. Core based search will only work if
  // the objective has many terms. If there is no fixed strategy, fixed will be
  // ignored. And so on.
  //
  // The order is important, as only the first num_full_subsolvers will be
  // scheduled. You can see in the log which ones are selected for a given run.
  repeated string subsolvers = 207;

  // A convenient way to add more worker types.
  // These will be added at the beginning of the list.
  repeated string extra_subsolvers = 219;

  // Rather than fully specifying subsolvers, it is often convenient to just
  // remove the ones that are not useful on a given problem or only keep
  // specific ones for testing. Each string is interpreted as a "glob", so we
  // support '*' and '?'.
  //
  // The way this works is that we will only accept a name that matches a
  // filter pattern (if non-empty) and does not match an ignore pattern. Note
  // also that these fields work on LNS or LS names even if these are currently
  // not specified via the subsolvers field.
  repeated string ignore_subsolvers = 209;
  repeated string filter_subsolvers = 293;

  // It is possible to specify additional subsolver configurations. These can
  // be referred to by their params.name() in the fields above. Note that only
  // the specified fields will "overwrite" the ones of the base parameters. If
  // a subsolver_params has the name of an existing subsolver configuration,
  // the named parameters will be merged into the subsolver configuration.
  repeated SatParameters subsolver_params = 210;

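  // For illustration (added note, not from the original comments), a minimal
  // Python sketch, assuming the ortools package, that removes some workers by
  // glob and overrides the existing "no_lp" configuration by name; the chosen
  // names and values are just examples:
  //
  //   from ortools.sat import sat_parameters_pb2
  //   from ortools.sat.python import cp_model
  //
  //   solver = cp_model.CpSolver()
  //   # Drop the LP-heavy worker and any worker whose name starts with
  //   # "probing".
  //   solver.parameters.ignore_subsolvers.append("max_lp")
  //   solver.parameters.ignore_subsolvers.append("probing*")
  //   # Merge an override into the existing "no_lp" subsolver configuration.
  //   override = solver.parameters.subsolver_params.add()
  //   override.name = "no_lp"
  //   override.search_branching = (
  //       sat_parameters_pb2.SatParameters.PORTFOLIO_SEARCH)
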
  // Experimental. If this is true, then we interleave all our major search
  // strategies and distribute the work amongst num_workers.
  //
  // The search is deterministic (independently of num_workers!), and we
  // schedule and wait for interleave_batch_size tasks to be completed before
  // synchronizing and scheduling the next batch of tasks.
  optional bool interleave_search = 136 [default = false];
  optional int32 interleave_batch_size = 134 [default = 0];

  // Allows objective sharing between workers.
  optional bool share_objective_bounds = 113 [default = true];

  // Allows sharing of the bounds of modified variables at level 0.
  optional bool share_level_zero_bounds = 114 [default = true];

  // Allows sharing of new learned binary clauses between workers.
  optional bool share_binary_clauses = 203 [default = true];

  // Allows sharing of short glue clauses between workers.
  // Implicitly disabled if share_binary_clauses is false.
  optional bool share_glue_clauses = 285 [default = false];

  // ==========================================================================
  // Debugging parameters
  // ==========================================================================

  // We have two different postsolve implementations. The default one should be
  // better and it allows for a more powerful presolve, but it can be useful to
  // postsolve using the full solver instead.
  optional bool debug_postsolve_with_full_solver = 162 [default = false];

  // If positive, try to stop just after that many presolve rules have been
  // applied. This is mainly useful for debugging presolve.
  optional int32 debug_max_num_presolve_operations = 151 [default = 0];

  // Crash if we do not manage to complete the hint into a full solution.
  optional bool debug_crash_on_bad_hint = 195 [default = false];

  // ==========================================================================
  // Max-sat parameters
  // ==========================================================================

  // For an optimization problem, whether we follow some hints in order to find
  // a better first solution. For a variable with hint, the solver will always
  // try to follow the hint. It will revert to the variable_branching default
  // otherwise.
  optional bool use_optimization_hints = 35 [default = true];

  // If positive, we spend some effort on each core:
  // - At level 1, we use a simple heuristic to try to minimize an UNSAT core.
  // - At level 2, we use propagation to minimize the core but also identify
  //   literals in an at most one relationship in this core.
  optional int32 core_minimization_level = 50 [default = 2];

  // Whether we try to find more independent cores for a given set of
  // assumptions in the core based max-SAT algorithms.
  optional bool find_multiple_cores = 84 [default = true];

  // If true, when the max-sat algorithm finds a core, we compute the minimal
  // number of literals in the core that need to be true to have a feasible
  // solution. This is also called core exhaustion in more recent max-SAT
  // papers.
  optional bool cover_optimization = 89 [default = true];

  // In what order do we add the assumptions in a core-based max-sat algorithm.
  enum MaxSatAssumptionOrder {
    DEFAULT_ASSUMPTION_ORDER = 0;
    ORDER_ASSUMPTION_BY_DEPTH = 1;
    ORDER_ASSUMPTION_BY_WEIGHT = 2;
  }
  optional MaxSatAssumptionOrder max_sat_assumption_order = 51
      [default = DEFAULT_ASSUMPTION_ORDER];

  // If true, adds the assumptions in the reverse order of the one defined by
  // max_sat_assumption_order.
  optional bool max_sat_reverse_assumption_order = 52 [default = false];

  // What stratification algorithm we use in the presence of weights.
  enum MaxSatStratificationAlgorithm {
    // No stratification of the problem.
    STRATIFICATION_NONE = 0;

    // Start with literals with the highest weight, and when SAT, add the
    // literals with the next highest weight and so on.
    STRATIFICATION_DESCENT = 1;

    // Start with all literals. Each time a core is found with a given minimum
    // weight, do not consider literals with a lower weight for the next core
    // computation. If the subproblem is SAT, do like in STRATIFICATION_DESCENT
    // and just add the literals with the next highest weight.
    STRATIFICATION_ASCENT = 2;
  }
  optional MaxSatStratificationAlgorithm max_sat_stratification = 53
      [default = STRATIFICATION_DESCENT];

  // ==========================================================================
  // Constraint programming parameters
  // ==========================================================================

  // Some search decisions might cause a really large number of propagations to
  // happen when integer variables with large domains are only reduced by 1 at
  // each step. If we propagate more than the number of variables times this
  // parameter, we try to take counter-measures. Setting this to 0.0 disables
  // this feature.
  //
  // TODO(user): Setting this to something like 10 helps in most cases, but the
  // code is currently buggy and can cause the solve to enter a bad state where
  // no progress is made.
  optional double propagation_loop_detection_factor = 221 [default = 10.0];

  // When this is true, then a disjunctive constraint will try to use the
  // precedence relations between time intervals to propagate their bounds
  // further. For instance if tasks A and B are both before C and tasks A and B
  // are in disjunction, then we can deduce that task C must start after
  // duration(A) + duration(B) instead of simply max(duration(A), duration(B)),
  // provided that the start time for all tasks was currently zero.
  //
  // This always results in better propagation, but it is usually slow, so
  // depending on the problem, turning this off may lead to a faster solution.
  optional bool use_precedences_in_disjunctive_constraint = 74 [default = true];

  // Create one literal for each disjunction of two pairs of tasks. This slows
  // down the solve time, but improves the lower bound of the objective in the
  // makespan case. This will be triggered if the number of intervals is less
  // or equal than the parameter and if use_strong_propagation_in_disjunctive
  // is true.
  optional int32 max_size_to_create_precedence_literals_in_disjunctive = 229
      [default = 60];

  // Enable stronger and more expensive propagation on no_overlap constraint.
  optional bool use_strong_propagation_in_disjunctive = 230 [default = false];

  // Whether we try to branch on decisions "interval A before interval B"
  // rather than on interval bounds. This usually works better, but slows down
  // a bit the time to find the first solution.
  //
  // These parameters are still EXPERIMENTAL, the result should be correct, but
  // in some corner cases, they can cause some failing CHECKs in the solver.
  optional bool use_dynamic_precedence_in_disjunctive = 263 [default = false];
  optional bool use_dynamic_precedence_in_cumulative = 268 [default = false];

  // When this is true, the cumulative constraint is reinforced with overload
  // checking, i.e., an additional level of reasoning based on energy. This
  // additional level supplements the default level of reasoning as well as
  // timetable edge finding.
  //
  // This always results in better propagation, but it is usually slow, so
  // depending on the problem, turning this off may lead to a faster solution.
  optional bool use_overload_checker_in_cumulative = 78 [default = false];

  // Enable a heuristic to solve cumulative constraints using a modified energy
  // constraint. We modify the usual energy definition by applying a
  // super-additive function (also called "conservative scale" or
  // "dual-feasible function") to the demand and the durations of the tasks.
  //
  // This heuristic is fast but for most problems it does not help much to find
  // feasible solutions.
  optional bool use_conservative_scale_overload_checker = 286 [default = false];

  // When this is true, the cumulative constraint is reinforced with timetable
  // edge finding, i.e., an additional level of reasoning based on the
  // conjunction of energy and mandatory parts. This additional level
  // supplements the default level of reasoning as well as overload_checker.
  //
  // This always results in better propagation, but it is usually slow, so
  // depending on the problem, turning this off may lead to a faster solution.
  optional bool use_timetable_edge_finding_in_cumulative = 79 [default = false];

  // Max number of intervals for the timetable_edge_finding algorithm to
  // propagate. A value of 0 disables the constraint.
  optional int32 max_num_intervals_for_timetable_edge_finding = 260
      [default = 100];

  // If true, detect and create constraints for integer variables that are
  // "after" a set of intervals in the same cumulative constraint.
  //
  // Experimental: by default we just use "direct" precedences. If
  // exploit_all_precedences is true, we explore the full precedence graph.
  // This assumes we have a DAG, otherwise it fails.
  optional bool use_hard_precedences_in_cumulative = 215 [default = false];
  optional bool exploit_all_precedences = 220 [default = false];

  // When this is true, the cumulative constraint is reinforced with propagators
  // from the disjunctive constraint to improve the inference on a set of tasks
  // that are disjunctive at the root of the problem. This additional level
  // supplements the default level of reasoning.
  //
  // Propagators of the cumulative constraint will not be used at all if all
  // the tasks are disjunctive at root node.
  //
  // This always results in better propagation, but it is usually slow, so
  // depending on the problem, turning this off may lead to a faster solution.
  optional bool use_disjunctive_constraint_in_cumulative = 80 [default = true];

  // When this is true, the no_overlap_2d constraint is reinforced with
  // propagators from the cumulative constraints. It consists of ignoring the
  // position of rectangles in one dimension and projecting the no_overlap_2d
  // onto the other dimension to create a cumulative constraint. This is done
  // on both axes. This additional level supplements the default level of
  // reasoning.
  optional bool use_timetabling_in_no_overlap_2d = 200 [default = false];

  // When this is true, the no_overlap_2d constraint is reinforced with
  // energetic reasoning. This additional level supplements the default level
  // of reasoning.
  optional bool use_energetic_reasoning_in_no_overlap_2d = 213
      [default = false];

  // When this is true, the no_overlap_2d constraint is reinforced with
  // an energetic reasoning that uses an area-based energy. This can be
  // combined with the two other overlap heuristics above.
  optional bool use_area_energetic_reasoning_in_no_overlap_2d = 271
      [default = false];

  // If the number of pairs to look at is below this threshold, do an extra
  // step of propagation in the no_overlap_2d constraint by looking at all
  // pairs of intervals.
  optional int32 max_pairs_pairwise_reasoning_in_no_overlap_2d = 276
      [default = 1250];

  // When set, it activates a few scheduling parameters to improve the lower
  // bound of scheduling problems. This is only effective with multiple workers
  // as it modifies the reduced_cost, lb_tree_search, and probing workers.
  optional bool use_dual_scheduling_heuristics = 214 [default = true];

  // The search branching will be used to decide how to branch on unfixed
  // nodes.
  enum SearchBranching {
    // Try to fix all literals using the underlying SAT solver's heuristics,
    // then generate and fix literals until integer variables are fixed. New
    // literals on integer variables are generated using the fixed search
    // specified by the user or our default one.
    AUTOMATIC_SEARCH = 0;

    // If used then all decisions taken by the solver are made using a fixed
    // order as specified in the API or in the CpModelProto search_strategy
    // field.
    FIXED_SEARCH = 1;

    // Simple portfolio search used by LNS workers.
    PORTFOLIO_SEARCH = 2;

    // If used, the solver will use heuristics from the LP relaxation. This
    // exploits the reduced costs of the variables in the relaxation.
    LP_SEARCH = 3;

    // If used, the solver uses the pseudo costs for branching. Pseudo costs
    // are computed using the historical change in objective bounds when some
    // decisions are taken. Note that this works whether we use an LP or not.
    PSEUDO_COST_SEARCH = 4;

    // Mainly exposed here for testing. This quickly tries a lot of randomized
    // heuristics with a low conflict limit. It usually provides a good first
    // solution.
    PORTFOLIO_WITH_QUICK_RESTART_SEARCH = 5;

    // Mainly used internally. This is like FIXED_SEARCH, except we follow the
    // solution_hint field of the CpModelProto rather than using the
    // information provided in the search_strategy.
    HINT_SEARCH = 6;

    // Similar to FIXED_SEARCH, but differs in how the variables not listed in
    // the fixed search heuristics are branched on. This will always start the
    // search tree according to the specified fixed search strategy, but will
    // complete it using the default automatic search.
    PARTIAL_FIXED_SEARCH = 7;

    // Randomized search. Used to increase entropy in the search.
    RANDOMIZED_SEARCH = 8;
  }
  optional SearchBranching search_branching = 82 [default = AUTOMATIC_SEARCH];

  // Conflict limit used in the phase that exploits the solution hint.
  optional int32 hint_conflict_limit = 153 [default = 10];

  // If true, the solver tries to repair the solution given in the hint. This
  // search terminates after the 'hint_conflict_limit' is reached and the
  // solver switches to regular search. If false, then we do a FIXED_SEARCH
  // using the hint until the hint_conflict_limit is reached.
  optional bool repair_hint = 167 [default = false];

  // If true, variables appearing in the solution hints will be fixed to their
  // hinted value.
  optional bool fix_variables_to_their_hinted_value = 192 [default = false];

  // If true, search will continuously probe Boolean variables, and integer
  // variable bounds. This parameter is set to true in parallel on the probing
  // workers.
  optional bool use_probing_search = 176 [default = false];

  // Use extended probing (probe bool_or, at_most_one, exactly_one).
  optional bool use_extended_probing = 269 [default = true];

  // How many combinations of pairs or triplets of variables we want to scan.
  optional int32 probing_num_combinations_limit = 272 [default = 20000];

  // Add a shaving phase (where the solver tries to prove that the lower or
  // upper bound of a variable is infeasible) to the probing search.
  optional bool use_shaving_in_probing_search = 204 [default = true];

  // Specifies the amount of deterministic time spent on each try at shaving a
  // bound in the shaving search.
  optional double shaving_search_deterministic_time = 205 [default = 0.001];

  // Specifies the threshold between two modes in the shaving procedure.
  // If the range of the variable/objective is less than this threshold, then
  // the shaving procedure will try to remove values one by one. Otherwise, it
  // will try to remove one range at a time.
  optional int64 shaving_search_threshold = 290 [default = 64];

  // If true, search will search in ascending max objective value (when
  // minimizing) starting from the lower bound of the objective.
  optional bool use_objective_lb_search = 228 [default = false];

  // This search differs from the previous search as it will not use
  // assumptions to bound the objective, and it will recreate a full model with
  // the hardcoded objective value.
  optional bool use_objective_shaving_search = 253 [default = false];

  // This search takes all Boolean or integer variables, and maximizes or
  // minimizes them in order to reduce their domain.
  optional bool use_variables_shaving_search = 289 [default = false];

  // The solver ignores the pseudo costs of variables with a number of
  // recordings less than this threshold.
  optional int64 pseudo_cost_reliability_threshold = 123 [default = 100];

  // The default optimization method is a simple "linear scan", each time
  // trying to find a better solution than the previous one. If this is true,
  // then we use a core-based approach (like in max-SAT) when we try to
  // increase the lower bound instead.
  optional bool optimize_with_core = 83 [default = false];

  // Do a more conventional tree search (as opposed to the SAT based one) where
  // we keep all the explored nodes in a tree. This is meant to be used in a
  // portfolio and focus on improving the objective lower bound. Keeping the
  // whole tree allows us to report a better objective lower bound coming from
  // the worst open node in the tree.
  optional bool optimize_with_lb_tree_search = 188 [default = false];

  // Experimental. Save the current LP basis at each node of the search tree so
  // that when we jump around, we can load it and reduce the number of LP
  // iterations needed.
  //
  // It currently works okay if we do not change the lp with cuts or
  // simplification... More work is needed to make it robust in all cases.
  optional bool save_lp_basis_in_lb_tree_search = 284 [default = false];

  // If non-negative, perform a binary search on the objective variable in
  // order to find a [min, max] interval outside of which the solver proved
  // unsat/sat under this amount of conflict. This can quickly reduce the
  // objective domain on some problems.
  optional int32 binary_search_num_conflicts = 99 [default = -1];

  // This has no effect if optimize_with_core is false. If true, use a
  // different core-based algorithm similar to the max-HS algo for max-SAT.
  // This is a hybrid MIP/CP approach and it uses a MIP solver in addition to
  // the CP/SAT one. This is also related to the PhD work of tobyodavies@
  // "Automatic Logic-Based Benders Decomposition with MiniZinc"
  // http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14489
  optional bool optimize_with_max_hs = 85 [default = false];

  // Parameters for a heuristic similar to the one described in the paper:
  // "Feasibility Jump: an LP-free Lagrangian MIP heuristic", Bjørnar
  // Luteberget, Giorgio Sartor, 2023, Mathematical Programming Computation.
  optional bool use_feasibility_jump = 265 [default = true];

  // Disables every other type of subsolver; setting this turns CP-SAT into a
  // pure local-search solver.
  optional bool use_ls_only = 240 [default = false];

  // On each restart, we randomly choose if we use decay (with this parameter)
  // or not.
  optional double feasibility_jump_decay = 242 [default = 0.95];

  // How much do we linearize the problem in the local search code.
  optional int32 feasibility_jump_linearization_level = 257 [default = 2];

  // This is a factor that directly influences the work before each restart.
  // Increasing it leads to longer restarts.
  optional int32 feasibility_jump_restart_factor = 258 [default = 1];

  // How much dtime for each LS batch.
  optional double feasibility_jump_batch_dtime = 292 [default = 0.1];

  // Probability for a variable to have a non default value upon restarts or
  // perturbations.
  optional double feasibility_jump_var_randomization_probability = 247
      [default = 0.05];

  // Max distance between the default value and the perturbed value relative to
  // the range of the domain of the variable.
  optional double feasibility_jump_var_perburbation_range_ratio = 248
      [default = 0.2];

  // When stagnating, feasibility jump will either restart from a default
  // solution (with some possible randomization), or randomly perturb the
  // current solution. This parameter selects the first option.
  optional bool feasibility_jump_enable_restarts = 250 [default = true];

  // Maximum size of no_overlap or no_overlap_2d constraint for a quadratic
  // expansion. This might look like a lot, but by expanding such constraints,
  // we get a linear time evaluation per single variable move instead of a slow
  // O(n log n) one.
  optional int32 feasibility_jump_max_expanded_constraint_size = 264
      [default = 500];

  // This will create incomplete subsolvers (that are not LNS subsolvers)
  // that use the feasibility jump code to find improving solutions, treating
  // the objective improvement as a hard constraint.
  optional int32 num_violation_ls = 244 [default = 0];

  // How long violation_ls should wait before perturbing a solution.
  optional int32 violation_ls_perturbation_period = 249 [default = 100];

  // Probability of using compound move search each restart.
  // TODO(user): Add reference to paper when published.
  optional double violation_ls_compound_move_probability = 259 [default = 0.5];

  // Enables experimental workstealing-like shared tree search.
  // If non-zero, start this many complete worker threads to explore a shared
  // search tree. These workers communicate objective bounds and simple decision
  // nogoods relating to the shared prefix of the tree, and will avoid exploring
  // the same subtrees as one another.
  optional int32 shared_tree_num_workers = 235 [default = 0];

  // Set on shared subtree workers. Users should not set this directly.
  optional bool use_shared_tree_search = 236 [default = false];

  // After their assigned prefix, shared tree workers will branch on the
  // objective with this probability. Higher numbers cause the shared tree
  // search to focus on improving the lower bound over finding primal
  // solutions.
  optional double shared_tree_worker_objective_split_probability = 237
      [default = 0.5];

  // Minimum number of restarts before a worker will replace a subtree
  // that looks "bad" based on the average LBD of learned clauses.
  optional int32 shared_tree_worker_min_restarts_per_subtree = 282
      [default = 1];

  // If true, workers share more of the information from their local trail.
  // Specifically, literals implied by the shared tree decisions and
  // the longest conflict-free assignment from the last restart (to enable
  // cross-worker phase-saving).
  optional bool shared_tree_worker_enable_trail_sharing = 295 [default = true];

  // How many open leaf nodes should the shared tree maintain per worker.
  optional double shared_tree_open_leaves_per_worker = 281 [default = 2.0];

  // In order to limit total shared memory and communication overhead, limit
  // the total number of nodes that may be generated in the shared tree. If the
  // shared tree runs out of unassigned leaves, workers act as portfolio
  // workers. Note: this limit includes interior nodes, not just leaves.
  optional int32 shared_tree_max_nodes_per_worker = 238 [default = 100000];

  enum SharedTreeSplitStrategy {
    // Uses the default strategy, currently equivalent to
    // SPLIT_STRATEGY_DISCREPANCY.
    SPLIT_STRATEGY_AUTO = 0;

    // Only accept splits if the node to be split's depth+discrepancy is
    // minimal for the desired number of leaves.
    // The preferred child for discrepancy calculation is the one with the
    // lowest objective lower bound or the original branch direction if the
    // bounds are equal. This rule allows twice as many workers to work in the
    // preferred subtree as non-preferred.
    SPLIT_STRATEGY_DISCREPANCY = 1;

    // Only split nodes with an objective lb equal to the global lb. If there
    // is no objective, this is equivalent to SPLIT_STRATEGY_FIRST_PROPOSAL.
    SPLIT_STRATEGY_OBJECTIVE_LB = 2;

    // Attempt to keep the shared tree balanced.
    SPLIT_STRATEGY_BALANCED_TREE = 3;

    // Workers race to split their subtree, the winner's proposal is accepted.
    SPLIT_STRATEGY_FIRST_PROPOSAL = 4;
  }
  optional SharedTreeSplitStrategy shared_tree_split_strategy = 239
      [default = SPLIT_STRATEGY_AUTO];

1136 // Whether we enumerate all solutions of a problem without objective. Note
1137 // that setting this to true automatically disable some presolve reduction
1138 // that can remove feasible solution. That is it has the same effect as
1139 // setting keep_all_feasible_solutions_in_presolve.
1141 // TODO(user): Do not do that and let the user choose what behavior is best by
1142 // setting keep_all_feasible_solutions_in_presolve ?
1143 optional bool enumerate_all_solutions = 87 [default = false];
1145 // If true, we disable the presolve reductions that remove feasible solutions
1146 // from the search space. Such solution are usually dominated by a "better"
1147 // solution that is kept, but depending on the situation, we might want to
1148 // keep all solutions.
1150 // A trivial example is when a variable is unused. If this is true, then the
1151 // presolve will not fix it to an arbitrary value and it will stay in the
1152 // problem.
1153 optional bool keep_all_feasible_solutions_in_presolve = 173 [default = false];
1155 // If true, add information about the derived variable domains to the
1156 // CpSolverResponse. It is an option because it makes the response slightly
1157 // bigger and there is a bit more work involved during the postsolve to
1158 // construct it, but it should still have a low overhead. See the
1159 // tightened_variables field in CpSolverResponse for more details.
1160 optional bool fill_tightened_domains_in_response = 132 [default = false];
1162 // If true, the final response additional_solutions field will be filled with
1163 // all solutions from our solutions pool.
1165 // Note that if both this field and enumerate_all_solutions are true, we will
1166 // copy to the pool all of the solutions found. So if solution_pool_size is big
1167 // enough, you can get all solutions this way instead of using the solution
1168 // callbacks.
1170 // Note that this only affects the "final" solution, not the one passed to the
1171 // solution callbacks.
1172 optional bool fill_additional_solutions_in_response = 194 [default = false];
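//
// Sketch of reading these extra solutions from Python, assuming the solver
// exposes the response proto and that CpSolverResponse has the
// additional_solutions / values fields described above:
//
//   solver.parameters.fill_additional_solutions_in_response = True
//   solver.parameters.solution_pool_size = 10
//   solver.Solve(model)
//   for extra in solver.ResponseProto().additional_solutions:
//       print(list(extra.values))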
1174 // If true, the solver will add a default integer branching strategy to the
1175 // already defined search strategy. If not, some variables might still not be
1176 // fixed at the end of the search. For now we assume these variables can just
1177 // be set to their lower bound.
1178 optional bool instantiate_all_variables = 106 [default = true];
1180 // If true, then the precedences propagator tries to detect for each variable
1181 // if it has a set of "optional incoming arcs" for which at least one of them
1182 // is present. This is usually useful to have but can be slow on models with a
1183 // lot of precedences.
1184 optional bool auto_detect_greater_than_at_least_one_of = 95 [default = true];
1186 // For an optimization problem, stop the solver as soon as we have a solution.
1187 optional bool stop_after_first_solution = 98 [default = false];
1189 // Mainly used when improving the presolver. When true, stops the solver after
1190 // the presolve is complete (or after loading and root level propagation).
1191 optional bool stop_after_presolve = 149 [default = false];
1192 optional bool stop_after_root_propagation = 252 [default = false];
1196 // Testing parameter used to disable all LNS workers.
1197 optional bool use_lns = 283 [default = true];
1199 // Experimental parameter to disable everything but LNS.
1200 optional bool use_lns_only = 101 [default = false];
1202 // Size of the top-n different solutions kept by the solver.
1203 // This parameter must be > 0.
1204 // Currently this only impacts the "base" solution chosen for an LNS fragment.
1205 optional int32 solution_pool_size = 193 [default = 3];
1207 // Turns on the relaxation-induced neighborhood generator.
1208 optional bool use_rins_lns = 129 [default = true];
1210 // Adds a feasibility pump subsolver along with lns subsolvers.
1211 optional bool use_feasibility_pump = 164 [default = true];
1213 // Turns on a neighborhood generator based on local branching LP. Based on
1214 // Huang et al., "Local Branching Relaxation Heuristics for Integer Linear
1215 // Programs".
1216 optional bool use_lb_relax_lns = 255 [default = false];
1218 // Rounding method to use for feasibility pump.
1219 enum FPRoundingMethod {
1220 // Rounds to the nearest integer value.
1221 NEAREST_INTEGER = 0;
1223 // Counts the number of linear constraints restricting the variable in the
1224 // increasing values (up locks) and decreasing values (down locks). Rounds
1225 // the variable in the direction of fewer locks.
1226 LOCK_BASED = 1;
1228 // Similar to lock based rounding except this only considers locks of active
1229 // constraints from the last lp solve.
1230 ACTIVE_LOCK_BASED = 3;
1232 // This is an expensive rounding algorithm. We round variables one by one and
1233 // propagate the bounds in between. If none of the rounded values fall in
1234 // the continuous domain specified by the lower and upper bound, we use the
1235 // current lower/upper bound (whichever one is closest) instead of rounding
1236 // the fractional LP solution value. If both the rounded values are in the
1237 // domain, we round to the nearest integer.
1238 PROPAGATION_ASSISTED = 2;
1239 }
1240 optional FPRoundingMethod fp_rounding = 165 [default = PROPAGATION_ASSISTED];
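//
// Enum-valued parameters like this one can be set from Python either with the
// generated proto constant or with the raw integer value (a sketch, assuming
// the usual sat_parameters_pb2 generated module):
//
//   from ortools.sat import sat_parameters_pb2
//   from ortools.sat.python import cp_model
//
//   solver = cp_model.CpSolver()
//   solver.parameters.fp_rounding = (
//       sat_parameters_pb2.SatParameters.ACTIVE_LOCK_BASED)  # or simply 3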
1242 // If true, registers more lns subsolvers with different parameters.
1243 optional bool diversify_lns_params = 137 [default = false];
1245 // Randomize fixed search.
1246 optional bool randomize_search = 103 [default = false];
1248 // Search randomization will collect the top
1249 // 'search_random_variable_pool_size' valued variables, and pick one randomly.
1250 // The value of the variable is specific to each strategy.
1251 optional int64 search_random_variable_pool_size = 104 [default = 0];
1253 // Experimental code: specify if the objective pushes all tasks toward the
1254 // start of the schedule.
1255 optional bool push_all_tasks_toward_start = 262 [default = false];
1257 // If true, we automatically detect variables whose constraints are always
1258 // enforced by the same literal and we mark them as optional. This allows us
1259 // to propagate them as if they were present in some situations.
1261 // TODO(user): This is experimental and seems to lead to a wrong optimal in
1262 // some situations. It should however give correct solutions. Fix.
1263 optional bool use_optional_variables = 108 [default = false];
1265 // The solver usually exploits the LP relaxation of a model. If this option is
1266 // true, then whatever is inferred by the LP will be used like a heuristic to
1267 // compute EXACT propagation on the IP. So with this option, there are no
1268 // numerical imprecision issues.
1269 optional bool use_exact_lp_reason = 109 [default = true];
1271 // This can be beneficial if there are a lot of no-overlap constraints but a
1272 // relatively low number of different intervals in the problem. Like 1000
1273 // intervals, but 1M intervals in the no-overlap constraints covering them.
1274 optional bool use_combined_no_overlap = 133 [default = false];
1276 // All at_most_one constraints with a size <= param will be replaced by a
1277 // quadratic number of binary implications.
1278 optional int32 at_most_one_max_expansion_size = 270 [default = 3];
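//
// For example, with the default of 3, at_most_one(a, b, c) can be expanded
// into the three binary implications a => not(b), a => not(c) and b => not(c),
// i.e. n * (n - 1) / 2 implications for a constraint of size n.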
1280 // Indicates if the CP-SAT layer should catch Control-C (SIGINT) signals
1281 // when calling solve. If set, catching the SIGINT signal will terminate the
1282 // search gracefully, as if a time limit was reached.
1283 optional bool catch_sigint_signal = 135 [default = true];
1285 // Stores and exploits "implied-bounds" in the solver. That is, relations of
1286 // the form literal => (var >= bound). This is currently used to derive
1287 // stronger cuts.
1288 optional bool use_implied_bounds = 144 [default = true];
1290 // Whether we try to do a few degenerate iterations at the end of an LP solve
1291 // to minimize the fractionality of the integer variables in the basis. This
1292 // helps on some problems, but not so much on others. It also costs a bit of
1293 // time to do such a polishing step.
1294 optional bool polish_lp_solution = 175 [default = false];
1296 // The internal LP tolerances used by CP-SAT. These apply to the internal
1297 // and scaled problem. If the domains of your variables are large it might be
1298 // good to use lower tolerances. If your problem is binary with low
1299 // coefficients, it might be good to use higher ones to speed up the LP
1300 // solves.
1301 optional double lp_primal_tolerance = 266 [default = 1e-7];
1302 optional double lp_dual_tolerance = 267 [default = 1e-7];
1304 // Temporary flag until the feature is more mature. This converts intervals to
1305 // the newer proto format that supports affine start/var/end instead of just
1306 // variables.
1307 optional bool convert_intervals = 177 [default = true];
1309 // Whether we try to automatically detect the symmetries in a model and
1310 // exploit them. Currently, at level 1 we detect them in presolve and try
1311 // to fix Booleans. At level 2, we also do some form of dynamic symmetry
1312 // breaking during search. At level 3, we also detect symmetries for very
1313 // large models, which can be slow. At level 4, we try to break as much
1314 // symmetry as possible in presolve.
1315 optional int32 symmetry_level = 183 [default = 2];
1317 // The new linear propagation code treats all constraints at once and uses
1318 // an adaptation of Bellman-Ford-Tarjan to propagate constraints in a smarter
1319 // order and potentially detect propagation cycles earlier.
1320 optional bool new_linear_propagation = 224 [default = true];
1322 // Linear constraints that are not pseudo-Boolean and that are longer than
1323 // this size will be split into sqrt(size) intermediate sums in order to have
1324 // faster propagation in the CP engine.
1325 optional int32 linear_split_size = 256 [default = 100];
1327 // ==========================================================================
1328 // Linear programming relaxation
1329 // ==========================================================================
1331 // A non-negative level indicating the type of constraints we consider in the
1332 // LP relaxation. At level zero, no LP relaxation is used. At level 1, only
1333 // the linear constraint and full encoding are added. At level 2, we also add
1334 // all the Boolean constraints.
1335 optional int32 linearization_level = 90 [default = 1];
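//
// For example, to spend more effort on the LP relaxation one would typically
// raise this level from Python (a sketch, assuming the cp_model wrapper):
//
//   from ortools.sat.python import cp_model
//
//   solver = cp_model.CpSolver()
//   solver.parameters.linearization_level = 2  # also linearize Boolean constraints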
1337 // A non-negative level indicating how much we should try to fully encode
1338 // Integer variables as Boolean.
1339 optional int32 boolean_encoding_level = 107 [default = 1];
1341 // When loading a*x + b*y ==/!= c with x and y both fully encoded,
1342 // the solver may decide to replace the linear equation by a set of clauses.
1343 // This is triggered if the sizes of the domains of x and y are below the
1344 // given threshold.
1345 optional int32 max_domain_size_when_encoding_eq_neq_constraints = 191
1348 // The limit on the number of cuts in our cut pool. When this is reached we do
1349 // not generate cuts anymore.
1351 // TODO(user): We should probably remove this parameter, and just always
1352 // generate cuts but only keep the best n or something.
1353 optional int32 max_num_cuts = 91 [default = 10000];
1355 // Controls the global cut effort. Zero will turn off all cuts. For now we just
1356 // have one level. Note also that most cuts are only used at linearization
1357 // level >= 2.
1358 optional int32 cut_level = 196 [default = 1];
1360 // For the cuts that can be generated at any level, this controls whether we
1361 // only try to generate them at the root node.
1362 optional bool only_add_cuts_at_level_zero = 92 [default = false];
1364 // When the LP objective is fractional, do we add the cut that forces the
1365 // linear objective expression to be greater than or equal to this fractional
1366 // value rounded up? We can always do that since our objective is integer, and
1367 // combined with the MIR heuristic to reduce the coefficients of such a cut, it
1368 // can be beneficial.
1369 optional bool add_objective_cut = 197 [default = false];
1371 // Whether we generate and add Chvatal-Gomory cuts to the LP at root node.
1372 // Note that for now, this is not heavily tuned.
1373 optional bool add_cg_cuts = 117 [default = true];
1375 // Whether we generate MIR cuts at root node.
1376 // Note that for now, this is not heavily tuned.
1377 optional bool add_mir_cuts = 120 [default = true];
1379 // Whether we generate Zero-Half cuts at root node.
1380 // Note that for now, this is not heavily tuned.
1381 optional bool add_zero_half_cuts = 169 [default = true];
1383 // Whether we generate clique cuts from the binary implication graph. Note
1384 // that as the search goes on, this graph will contain new binary clauses
1385 // learned by the SAT engine.
1386 optional bool add_clique_cuts = 172 [default = true];
1388 // Whether we generate RLT cuts. This is still experimental but can help on
1389 // binary problems with a lot of clauses of size 3.
1390 optional bool add_rlt_cuts = 279 [default = true];
1392 // The cut generator for all_diff constraints can add too many cuts for large
1393 // all_diff constraints. This parameter restricts the size of the all_diff
1394 // constraints for which we add a cut generator.
1395 optional int32 max_all_diff_cut_size = 148 [default = 64];
1397 // For the lin max constraints, generates the cuts described in "Strong
1398 // mixed-integer programming formulations for trained neural networks" by Ross
1399 // Anderson et al. (https://arxiv.org/pdf/1811.01988.pdf)
1400 optional bool add_lin_max_cuts = 152 [default = true];
1402 // In the integer rounding procedure used for MIR and Gomory cuts, the maximum
1403 // "scaling" we use (must be positive). The lower this is, the lower the
1404 // integer coefficients of the cut will be. Note that cuts generated by lower
1405 // values are not necessarily worse than cuts generated by larger values. There
1406 // is no strict dominance relationship.
1408 // Setting this to 2 results in the "strong fractional rounding" of Letchford
1409 // and Lodi.
1410 optional int32 max_integer_rounding_scaling = 119 [default = 600];
1412 // If true, we start by an empty LP, and only add constraints not satisfied
1413 // by the current LP solution batch by batch. A constraint that is only added
1414 // like this is known as a "lazy" constraint in the literature, except that we
1415 // currently consider all constraints as lazy here.
1416 optional bool add_lp_constraints_lazily = 112 [default = true];
1418 // Even at the root node, we do not want to spend too much time on the LP if
1419 // it is "difficult". So we solve it in "chunks" of that many iterations. The
1420 // solve will be continued down in the tree or the next time we go back to the
1421 // root node.
1422 optional int32 root_lp_iterations = 227 [default = 2000];
1424 // While adding constraints, skip the constraints which have orthogonality
1425 // less than 'min_orthogonality_for_lp_constraints' with already added
1426 // constraints during current call. Orthogonality is defined as 1 -
1427 // cosine(vector angle between constraints). A value of zero disables this
1428 // feature.
1429 optional double min_orthogonality_for_lp_constraints = 115 [default = 0.05];
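//
// Worked example: for two constraint rows a and b, orthogonality is
// 1 - cos(angle(a, b)) = 1 - (a . b) / (||a|| * ||b||). Two parallel rows have
// orthogonality 0, so the new one is skipped with the default of 0.05, while
// two perpendicular rows have orthogonality 1 and the new one is kept.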
1431 // Max number of times we perform cut generation and resolve the LP at level 0.
1432 optional int32 max_cut_rounds_at_level_zero = 154 [default = 1];
1434 // If a constraint/cut in LP is not active for that many consecutive OPTIMAL
1435 // solves, remove it from the LP. Note that it might be added again later if
1436 // it becomes violated by the current LP solution.
1437 optional int32 max_consecutive_inactive_count = 121 [default = 100];
1439 // These parameters are similar to sat clause management activity parameters.
1440 // They are effective only if the number of generated cuts exceeds the storage
1441 // limit. Default values are based on a few experiments on miplib instances.
1442 optional double cut_max_active_count_value = 155 [default = 1e10];
1443 optional double cut_active_count_decay = 156 [default = 0.8];
1445 // Target number of constraints to remove during cleanup.
1446 optional int32 cut_cleanup_target = 157 [default = 1000];
1448 // Add that many lazy constraints (or cuts) at once in the LP. Note that at
1449 // the beginning of the solve, we do add more than this.
1450 optional int32 new_constraints_batch_size = 122 [default = 50];
1452 // All the "exploit_*" parameters below work in the same way: when branching
1453 // on an IntegerVariable, these parameters affect the value the variable is
1454 // branched on. Currently the first heuristic that triggers wins in the order
1455 // in which they appear below.
1457 // TODO(user): Maybe do like for the restart algorithm, introduce an enum
1458 // and a repeated field that control the order on which these are applied?
1460 // If true and the LP relaxation of the problem has an integer optimal
1461 // solution, try to exploit it. Note that since the LP relaxation may not
1462 // contain all the constraints, such a solution is not necessarily a solution
1463 // of the full problem.
1464 optional bool exploit_integer_lp_solution = 94 [default = true];
1466 // If true and the LP relaxation of the problem has a solution, try to exploit
1467 // it. This is the same as above except that in this case the LP solution might
1468 // not be an integer solution.
1469 optional bool exploit_all_lp_solution = 116 [default = true];
1471 // When branching on a variable, follow the last best solution value.
1472 optional bool exploit_best_solution = 130 [default = false];
1474 // When branching on a variable, follow the last best relaxation solution
1475 // value. We use the relaxation with the tightest bound on the objective as
1476 // the best relaxation solution.
1477 optional bool exploit_relaxation_solution = 161 [default = false];
1479 // When branching on a variable that directly affects the objective,
1480 // branch on the value that leads to the best objective first.
1481 optional bool exploit_objective = 131 [default = true];
1483 // Infer products of Booleans, or of a Boolean times an IntegerVariable, from
1484 // the linear constraints in the problem. This can be used in some cuts,
1485 // although for now we don't really exploit it.
1486 optional bool detect_linearized_product = 277 [default = false];
1488 // ==========================================================================
1489 // MIP -> CP-SAT (i.e. IP with integer coeff) conversion parameters that are
1490 // used by our automatic "scaling" algorithm.
1492 // Note that it is hard to do a meaningful conversion automatically and if
1493 // you have a model with continuous variables, it is best if you scale the
1494 // domains of the variables yourself so that you have a relevant precision for
1495 // the application at hand. Same for the coefficients and constraint bounds.
1496 // ==========================================================================
1498 // We need to bound the maximum magnitude of the variables for CP-SAT, and
1499 // that is the bound we use. If the MIP model expects larger variable values in
1500 // the solution, then the converted model will likely not be relevant.
1501 optional double mip_max_bound = 124 [default = 1e7];
1503 // All continuous variables of the problem will be multiplied by this factor.
1504 // By default, we don't do any variable scaling and rely on the MIP model to
1505 // specify continuous variable domains with the wanted precision.
1506 optional double mip_var_scaling = 125 [default = 1.0];
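//
// Worked example with made-up numbers: with mip_var_scaling = 1000, a
// continuous variable with domain [0.0, 1.5] becomes an integer variable with
// domain [0, 1500], i.e. the converted model keeps a precision of 1e-3 on the
// original variable.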
1508 // If this is false, then mip_var_scaling is only applied to variables with
1509 // "small" domain. If it is true, we scale all floating point variables
1510 // independently of their domain.
1511 optional bool mip_scale_large_domain = 225 [default = false];
1513 // If true, some continuous variables might be automatically scaled. For now,
1514 // this is only the case where we detect that a variable is actually an
1515 // integer multiple of a constant. For instance, variables of the form k * 0.5
1516 // are quite frequent, and if we detect this, we will scale such a variable's
1517 // domain by 2 to make it an implied integer.
1518 optional bool mip_automatically_scale_variables = 166 [default = true];
1520 // If one tries to solve a MIP model with CP-SAT, because we assume all
1521 // variables to be integer after scaling, we will not necessarily have the
1522 // correct optimal. Note however that all feasible solutions are valid since we
1523 // will just solve a more restricted version of the original problem.
1525 // This parameter is here to prevent the user from thinking the solution is
1526 // optimal when it might not be. One will need to manually set this to false to
1527 // solve a MIP model where the optimal might be different.
1529 // Note that this is tested after some MIP presolve steps, so even if not
1530 // all original variables are integer, we might end up with a pure IP after
1531 // presolve and after implied integer detection.
1532 optional bool only_solve_ip = 222 [default = false];
1534 // When scaling constraints with double coefficients to integer coefficients,
1535 // we will multiply by a power of 2 and round the coefficients. We will choose
1536 // the lowest power such that we have no potential overflow (see
1537 // mip_max_activity_exponent) and the worst case constraint activity error
1538 // does not exceed this threshold.
1540 // Note that we also detect constraints with rational coefficients and scale
1541 // them accordingly when it seems better instead of using a power of 2.
1543 // We also relax all constraint bounds by this absolute value. For pure
1544 // integer constraints, if this value is lower than one, this will not change
1545 // anything. However it is needed when scaling MIP problems.
1547 // If we manage to scale a constraint correctly, the maximum error we can make
1548 // will be twice this value (once for the scaling error and once for the
1549 // relaxed bounds). If we are not able to scale that well, we will display
1550 // that fact but still scale as best as we can.
1551 optional double mip_wanted_precision = 126 [default = 1e-6];
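//
// Illustrative example (numbers made up): a coefficient of 1/3 scaled by 2^20
// is stored as round(2^20 / 3) = 349525, a rounding error of about 3e-7 per
// unit of the variable; the solver picks the smallest power of 2 (subject to
// mip_max_activity_exponent) for which the worst-case activity error over the
// variable domains stays below this threshold.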
1553 // To avoid integer overflow, we always force the maximum possible constraint
1554 // activity (and objective value) according to the initial variable domain to
1555 // be smaller than 2 to this given power. Because of this, we cannot always
1556 // reach the "mip_wanted_precision" parameter above.
1558 // This can go as high as 62, but some internal algorithms currently abort
1559 // early if they might run into integer overflow, so it is better to keep it a
1560 // bit lower than this.
1561 optional int32 mip_max_activity_exponent = 127 [default = 53];
1563 // As explained in mip_wanted_precision and mip_max_activity_exponent, we
1564 // cannot always reach the wanted precision during scaling. We use this
1565 // threshold to emphasize in the logs when the precision seems bad.
1566 optional double mip_check_precision = 128 [default = 1e-4];
1568 // Even if we make a big error when scaling the objective, we can always derive
1569 // a correct lower bound on the original objective by using the exact lower
1570 // bound on the scaled integer version of the objective. This should be fast,
1571 // but if you don't care about having a precise lower bound, you can turn it
1572 // off here.
1573 optional bool mip_compute_true_objective_bound = 198 [default = true];
1575 // Any finite values in the input MIP must be below this threshold, otherwise
1576 // the model will be reported invalid. This is needed to avoid floating point
1577 // overflow when evaluating bounds * coeff for instance. We are a bit more
1578 // defensive, but in practice, users shouldn't use super large values in a
1579 // model.
1580 optional double mip_max_valid_magnitude = 199 [default = 1e20];
1582 // By default, any variable/constraint bound with a finite value and a
1583 // magnitude greater than the mip_max_valid_magnitude will result in an
1584 // invalid model. This flag changes the behavior such that such bounds are
1585 // silently transformed to +∞ or -∞.
1587 // It is recommended to keep it at false, and create valid bounds.
1588 optional bool mip_treat_high_magnitude_bounds_as_infinity = 278
1589 [default = false];
1591 // Any value in the input MIP with a magnitude lower than this will be set to
1592 // zero. This is to avoid some issues in LP presolving.
1593 optional double mip_drop_tolerance = 232 [default = 1e-16];
1595 // When solving a MIP, we do some basic floating point presolving before
1596 // scaling the problem to integer to be handled by CP-SAT. This controls how
1597 // much of that presolve we do. It can help to better scale floating point
1598 // models, but it does not always behave nicely.
1599 optional int32 mip_presolve_level = 261 [default = 2];