// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Contains the definitions for all the GLOP algorithm parameters and their
// default values.
//
// TODO(user): Make the above statement true by moving all algorithm parameters
// to this file.

syntax = "proto2";

package operations_research.glop;

option java_package = "com.google.ortools.glop";
option java_multiple_files = true;
option csharp_namespace = "Google.OrTools.Glop";
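
// Illustrative usage (not part of the message definition): a minimal C++
// sketch, assuming the usual ortools/glop API (glop::LPSolver and the
// protoc-generated setters), of how these parameters are typically filled in
// and handed to the solver:
//
//   #include "ortools/glop/lp_solver.h"
//   #include "ortools/glop/parameters.pb.h"
//
//   operations_research::glop::GlopParameters params;
//   params.set_use_dual_simplex(true);     // Solve with the dual simplex.
//   params.set_max_time_in_seconds(10.0);  // Wall-clock time limit.
//   params.set_solution_feasibility_tolerance(1e-7);
//
//   operations_research::glop::LPSolver solver;
//   solver.SetParameters(params);  // solver.Solve(lp) will then use them.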

message GlopParameters {
  // Supported algorithms for scaling:
  // EQUILIBRATION - progressive scaling by row and column norms until the
  // marginal difference passes below a threshold.
  // LINEAR_PROGRAM - EXPERIMENTAL: finding optimal scale factors using a linear
  // program in the log scale.
  enum ScalingAlgorithm {
    DEFAULT = 0;
    EQUILIBRATION = 1;
    LINEAR_PROGRAM = 2;
  }

  // Like a Boolean with an extra value to let the algorithm decide what is the
  // best choice.
  enum SolverBehavior {
    ALWAYS_DO = 0;
    NEVER_DO = 1;
    LET_SOLVER_DECIDE = 2;
  }

  // General strategy used during pricing.
  enum PricingRule {
    // Strategy using only the reduced cost of a variable.
    //
    // Note that compared to a textbook rule, we normalize the reduced cost of a
    // variable using the norm of the associated column. This improves the rule
    // quite a bit at almost no extra complexity. See the first paper from
    // Ping-Qi Pan cited in primal_edge_norms.h.
    DANTZIG = 0;

    // Normalize the reduced costs by the norm of the edges. Since computing
    // norms at each step is too expensive, reduced costs and norms are
    // updated iteratively from one iteration to the next.
    STEEPEST_EDGE = 1;

    // Normalize the reduced costs by an approximation of the norm of the edges.
    // This should offer a good tradeoff between steepest edge and speed.
    DEVEX = 2;
  }

  // Heuristics to use in the primal simplex to remove fixed slack variables
  // from the initial basis.
  enum InitialBasisHeuristic {
    // Leave the fixed slack variables in the basis.
    NONE = 0;

    // Use the heuristic described in:
    // Robert E. Bixby, "Implementing the Simplex Method: The Initial Basis",
    // ORSA Journal on Computing, Vol. 4, No. 3, Summer 1992.
    // http://joc.journal.informs.org/content/4/3/267.abstract
    //
    // It requires use_scaling to be true, otherwise it behaves like NONE.
    BIXBY = 1;

    // Replace the fixed columns while keeping the initial basis triangular. The
    // heuristic to select which column to use first is similar to the one used
    // for BIXBY. This algorithm is similar to the "advanced initial basis"
    // GLPK uses by default. Both algorithms produce a triangular initial basis;
    // however, the heuristics used are not exactly the same.
    TRIANGULAR = 2;

    // Use a version of Maros's triangular feasibility crash:
    // https://books.google.fr/books?isbn=1461502578
    MAROS = 3;
  }

  optional ScalingAlgorithm scaling_method = 57 [default = EQUILIBRATION];

  // PricingRule to use during the feasibility phase.
  optional PricingRule feasibility_rule = 1 [default = STEEPEST_EDGE];

  // PricingRule to use during the optimization phase.
  optional PricingRule optimization_rule = 2 [default = STEEPEST_EDGE];

  // We estimate the factorization accuracy of B during each pivot by using
  // the fact that we can compute the pivot coefficient in two ways:
  // - From direction[leaving_row].
  // - From update_row[entering_column].
  // If the two values have a relative difference above this threshold, we
  // trigger a refactorization.
  optional double refactorization_threshold = 6 [default = 1e-9];
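
  // Example for refactorization_threshold (assuming the relative difference is
  // measured as |p1 - p2| / max(|p1|, |p2|); this is an illustration, not the
  // exact code): pivot estimates of 1.0 and 1.0 + 2e-9 differ by about 2e-9,
  // which is above the default 1e-9 and would trigger a refactorization.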

  // We estimate the accuracy of the iteratively computed reduced costs. If
  // it falls below this threshold, we reinitialize them from scratch. Note
  // that such an operation is pretty fast, so we can use a low threshold.
  // It is important to have a good accuracy here (better than the
  // dual_feasibility_tolerance below) to be sure of the sign of such a cost.
  optional double recompute_reduced_costs_threshold = 8 [default = 1e-8];

  // Note that the threshold is a relative error on the actual norm (not the
  // squared one) and that edge norms are always greater than 1. Recomputing
  // norms is a really expensive operation and a large threshold is ok since
  // it doesn't directly impact the solution but just the entering variable
  // choice.
  optional double recompute_edges_norm_threshold = 9 [default = 100.0];

  // The three following parameters correspond to the tolerances epsilon 1, 2,
  // and 3 described in Chvatal p. 115 in the section "Zero Tolerances".
  //
  // This tolerance indicates by how much we allow the variable values to go out
  // of bounds and still consider the current solution primal-feasible. We also
  // use the same tolerance for the error A.x - b. Note that the two errors are
  // closely related if A is scaled in such a way that the greatest coefficient
  // magnitude on each column is 1.0.
  //
  // This is also simply called feasibility tolerance in other solvers.
  optional double primal_feasibility_tolerance = 10 [default = 1e-8];

  // Variables whose reduced costs have an absolute value smaller than this
  // tolerance are not considered as entering candidates. That is, they do not
  // take part in deciding whether a solution is dual-feasible or not.
  //
  // Note that this value can temporarily increase during the execution of the
  // algorithm if the estimated precision of the reduced costs is higher than
  // this tolerance. Note also that we scale the costs (in the presolve step) so
  // that the cost magnitude range contains one.
  //
  // This is also known as the optimality tolerance in other solvers.
  optional double dual_feasibility_tolerance = 11 [default = 1e-8];

  // During the primal simplex (resp. dual simplex), the coefficients of the
  // direction (resp. update row) with a magnitude lower than this threshold are
  // not considered during the ratio test. This tolerance is related to the
  // precision at which a Solve() involving the basis matrix can be performed.
  //
  // TODO(user): Automatically increase it when we detect that the precision
  // of the Solve() is worse than this.
  optional double ratio_test_zero_threshold = 12 [default = 1e-9];

  // This impacts the ratio test and indicates by how much we allow a basic
  // variable value that we move to go out of bounds. The value should be in
  // [0.0, 1.0) and should be interpreted as a ratio of the
  // primal_feasibility_tolerance. Setting this to 0.0 basically disables the
  // Harris ratio test, while setting it too close to 1.0 will make it
  // difficult to keep the variable values inside their bounds modulo the
  // primal_feasibility_tolerance.
  //
  // Note that the same comment applies to the dual simplex ratio test. There,
  // we allow the reduced costs to be of an infeasible sign by as much as this
  // ratio times the dual_feasibility_tolerance.
  optional double harris_tolerance_ratio = 13 [default = 0.5];
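
  // Example for harris_tolerance_ratio: with the default ratio of 0.5 and
  // primal_feasibility_tolerance = 1e-8, the primal ratio test accepts bound
  // violations of up to 0.5 * 1e-8 = 5e-9 when selecting the leaving variable.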

  // When we choose the leaving variable, we want to avoid small pivots because
  // they are the least precise and may cause numerical instabilities. For a
  // pivot under this threshold times the infinity norm of the direction, we try
  // various countermeasures in order to avoid using it.
  optional double small_pivot_threshold = 14 [default = 1e-6];

  // We never follow a basis change with a pivot under this threshold.
  optional double minimum_acceptable_pivot = 15 [default = 1e-6];

  // In order to increase the sparsity of the manipulated vectors, floating
  // point values with a magnitude smaller than this parameter are set to zero
  // (only in some places). This parameter should be positive or zero.
  optional double drop_tolerance = 52 [default = 1e-14];

  // Whether or not we scale the matrix A so that the maximum coefficient on
  // each row and each column is 1.0.
  optional bool use_scaling = 16 [default = true];

  // This is only used if use_scaling is true. After the scaling is done, we
  // also scale the objective by a constant factor. This is important because
  // scaling the cost has a direct influence on the meaning of the
  // dual_feasibility_tolerance. Because we usually use a fixed tolerance, the
  // objective must be well scaled to make sense.
  enum CostScalingAlgorithm {
    // Leave the cost as is.
    NO_COST_SCALING = 0;

    // This is the most defensive option. It makes sure that
    // [min_cost_magnitude, max_cost_magnitude] contains 1.0, and if not, it
    // makes the closest magnitude bound equal to one.
    CONTAIN_ONE_COST_SCALING = 1;

    // Make the mean of the non-zero costs equal to one.
    MEAN_COST_SCALING = 2;

    // Make the median of the non-zero costs equal to one.
    MEDIAN_COST_SCALING = 3;
  }
  optional CostScalingAlgorithm cost_scaling = 60
      [default = CONTAIN_ONE_COST_SCALING];

  // What heuristic is used to try to replace the fixed slack columns in the
  // initial basis of the primal simplex.
  optional InitialBasisHeuristic initial_basis = 17 [default = TRIANGULAR];

  // Whether or not we keep a transposed version of the matrix A to speed up the
  // pricing at the cost of extra memory and the initial transposition
  // computation.
  optional bool use_transposed_matrix = 18 [default = true];

  // Number of iterations between two basis refactorizations. Note that various
  // conditions in the algorithm may trigger a refactorization before this
  // period is reached. Set this to 0 if you want to refactorize at each step.
  optional int32 basis_refactorization_period = 19 [default = 64];

  // If this is true, then basis_refactorization_period becomes a lower bound on
  // the number of iterations between two refactorizations (provided there are
  // no numerical accuracy issues). Depending on the estimated time to
  // refactorize vs. the extra time spent in each solve because of the LU
  // update, we try to balance the two.
  optional bool dynamically_adjust_refactorization_period = 63 [default = true];

  // Whether or not we solve the dual of the given problem.
  // With LET_SOLVER_DECIDE, the algorithm decides which approach is probably
  // the fastest depending on the problem dimensions (see dualizer_threshold).
  optional SolverBehavior solve_dual_problem = 20 [default = LET_SOLVER_DECIDE];

  // When solve_dual_problem is LET_SOLVER_DECIDE, take the dual if the number
  // of constraints of the problem is more than this threshold times the number
  // of variables.
  optional double dualizer_threshold = 21 [default = 1.5];
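
  // Example for dualizer_threshold: with the default of 1.5 and
  // solve_dual_problem = LET_SOLVER_DECIDE, a problem with 1000 constraints
  // and 600 variables is dualized since 1000 > 1.5 * 600.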

  // When the problem status is OPTIMAL, we check the optimality using this
  // relative tolerance and change the status to IMPRECISE if an issue is
  // detected.
  //
  // The tolerance is "relative" in the sense that our thresholds are:
  // - tolerance * max(1.0, abs(bound)) for crossing a given bound.
  // - tolerance * max(1.0, abs(cost)) for an infeasible reduced cost.
  // - tolerance for an infeasible dual value.
  optional double solution_feasibility_tolerance = 22 [default = 1e-6];
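
  // Example for solution_feasibility_tolerance: with the default of 1e-6, a
  // variable whose bound is 1e4 may cross that bound by up to
  // 1e-6 * max(1.0, 1e4) = 1e-2 before the status is changed to IMPRECISE.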

  // If true, then when the solver returns a solution with an OPTIMAL status,
  // we can guarantee that:
  // - The primal variables are within their bounds.
  // - The dual variables are within their bounds.
  // - If we modify each component of the right-hand side a bit and each
  //   component of the objective function a bit, then the pair (primal values,
  //   dual values) is an EXACT optimal solution of the perturbed problem.
  // - The modifications above are smaller than the associated tolerances as
  //   defined in the comment for solution_feasibility_tolerance (*).
  //
  // (*): This is the only place where the guarantee is not tight since we
  // compute the upper bounds with a scalar product of the primal/dual solution
  // and the initial problem coefficients using only double precision.
  //
  // Note that whether or not this option is true, we still check the
  // primal/dual infeasibility and objective gap. However if it is false, we
  // don't move the primal/dual values within their bounds and leave them
  // untouched.
  optional bool provide_strong_optimal_guarantee = 24 [default = true];

  // If true, the internal API will change the return status to imprecise if the
  // solution does not respect the internal tolerances.
  optional bool change_status_to_imprecise = 58 [default = true];

  // When the solution of phase II is imprecise, we re-run phase II with the
  // opposite algorithm from that imprecise solution (i.e., if the primal or the
  // dual simplex was used, we use the dual or the primal simplex, respectively).
  // We repeat such re-optimization until the solution is precise, or we hit
  // this limit.
  optional double max_number_of_reoptimizations = 56 [default = 40];

  // Threshold for LU-factorization: for stability reasons, the magnitude of the
  // chosen pivot at a given step is guaranteed to be greater than this
  // threshold times the maximum magnitude of all the possible pivot choices in
  // the same column. The value must be in [0, 1].
  optional double lu_factorization_pivot_threshold = 25 [default = 0.01];
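
  // Example for lu_factorization_pivot_threshold: with the default of 0.01, if
  // the largest pivot candidate in a column has magnitude 10.0, only pivots of
  // magnitude at least 0.01 * 10.0 = 0.1 may be chosen in that column.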

  // Maximum time allowed in seconds to solve a problem.
  optional double max_time_in_seconds = 26 [default = inf];

  // Maximum deterministic time allowed to solve a problem. The deterministic
  // time is more or less correlated to the running time, and its unit should
  // be around the second (at least on a Xeon(R) CPU E5-1650 v2 @ 3.50GHz).
  //
  // TODO(user): Improve the correlation.
  optional double max_deterministic_time = 45 [default = inf];

  // Maximum number of simplex iterations to solve a problem.
  // A value of -1 means no limit.
  optional int64 max_number_of_iterations = 27 [default = -1];

  // How many columns we look at in the Markowitz pivoting rule to find
  // a good pivot. See markowitz.h.
  optional int32 markowitz_zlatev_parameter = 29 [default = 3];

  // If a pivot magnitude is smaller than this during the Markowitz LU
  // factorization, then the matrix is assumed to be singular. Note that
  // this is an absolute threshold and is not relative to the other possible
  // pivots on the same column (see lu_factorization_pivot_threshold).
  optional double markowitz_singularity_threshold = 30 [default = 1e-15];

  // Whether or not we use the dual simplex algorithm instead of the primal.
  optional bool use_dual_simplex = 31 [default = false];

  // During an incremental solve, let the solver decide whether it uses the
  // primal or dual simplex algorithm depending on the current solution and on
  // the new problem. Note that even if this is true, the value of
  // use_dual_simplex still indicates the default algorithm that the solver
  // will use.
  optional bool allow_simplex_algorithm_change = 32 [default = false];

  // Devex weights will be reset to 1.0 after this number of updates.
  optional int32 devex_weights_reset_period = 33 [default = 150];

  // Whether or not we use advanced preprocessing techniques.
  optional bool use_preprocessing = 34 [default = true];

  // Whether or not to use the middle product form update rather than the
  // standard eta LU update. The middle product form update should be a lot more
  // efficient (close to the Forrest-Tomlin update, a bit slower but easier to
  // implement). See for more details:
  // Qi Huangfu, J. A. Julian Hall, "Novel update techniques for the revised
  // simplex method", 28 January 2013, Technical Report ERGO-13-001,
  // http://www.maths.ed.ac.uk/hall/HuHa12/ERGO-13-001.pdf
  optional bool use_middle_product_form_update = 35 [default = true];

  // Whether we initialize devex weights to 1.0 or to the norms of the matrix
  // columns.
  optional bool initialize_devex_with_column_norms = 36 [default = true];

  // Whether or not we exploit the singleton columns already present in the
  // problem when we create the initial basis.
  optional bool exploit_singleton_column_in_initial_basis = 37 [default = true];

  // Like small_pivot_threshold but for the dual simplex. This is needed because
  // the dual algorithm does not interpret this value in the same way.
  // TODO(user): Clean this up and use the same small pivot detection.
  optional double dual_small_pivot_threshold = 38 [default = 1e-4];

  // A floating point tolerance used by the preprocessors. This is used for
  // things like detecting if two columns/rows are proportional or if an
  // interval is empty.
  //
  // Note that the preprocessors also use solution_feasibility_tolerance() to
  // detect if a problem is infeasible.
  optional double preprocessor_zero_tolerance = 39 [default = 1e-9];

  // The solver will stop as soon as it has proven that the objective is smaller
  // than objective_lower_limit or greater than objective_upper_limit. Depending
  // on the simplex algorithm (primal or dual) and the optimization direction,
  // note that only one bound will be used at a time.
  //
  // Important: The solver does not add any tolerances to these values, and as
  // soon as the objective (as computed by the solver, so with some imprecision)
  // crosses one of these bounds (strictly), the search will stop. It is up to
  // the client to add any tolerance if needed.
  optional double objective_lower_limit = 40 [default = -inf];
  optional double objective_upper_limit = 41 [default = inf];

  // During a degenerate iteration, the more conservative approach is to do a
  // step of length zero (while shifting the bound of the leaving variable).
  // That is, the variable values are unchanged for the primal simplex or the
  // reduced costs are unchanged for the dual simplex. However, instead of doing
  // a step of length zero, it seems to be better on degenerate problems to do a
  // small positive step. This is what is recommended in the EXPAND procedure
  // described in:
  // P. E. Gill, W. Murray, M. A. Saunders, and M. H. Wright. "A practical anti-
  // cycling procedure for linearly constrained optimization".
  // Mathematical Programming, 45:437–474, 1989.
  //
  // Here, during a degenerate iteration we do a small positive step of this
  // factor times the primal (resp. dual) tolerance. In the primal simplex, this
  // may effectively push variable values (very slightly) further out of their
  // bounds (resp. reduced costs for the dual simplex).
  //
  // Setting this to zero reverts to the more conservative approach of a zero
  // step during degenerate iterations.
  optional double degenerate_ministep_factor = 42 [default = 0.01];
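
  // Example for degenerate_ministep_factor: with the default of 0.01 and
  // primal_feasibility_tolerance = 1e-8, a degenerate primal iteration takes a
  // small step that may push a variable roughly 0.01 * 1e-8 = 1e-10 past its
  // bound.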

  // At the beginning of each solve, the random number generator used in some
  // part of the solver is reinitialized to this seed. If you change the random
  // seed, the solver may make different choices during the solving process.
  // Note that this may lead to a different solution, for example a different
  // optimal basis.
  //
  // For some problems, the running time may vary a lot depending on small
  // changes in the solving algorithm. Running the solver with different seeds
  // enables more robust benchmarks when evaluating new features.
  //
  // Also note that the solver is fully deterministic: two runs of the same
  // binary, on the same machine, on the exact same data and with the same
  // parameters will go through the exact same iterations. If they hit a time
  // limit, they might of course yield different results because one will have
  // advanced farther than the other.
  optional int32 random_seed = 43 [default = 1];

  // Number of threads in the OMP parallel sections. If left to 1, the code will
  // not create any OMP threads and will remain single-threaded.
  optional int32 num_omp_threads = 44 [default = 1];

  // When this is true, the costs are randomly perturbed before the dual
  // simplex is even started. This has been shown to improve the dual simplex
  // performance. For a good reference, see Huangfu Q (2013), "High performance
  // simplex solver", Ph.D. dissertation, University of Edinburgh.
  optional bool perturb_costs_in_dual_simplex = 53 [default = false];

  // We have two possible dual phase I algorithms. Both work on an LP that
  // minimizes the sum of dual infeasibilities. One uses dedicated code (when
  // this parameter is true), the other uses exactly the same code as the dual
  // phase II but on an auxiliary problem where the variable bounds of the
  // original problem are changed.
  //
  // TODO(user): For now we have both, but ideally the non-dedicated version
  // will win since it is a lot less code to maintain.
  optional bool use_dedicated_dual_feasibility_algorithm = 62 [default = true];

  // The magnitude of the cost perturbation is given by
  //   RandomIn(1.0, 2.0) * (
  //       relative_cost_perturbation * cost
  //       + relative_max_cost_perturbation * max_cost);
  optional double relative_cost_perturbation = 54 [default = 1e-5];
  optional double relative_max_cost_perturbation = 55 [default = 1e-7];
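
  // Example for the two perturbation parameters above: for a cost of 100 and a
  // maximum cost magnitude of 1000, the perturbation magnitude is drawn from
  // RandomIn(1.0, 2.0) * (1e-5 * 100 + 1e-7 * 1000), i.e. it lies between
  // 1.1e-3 and 2.2e-3.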

  // If our upper bound on the condition number of the initial basis (from our
  // heuristic or a warm start) is above this threshold, we revert to an
  // all-slack basis.
  optional double initial_condition_number_threshold = 59 [default = 1e50];

  // If true, logs the progress of a solve to LOG(INFO). Note that the same
  // messages can also be turned on by displaying logs at level 1 for the
  // relevant files.
  optional bool log_search_progress = 61 [default = false];

  // If true, logs will be displayed to stdout instead of using Google log info.
  optional bool log_to_stdout = 66 [default = true];

  // If the starting basis contains FREE variables with bounds, we will move
  // any such variable to its closest bound if the distance is smaller than
  // this parameter.
  //
  // The starting statuses can contain FREE variables with bounds if a user
  // sets them like this externally. Also, any variable with an initial BASIC
  // status that was not kept in the initial basis is marked as FREE before
  // this step.
  //
  // Note that by default a FREE variable is assumed to be zero unless a
  // starting value was specified via SetStartingVariableValuesForNextSolve().
  //
  // Note that, at the end of the solve, some of these FREE variables with
  // bounds and an interior point value might still be left in the final
  // solution. Enable push_to_vertex to clean these up.
  optional double crossover_bound_snapping_distance = 64 [default = inf];

  // If the optimization phase finishes with super-basic variables (i.e.,
  // variables that either 1) have bounds but are FREE in the basis, or 2) have
  // no bounds and are FREE in the basis at a nonzero value), then run a "push"
  // phase to push these variables to bounds, obtaining a vertex solution. Note
  // this situation can happen only if a starting value was specified via
  // SetStartingVariableValuesForNextSolve().
  optional bool push_to_vertex = 65 [default = true];

  // If presolve runs, include the pass that detects implied free variables.
  optional bool use_implied_free_preprocessor = 67 [default = true];

  // Any finite value in the input LP must be below this threshold, otherwise
  // the model will be reported invalid. This is needed to avoid floating point
  // overflow when evaluating bounds * coeff for instance. In practice, users
  // shouldn't use very large values in an LP. With the default threshold, even
  // evaluating a large constraint with variables at their bounds shouldn't
  // cause any overflow.
  optional double max_valid_magnitude = 70 [default = 1e30];
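
  // Example for max_valid_magnitude: with the default of 1e30, a variable
  // bound or coefficient of 1e31 makes the model invalid, while 1e29 is
  // accepted.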

  // Values in the input LP with a magnitude lower than this will be ignored.
  // This is similar to drop_tolerance but more aggressive as this is used
  // before scaling. This is mainly here to avoid underflow and to have simpler
  // invariants in the code, like a * b == 0 iff a == 0 or b == 0.
  optional double drop_magnitude = 71 [default = 1e-30];

  // On some problems like stp3d or pds-100, this makes a huge difference in
  // speed and in the number of iterations of the dual simplex.
  optional bool dual_price_prioritize_norm = 69 [default = false];
}