Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
revised_simplex.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19#include <cstdlib>
20#include <functional>
21#include <map>
22#include <string>
23#include <utility>
24#include <vector>
25
26#include "absl/flags/flag.h"
27#include "absl/log/check.h"
28#include "absl/random/bit_gen_ref.h"
29#include "absl/random/random.h"
30#include "absl/random/seed_sequences.h"
31#include "absl/strings/match.h"
32#include "absl/strings/str_cat.h"
33#include "absl/strings/str_format.h"
34#include "absl/strings/string_view.h"
39#include "ortools/glop/parameters.pb.h"
40#include "ortools/glop/status.h"
54#include "ortools/util/stats.h"
56
// Debug/diagnostic command-line flags for the revised simplex. They control
// extra display output and early-stop behavior; they are not tuning knobs.
57ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false,
58 "Display numbers as fractions.");
59ABSL_FLAG(bool, simplex_stop_after_first_basis, false,
60 "Stop after first basis has been computed.");
61ABSL_FLAG(bool, simplex_stop_after_feasibility, false,
62 "Stop after first phase has been completed.");
63ABSL_FLAG(bool, simplex_display_stats, false, "Display algorithm statistics.");
64
65namespace operations_research {
66namespace glop {
67namespace {
68
69// Calls the given closure upon destruction. It can be used to ensure that a
70// closure is executed whenever a function returns.
// Calls the given closure upon destruction. It can be used to ensure that a
// closure is executed whenever a function returns.
//
// The class is neither copyable nor movable: a copy (or a moved-from
// instance) would invoke the closure once per object, which is never what a
// scope-exit guard should do.
class Cleanup {
 public:
  // Takes ownership of `closure`; it is invoked exactly once, from the
  // destructor.
  explicit Cleanup(std::function<void()> closure)
      : closure_(std::move(closure)) {}

  Cleanup(const Cleanup&) = delete;
  Cleanup& operator=(const Cleanup&) = delete;

  ~Cleanup() { closure_(); }

 private:
  std::function<void()> closure_;
};
80} // namespace
81
// Debug-build bound checks: assert that a column index is inside
// [0, num_cols_). Relies on the num_cols_ member of the enclosing class at
// the expansion site.
82#define DCHECK_COL_BOUNDS(col) \
83 { \
84 DCHECK_LE(0, col); \
85 DCHECK_GT(num_cols_, col); \
86 }
87
// Same as above for row indices in [0, num_rows_).
88// TODO(user): Remove this function.
89#define DCHECK_ROW_BOUNDS(row) \
90 { \
91 DCHECK_LE(0, row); \
92 DCHECK_GT(num_rows_, row); \
93 }
94
// Fixed seed so that, by default, repeated runs are reproducible.
95constexpr const uint64_t kDeterministicSeed = 42;
96
97namespace {
98
// Compile-time switch selecting which random generator backs random_ in the
// constructor below (absl_random_ when true, deterministic_random_ when
// false). Kept as a function so the call sites read naturally.
99bool UseAbslRandom() { return false; }
100
101} // namespace
102
// Constructor initializer list (the signature line was elided by the doc
// extraction; this is RevisedSimplex's default construction). It wires the
// sub-components together: the factorization reads the compact matrix and
// basis, the pricing/reduced-cost objects read the factorization, and all
// randomized components share random_.
104 : problem_status_(ProblemStatus::INIT),
105 objective_(),
106 basis_(),
107 variable_name_(),
108 direction_(),
109 error_(),
// random_ is a BitGenRef view onto one of two member generators; see
// UseAbslRandom() above.
110 random_(UseAbslRandom() ? absl::BitGenRef(absl_random_)
111 : absl::BitGenRef(deterministic_random_)),
112 basis_factorization_(&compact_matrix_, &basis_),
113 variables_info_(compact_matrix_),
114 primal_edge_norms_(compact_matrix_, variables_info_,
115 basis_factorization_),
116 dual_edge_norms_(basis_factorization_),
117 dual_prices_(random_),
118 variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
119 basis_factorization_, &dual_edge_norms_, &dual_prices_),
120 update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
121 basis_factorization_),
122 reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
123 basis_factorization_, random_),
124 entering_variable_(variables_info_, random_, &reduced_costs_),
125 primal_prices_(random_, variables_info_, &primal_edge_norms_,
126 &reduced_costs_),
127 iteration_stats_(),
128 ratio_test_stats_(),
129 function_stats_("SimplexFunctionStats"),
130 parameters_(),
131 test_lu_() {
// Propagate the default parameters to all sub-components.
132 SetParameters(parameters_);
133}
134
// Function body (signature line elided by the doc extraction). Clears the
// externally-provided warm-start data: the basis statuses and the starting
// variable values.
136 SCOPED_TIME_STAT(&function_stats_);
137 solution_state_.statuses.clear();
138 variable_starting_values_.clear();
139}
140
// Body of the state-loading function (signature elided by the extraction):
// copies the given basis state in and marks it as externally set, unless it
// is identical to the current one.
142 // We avoid marking the state as set externally if it is the same as the
143 // current one.
144 //
145 // TODO(user): Add comparison operator.
146 if (state.statuses == solution_state_.statuses) return;
147
148 solution_state_ = state;
149 solution_state_has_been_set_externally_ = true;
150}
151
// Setter for the starting variable values used by the next solve (first line
// of the signature elided by the extraction).
153 const DenseRow& values) {
154 variable_starting_values_ = values;
155}
156
// Incremental solve entry point (first signature line elided by the
// extraction). Re-syncs the compact matrix from the transposed one if needed,
// installs the new objective, and either takes a warm-start fast track (dual
// simplex, only bounds changed) or redoes the full initialization.
158 const DenseRow& objective, Fractional objective_scaling_factor,
159 Fractional objective_offset, TimeLimit* time_limit) {
160 const double start_time = time_limit->GetElapsedTime();
161 default_logger_.EnableLogging(parameters_.log_search_progress());
162 default_logger_.SetLogToStdOut(parameters_.log_to_stdout());
163 parameters_ = initial_parameters_;
164 PropagateParameters();
165
166 // The source of truth is the transposed matrix.
167 if (transpose_was_changed_) {
168 compact_matrix_.PopulateFromTranspose(transposed_matrix_);
169 num_rows_ = compact_matrix_.num_rows();
170 num_cols_ = compact_matrix_.num_cols();
171 first_slack_col_ = num_cols_ - RowToColIndex(num_rows_);
172 }
173
174 DCHECK_EQ(num_cols_, objective.size());
175
176 // Copy objective
177 objective_scaling_factor_ = objective_scaling_factor;
178 objective_offset_ = objective_offset;
179 const bool objective_is_unchanged = objective_ == objective;
180 objective_ = objective;
181 InitializeObjectiveLimit();
182
183 // Initialize variable infos from the mutated bounds.
184 variables_info_.InitializeFromMutatedState();
185
// Warm-start fast track: same objective and matrix, state not overridden
// externally — reuse the existing basis and only refresh variable values.
186 if (objective_is_unchanged && parameters_.use_dual_simplex() &&
187 !transpose_was_changed_ && !solution_state_has_been_set_externally_ &&
188 !solution_state_.IsEmpty()) {
189 // Fast track if we just changed variable bounds.
190 primal_edge_norms_.Clear();
191 variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0),
192 solution_state_);
193 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
194 variable_values_.RecomputeBasicVariableValues();
195 return SolveInternal(start_time, false, objective, time_limit);
196 } else {
197 GLOP_RETURN_IF_ERROR(FinishInitialization(true));
198 }
199
200 return SolveInternal(start_time, false, objective, time_limit);
201}
202
// Body of Solve(lp, time_limit) (signature elided by the extraction): full
// non-incremental solve from a cleaned-up LinearProgram.
204 const double start_time = time_limit->GetElapsedTime();
205 default_logger_.EnableLogging(parameters_.log_search_progress());
206 default_logger_.SetLogToStdOut(parameters_.log_to_stdout());
207
208 DCHECK(lp.IsCleanedUp());
209 GLOP_RETURN_IF_ERROR(Initialize(lp));
// NOTE(review): original line 211 (the remaining SolveInternal() arguments)
// was dropped by the doc extraction — diff against upstream before editing.
210 return SolveInternal(start_time, lp.IsMaximizationProblem(),
212}
213
// Runs the full simplex solve on the already-initialized internal state:
// phase I (primal or dual feasibility, per parameters), then alternating
// primal/dual phase II re-optimizations until the solution is precise or a
// limit is hit, then an optional "push" phase for super-basic variables, and
// finally the bookkeeping that feeds the solution getters.
//
// NOTE(review): this listing was recovered from a doc extraction; original
// source line numbers are fused at the start of each line and a few lines
// (218, 273, 408, 422) were dropped. Diff against upstream before editing.
214ABSL_MUST_USE_RESULT Status RevisedSimplex::SolveInternal(
215 double start_time, bool is_maximization_problem,
216 const DenseRow& objective_coefficients, TimeLimit* time_limit) {
217 SCOPED_TIME_STAT(&function_stats_);
// NOTE(review): original line 218 elided by the extraction here.
219 Cleanup update_deterministic_time_on_return(
220 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
221
222 SOLVER_LOG(logger_, "");
223 primal_edge_norms_.SetTimeLimit(time_limit);
224 dual_edge_norms_.SetTimeLimit(time_limit);
225
226 if (logger_->LoggingIsEnabled()) {
227 DisplayBasicVariableStatistics();
228 }
229
// Reset all per-solve counters, timers and cached data.
230 dual_infeasibility_improvement_direction_.clear();
231 update_row_.Invalidate();
232 test_lu_.Clear();
233 problem_status_ = ProblemStatus::INIT;
234 phase_ = Phase::FEASIBILITY;
235 num_iterations_ = 0;
236 num_feasibility_iterations_ = 0;
237 num_optimization_iterations_ = 0;
238 num_push_iterations_ = 0;
239 feasibility_time_ = 0.0;
240 optimization_time_ = 0.0;
241 push_time_ = 0.0;
242 total_time_ = 0.0;
243
244 // In case we abort because of an error, we cannot assume that the current
245 // solution state will be in sync with all our internal data structure. In
246 // case we abort without resetting it, setting this allow us to still use the
247 // previous state info, but we will double-check everything.
248 solution_state_has_been_set_externally_ = true;
249
250 if (VLOG_IS_ON(2)) {
251 ComputeNumberOfEmptyRows();
252 ComputeNumberOfEmptyColumns();
253 DisplayProblem();
254 }
255 if (absl::GetFlag(FLAGS_simplex_stop_after_first_basis)) {
256 DisplayAllStats();
257 return Status::OK();
258 }
259
260 const bool use_dual = parameters_.use_dual_simplex();
261
// ----- Phase I: reach primal or dual feasibility. -----
262 // TODO(user): Avoid doing the first phase checks when we know from the
263 // incremental solve that the solution is already dual or primal feasible.
264 SOLVER_LOG(logger_, "");
265 primal_edge_norms_.SetPricingRule(parameters_.feasibility_rule());
266 if (use_dual) {
267 if (parameters_.perturb_costs_in_dual_simplex()) {
268 reduced_costs_.PerturbCosts();
269 }
270
271 if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
272 variables_info_.MakeBoxedVariableRelevant(false);
// NOTE(review): original line 273 (the macro wrapping this call, likely
// GLOP_RETURN_IF_ERROR) elided by the extraction.
274 DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
275
276 if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
277 // Note(user): In most cases, the matrix will already be refactorized
278 // and both Refactorize() and PermuteBasis() will do nothing. However,
279 // if the time limit is reached during the first phase, this might not
280 // be the case and RecomputeBasicVariableValues() below DCHECKs that the
281 // matrix is refactorized. This is not required, but we currently only
282 // want to recompute values from scratch when the matrix was just
283 // refactorized to maximize precision.
284 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
285 PermuteBasis();
286
287 variables_info_.MakeBoxedVariableRelevant(true);
288 reduced_costs_.MakeReducedCostsPrecise();
289
290 // This is needed to display errors properly.
291 MakeBoxedVariableDualFeasible(
292 variables_info_.GetNonBasicBoxedVariables(),
293 /*update_basic_values=*/false);
294 variable_values_.RecomputeBasicVariableValues();
295 }
296 } else {
297 // Test initial dual infeasibility, ignoring boxed variables. We currently
298 // refactorize/recompute the reduced costs if not already done.
299 // TODO(user): Not ideal in an incremental setting.
300 reduced_costs_.MakeReducedCostsPrecise();
301 bool refactorize = reduced_costs_.NeedsBasisRefactorization();
302 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
303
304 const Fractional initial_infeasibility =
305 reduced_costs_.ComputeMaximumDualInfeasibilityOnNonBoxedVariables();
306 if (initial_infeasibility <
307 reduced_costs_.GetDualFeasibilityTolerance()) {
308 SOLVER_LOG(logger_, "Initial basis is dual feasible.");
309 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
310 MakeBoxedVariableDualFeasible(
311 variables_info_.GetNonBasicBoxedVariables(),
312 /*update_basic_values=*/false);
313 variable_values_.RecomputeBasicVariableValues();
314 } else {
315 // Transform problem and recompute variable values.
316 variables_info_.TransformToDualPhaseIProblem(
317 reduced_costs_.GetDualFeasibilityTolerance(),
318 reduced_costs_.GetReducedCosts());
319 DenseRow zero; // We want the FREE variable at zero here.
320 variable_values_.ResetAllNonBasicVariableValues(zero);
321 variable_values_.RecomputeBasicVariableValues();
322
323 // Optimize.
324 DisplayErrors();
325 GLOP_RETURN_IF_ERROR(DualMinimize(false, time_limit));
326
327 // Restore original problem and recompute variable values. Note that we
328 // need the reduced cost on the fixed positions here.
329 variables_info_.EndDualPhaseI(
330 reduced_costs_.GetDualFeasibilityTolerance(),
331 reduced_costs_.GetFullReducedCosts());
332 variable_values_.ResetAllNonBasicVariableValues(
333 variable_starting_values_);
334 variable_values_.RecomputeBasicVariableValues();
335
336 // TODO(user): Note that if there was cost shifts, we just keep them
337 // until the end of the optim.
338 //
339 // TODO(user): What if slightly infeasible? we shouldn't really stop.
340 // Call primal ? use higher tolerance ? It seems we can always kind of
341 // continue and deal with the issue later. Find a way other than this +
342 // 1e-6 hack.
343 if (problem_status_ == ProblemStatus::OPTIMAL) {
344 if (reduced_costs_.ComputeMaximumDualInfeasibility() <
345 reduced_costs_.GetDualFeasibilityTolerance() + 1e-6) {
346 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
347 } else {
348 SOLVER_LOG(logger_, "Infeasible after first phase.");
349 problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
350 }
351 }
352 }
353 }
354 } else {
355 GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
356
357 // After the primal phase I, we need to restore the objective.
358 if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
359 objective_ = objective_coefficients;
360 if (is_maximization_problem) {
361 for (Fractional& value : objective_) {
362 value = -value;
363 }
364 }
365 objective_.resize(num_cols_, 0.0); // For the slack.
366 reduced_costs_.ResetForNewObjective();
367 }
368 }
369
370 DisplayErrors();
371
// ----- Phase II: alternate primal/dual re-optimizations. -----
372 phase_ = Phase::OPTIMIZATION;
373 feasibility_time_ = time_limit->GetElapsedTime() - start_time;
374 primal_edge_norms_.SetPricingRule(parameters_.optimization_rule());
375 num_feasibility_iterations_ = num_iterations_;
376
377 // Because of shifts or perturbations, we may need to re-run a dual simplex
378 // after the primal simplex finished, or the opposite.
379 //
380 // We alter between solving with primal and dual Phase II algorithm as long as
381 // time limit permits *and* we did not yet achieve the desired precision.
382 // I.e., we run iteration i if the solution from iteration i-1 was not precise
383 // after we removed the bound and cost shifts and perturbations.
384 //
385 // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
386 // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
387 // (i.e., these statuses are not necesserily a consequence of hitting a time
388 // limit).
389 SOLVER_LOG(logger_, "");
390 for (int num_optims = 0;
391 // We want to enter the loop when both num_optims and num_iterations_ are
392 // *equal* to the corresponding limits (to return a meaningful status
393 // when the limits are set to 0).
394 num_optims <= parameters_.max_number_of_reoptimizations() &&
395 !objective_limit_reached_ &&
396 (num_iterations_ == 0 ||
397 num_iterations_ < parameters_.max_number_of_iterations()) &&
398 !time_limit->LimitReached() &&
399 !absl::GetFlag(FLAGS_simplex_stop_after_feasibility) &&
400 (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
401 problem_status_ == ProblemStatus::DUAL_FEASIBLE);
402 ++num_optims) {
403 if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
404 // Run the primal simplex.
405 GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
406 } else {
407 // Run the dual simplex.
// NOTE(review): original line 408 (the macro wrapping this call, likely
// GLOP_RETURN_IF_ERROR) elided by the extraction.
409 DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
410 }
411
412 // PrimalMinimize() or DualMinimize() always double check the result with
413 // maximum precision by refactoring the basis before exiting (except if an
414 // iteration or time limit was reached).
415 DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
416 problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
417 basis_factorization_.IsRefactorized());
418
419 // If SetIntegralityScale() was called, we perform a polish operation.
420 if (!integrality_scale_.empty() &&
421 problem_status_ == ProblemStatus::OPTIMAL) {
// NOTE(review): original line 422 (presumably the polish call mentioned in
// the comment above) elided by the extraction — confirm against upstream.
423 }
424
425 // Remove the bound and cost shifts (or perturbations).
426 //
427 // Note(user): Currently, we never do both at the same time, so we could
428 // be a bit faster here, but then this is quick anyway.
429 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
430 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
431 PermuteBasis();
432 variable_values_.RecomputeBasicVariableValues();
433 reduced_costs_.ClearAndRemoveCostShifts();
434
435 DisplayErrors();
436
437 // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
438 // status by checking with the other phase I that the problem is really
439 // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
440 // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
441 // OPTIMAL and the dual does not have issues on this problem.
442 //
443 // TODO(user): There is another issue on infeas/qual.mps. I think we should
444 // just check the dual ray, not really the current solution dual
445 // feasibility.
446 if (problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
447 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
448 if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
449 variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
450 variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
451 SOLVER_LOG(logger_,
452 "PRIMAL_UNBOUNDED was reported, but the residual and/or "
453 "dual infeasibility is above the tolerance");
454 if (parameters_.change_status_to_imprecise()) {
455 problem_status_ = ProblemStatus::IMPRECISE;
456 }
457 break;
458 }
459
460 // All of our tolerance are okay, but the dual ray might be fishy. This
461 // happens on l30.mps and on L1_sixm250obs.mps.gz. If the ray do not
462 // seems good enough, we might actually just be at the optimal and have
463 // trouble going down to our relatively low default tolerances.
464 //
465 // The difference bettween optimal and unbounded can be thin. Say you
466 // have a free variable with no constraint and a cost of epsilon,
467 // depending on epsilon and your tolerance, this will either cause the
468 // problem to be unbounded, or can be ignored.
469 //
470 // Here, we compute what is the cost gain if we move from the current
471 // value with the ray up to the bonds + tolerance. If this gain is < 1,
472 // it is hard to claim we are really unbounded. This is a quick
473 // heuristic to error on the side of optimality rather than
474 // unboundedness.
475 double max_magnitude = 0.0;
476 double min_distance = kInfinity;
477 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
478 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
479 double cost_delta = 0.0;
480 for (ColIndex col(0); col < num_cols_; ++col) {
481 cost_delta += solution_primal_ray_[col] * objective_[col];
482 if (solution_primal_ray_[col] > 0 && upper_bounds[col] != kInfinity) {
483 const Fractional value = variable_values_.Get(col);
484 const Fractional distance = (upper_bounds[col] - value + tolerance) /
485 solution_primal_ray_[col];
486 min_distance = std::min(distance, min_distance);
487 max_magnitude = std::max(solution_primal_ray_[col], max_magnitude);
488 }
489 if (solution_primal_ray_[col] < 0 && lower_bounds[col] != -kInfinity) {
490 const Fractional value = variable_values_.Get(col);
491 const Fractional distance = (value - lower_bounds[col] + tolerance) /
492 -solution_primal_ray_[col];
493 min_distance = std::min(distance, min_distance);
494 max_magnitude = std::max(-solution_primal_ray_[col], max_magnitude);
495 }
496 }
497 SOLVER_LOG(logger_, "Primal unbounded ray: max blocking magnitude = ",
498 max_magnitude, ", min distance to bound + ", tolerance, " = ",
499 min_distance, ", ray cost delta = ", cost_delta);
500 if (min_distance * std::abs(cost_delta) < 1 &&
501 reduced_costs_.ComputeMaximumDualInfeasibility() <= tolerance) {
502 SOLVER_LOG(logger_,
503 "PRIMAL_UNBOUNDED was reported, but the tolerance are good "
504 "and the unbounded ray is not great.");
505 SOLVER_LOG(logger_,
506 "The difference between unbounded and optimal can depend "
507 "on a slight change of tolerance, trying to see if we are "
508 "at OPTIMAL after postsolve.");
509 problem_status_ = ProblemStatus::OPTIMAL;
510 }
511 break;
512 }
513 if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
514 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
515 if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
516 variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
517 reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
518 SOLVER_LOG(logger_,
519 "DUAL_UNBOUNDED was reported, but the residual and/or "
520 "dual infeasibility is above the tolerance");
521 if (parameters_.change_status_to_imprecise()) {
522 problem_status_ = ProblemStatus::IMPRECISE;
523 }
524 }
525
526 // Validate the dual ray that prove primal infeasibility.
527 //
528 // By taking the linear combination of the constraint, we should arrive
529 // to an infeasible <= 0 constraint using the variable bounds.
530 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
531 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
532 Fractional implied_lb = 0.0;
533 Fractional error = 0.0;
534 for (ColIndex col(0); col < num_cols_; ++col) {
535 const Fractional coeff = solution_dual_ray_row_combination_[col];
536 if (coeff > 0) {
537 if (lower_bounds[col] == -kInfinity) {
538 error = std::max(error, coeff);
539 } else {
540 implied_lb += coeff * lower_bounds[col];
541 }
542 } else if (coeff < 0) {
543 if (upper_bounds[col] == kInfinity) {
544 error = std::max(error, -coeff);
545 } else {
546 implied_lb += coeff * upper_bounds[col];
547 }
548 }
549 }
550 SOLVER_LOG(logger_, "Dual ray error=", error,
551 " infeasibility=", implied_lb);
552 if (implied_lb < tolerance || error > tolerance) {
553 SOLVER_LOG(logger_,
554 "DUAL_UNBOUNDED was reported, but the dual ray is not "
555 "proving infeasibility with high enough tolerance");
556 if (parameters_.change_status_to_imprecise()) {
557 problem_status_ = ProblemStatus::IMPRECISE;
558 }
559 }
560 break;
561 }
562
563 // Change the status, if after the shift and perturbation removal the
564 // problem is not OPTIMAL anymore.
565 if (problem_status_ == ProblemStatus::OPTIMAL) {
566 const Fractional solution_tolerance =
567 parameters_.solution_feasibility_tolerance();
568 const Fractional primal_residual =
569 variable_values_.ComputeMaximumPrimalResidual();
570 const Fractional dual_residual =
571 reduced_costs_.ComputeMaximumDualResidual();
572 if (primal_residual > solution_tolerance ||
573 dual_residual > solution_tolerance) {
574 SOLVER_LOG(logger_,
575 "OPTIMAL was reported, yet one of the residuals is "
576 "above the solution feasibility tolerance after the "
577 "shift/perturbation are removed.");
578 if (parameters_.change_status_to_imprecise()) {
579 problem_status_ = ProblemStatus::IMPRECISE;
580 }
581 } else {
582 // We use the "precise" tolerances here to try to report the best
583 // possible solution. Note however that we cannot really hope for an
584 // infeasibility lower than its corresponding residual error. Note that
585 // we already adapt the tolerance like this during the simplex
586 // execution.
587 const Fractional primal_tolerance = std::max(
588 primal_residual, parameters_.primal_feasibility_tolerance());
589 const Fractional dual_tolerance =
590 std::max(dual_residual, parameters_.dual_feasibility_tolerance());
591 const Fractional primal_infeasibility =
592 variable_values_.ComputeMaximumPrimalInfeasibility();
593 const Fractional dual_infeasibility =
594 reduced_costs_.ComputeMaximumDualInfeasibility();
595 if (primal_infeasibility > primal_tolerance &&
596 dual_infeasibility > dual_tolerance) {
597 SOLVER_LOG(logger_,
598 "OPTIMAL was reported, yet both of the infeasibility "
599 "are above the tolerance after the "
600 "shift/perturbation are removed.");
601 if (parameters_.change_status_to_imprecise()) {
602 problem_status_ = ProblemStatus::IMPRECISE;
603 }
604 } else if (primal_infeasibility > primal_tolerance) {
605 if (num_optims == parameters_.max_number_of_reoptimizations()) {
606 SOLVER_LOG(logger_,
607 "The primal infeasibility is still higher than the "
608 "requested internal tolerance, but the maximum "
609 "number of optimization is reached.");
610 break;
611 }
612 SOLVER_LOG(logger_, "");
613 SOLVER_LOG(logger_, "Re-optimizing with dual simplex ... ");
614 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
615 } else if (dual_infeasibility > dual_tolerance) {
616 if (num_optims == parameters_.max_number_of_reoptimizations()) {
617 SOLVER_LOG(logger_,
618 "The dual infeasibility is still higher than the "
619 "requested internal tolerance, but the maximum "
620 "number of optimization is reached.");
621 break;
622 }
623 SOLVER_LOG(logger_, "");
624 SOLVER_LOG(logger_, "Re-optimizing with primal simplex ... ");
625 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
626 }
627 }
628 }
629 }
630
631 // Check that the return status is "precise".
632 //
633 // TODO(user): we currently skip the DUAL_INFEASIBLE status because the
634 // quantities are not up to date in this case.
635 if (parameters_.change_status_to_imprecise() &&
636 problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
637 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
638 if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
639 reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
640 problem_status_ = ProblemStatus::IMPRECISE;
641 } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
642 problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
643 problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
644 if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
645 problem_status_ = ProblemStatus::IMPRECISE;
646 }
647 } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
648 problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
649 problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
650 if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
651 problem_status_ = ProblemStatus::IMPRECISE;
652 }
653 }
654 }
655
656 total_time_ = time_limit->GetElapsedTime() - start_time;
657 optimization_time_ = total_time_ - feasibility_time_;
658 num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
659
// ----- Optional push phase for super-basic variables. -----
660 // If the user didn't provide starting variable values, then there is no need
661 // to check for super-basic variables.
662 if (!variable_starting_values_.empty()) {
663 const int num_super_basic = ComputeNumberOfSuperBasicVariables();
664 if (num_super_basic > 0) {
665 SOLVER_LOG(logger_,
666 "Num super-basic variables left after optimize phase: ",
667 num_super_basic);
668 if (parameters_.push_to_vertex()) {
669 if (problem_status_ == ProblemStatus::OPTIMAL) {
670 SOLVER_LOG(logger_, "");
671 phase_ = Phase::PUSH;
672 GLOP_RETURN_IF_ERROR(PrimalPush(time_limit));
673 // TODO(user): We should re-check for feasibility at this point and
674 // apply clean-up as needed.
675 } else {
676 SOLVER_LOG(logger_,
677 "Skipping push phase because optimize didn't succeed.");
678 }
679 }
680 }
681 }
682
683 total_time_ = time_limit->GetElapsedTime() - start_time;
684 push_time_ = total_time_ - feasibility_time_ - optimization_time_;
685 num_push_iterations_ = num_iterations_ - num_feasibility_iterations_ -
686 num_optimization_iterations_;
687
688 // Store the result for the solution getters.
689 solution_objective_value_ = ComputeInitialProblemObjectiveValue();
690 solution_dual_values_ = reduced_costs_.GetDualValues();
691 solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
692 SaveState();
693
694 if (is_maximization_problem) {
695 ChangeSign(&solution_dual_values_);
696 ChangeSign(&solution_reduced_costs_);
697 }
698
699 // If the problem is unbounded, set the objective value to +/- infinity.
700 if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
701 problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
702 solution_objective_value_ =
703 (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
704 : -kInfinity;
705 if (is_maximization_problem) {
706 solution_objective_value_ = -solution_objective_value_;
707 }
708 }
709
710 variable_starting_values_.clear();
711 DisplayAllStats();
712 return Status::OK();
713}
714
// Getter bodies (signatures elided by the doc extraction): the final problem
// status, the stored objective value and the total iteration count of the
// last solve.
716 return problem_status_;
717}
718
720 return solution_objective_value_;
721}
722
724 return num_iterations_;
725}
726
727RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
728
729ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
730
// Getter bodies (signatures elided by the doc extraction): per-variable
// value, per-variable reduced cost, all reduced costs, per-row dual value
// and per-variable status.
732 return variable_values_.Get(col);
733}
734
736 return solution_reduced_costs_[col];
737}
738
740 return solution_reduced_costs_;
741}
742
744 return solution_dual_values_[row];
745}
746
748 return variables_info_.GetStatusRow()[col];
749}
750
751const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
752
// Constraint activity getter body (signature elided by the extraction).
754 // Note the negative sign since the slack variable is such that
755 // constraint_activity + slack_value = 0.
756 return -variable_values_.Get(SlackColIndex(row));
757}
758
// Constraint status getter body.
// NOTE(review): original lines 763-764 and 766-769 (the status mapping) were
// dropped by the doc extraction — this fragment is incomplete; diff against
// upstream before editing.
760 // The status of the given constraint is the same as the status of the
761 // associated slack variable with a change of sign.
762 const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
765 }
768 }
770}
771
// Ray getters (signatures elided): each is only meaningful for the matching
// unbounded status, as enforced by the DCHECKs.
773 DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED);
774 return solution_primal_ray_;
775}
777 DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
778 return solution_dual_ray_;
779}
780
782 DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
783 return solution_dual_ray_row_combination_;
784}
785
786ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
787
// Basis factorization getter body (signature elided by the extraction). The
// DCHECK guards that no column permutation is pending in the factorization.
789 DCHECK(basis_factorization_.GetColumnPermutation().empty());
790 return basis_factorization_;
791}
792
793std::string RevisedSimplex::GetPrettySolverStats() const {
794 return absl::StrFormat(
795 "Problem status : %s\n"
796 "Solving time : %-6.4g\n"
797 "Number of iterations : %u\n"
798 "Time for solvability (first phase) : %-6.4g\n"
799 "Number of iterations for solvability : %u\n"
800 "Time for optimization : %-6.4g\n"
801 "Number of iterations for optimization : %u\n"
802 "Stop after first basis : %d\n",
803 GetProblemStatusString(problem_status_), total_time_, num_iterations_,
804 feasibility_time_, num_feasibility_iterations_, optimization_time_,
805 num_optimization_iterations_,
806 absl::GetFlag(FLAGS_simplex_stop_after_first_basis));
807}
808
// Body of the deterministic-time accounting function (signature elided by
// the extraction): sums the deterministic time reported by each
// sub-component plus the price-update floating-point work.
810 // TODO(user): Count what is missing.
811 return DeterministicTimeForFpOperations(num_update_price_operations_) +
812 basis_factorization_.DeterministicTime() +
813 update_row_.DeterministicTime() +
814 entering_variable_.DeterministicTime() +
815 reduced_costs_.DeterministicTime() +
816 primal_edge_norms_.DeterministicTime();
817}
818
819void RevisedSimplex::SetVariableNames() {
820 variable_name_.resize(num_cols_, "");
821 for (ColIndex col(0); col < first_slack_col_; ++col) {
822 const ColIndex var_index = col + 1;
823 variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
824 }
825 for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
826 const ColIndex var_index = col - first_slack_col_ + 1;
827 variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
828 }
829}
830
// Sets the given column to the given non-basic status and then derives its
// value from that status (e.g. snapping it to the corresponding bound).
// The order matters: the value is computed from the freshly-set status.
831void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
832 ColIndex col, VariableStatus status) {
833 variables_info_.UpdateToNonBasicStatus(col, status);
834 variable_values_.SetNonBasicVariableValueFromStatus(col);
835}
836
837bool RevisedSimplex::BasisIsConsistent() const {
838 const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
839 const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
840 for (RowIndex row(0); row < num_rows_; ++row) {
841 const ColIndex col = basis_[row];
842 if (!is_basic.IsSet(col)) return false;
843 if (variable_statuses[col] != VariableStatus::BASIC) return false;
844 }
845 ColIndex cols_in_basis(0);
846 ColIndex cols_not_in_basis(0);
847 for (ColIndex col(0); col < num_cols_; ++col) {
848 cols_in_basis += is_basic.IsSet(col);
849 cols_not_in_basis += !is_basic.IsSet(col);
850 if (is_basic.IsSet(col) !=
851 (variable_statuses[col] == VariableStatus::BASIC)) {
852 return false;
853 }
854 }
855 if (cols_in_basis != RowToColIndex(num_rows_)) return false;
856 if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
857 return true;
858}
859
860// Note(user): The basis factorization is not updated by this function but by
861// UpdateAndPivot().
// Swaps entering_col into the basis at basis_row, moving the current basic
// variable of that row out with the given non-basic status.
862void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
863 VariableStatus leaving_variable_status) {
864 SCOPED_TIME_STAT(&function_stats_);
865 DCHECK_COL_BOUNDS(entering_col);
866 DCHECK_ROW_BOUNDS(basis_row);
867
868 // Check that this is not called with an entering_col already in the basis
869 // and that the leaving col is indeed in the basis.
870 DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
871 DCHECK_NE(basis_[basis_row], entering_col);
872 DCHECK_NE(basis_[basis_row], kInvalidCol);
873
874 const ColIndex leaving_col = basis_[basis_row];
875 DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));
876
877 // Make leaving_col leave the basis and update relevant data.
878 // Note thate the leaving variable value is not necessarily at its exact
879 // bound, which is like a bound shift.
880 variables_info_.UpdateToNonBasicStatus(leaving_col, leaving_variable_status);
881 DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
882 leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
883 leaving_variable_status == VariableStatus::FIXED_VALUE);
884
885 basis_[basis_row] = entering_col;
886 variables_info_.UpdateToBasicStatus(entering_col);
// The cached update row was computed for the old basis; invalidate it.
887 update_row_.Invalidate();
888}
889
890namespace {
891
// Comparator used to sort column indices according to a given value vector.
// Orders column indices by increasing value_[col]. Note that std::sort is not
// stable, so ties end up in an unspecified order.
class ColumnComparator {
 public:
  explicit ColumnComparator(const DenseRow& value) : value_(value) {}
  bool operator()(ColIndex col_a, ColIndex col_b) const {
    return value_[col_a] < value_[col_b];
  }

 private:
  // Not owned; must outlive this comparator.
  const DenseRow& value_;
};
903
904} // namespace
905
906// To understand better what is going on in this function, let us say that this
907// algorithm will produce the optimal solution to a problem containing only
908// singleton columns (provided that the variables start at the minimum possible
909// cost, see DefaultVariableStatus()). This is unit tested.
910//
911// The error_ must be equal to the constraint activity for the current variable
912// values before this function is called. If error_[row] is 0.0, that mean this
913// constraint is currently feasible.
914void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
915 SCOPED_TIME_STAT(&function_stats_);
916 // Computes the singleton columns and the cost variation of the corresponding
917 // variables (in the only possible direction, i.e. away from its current
918 // bound) for a unit change in the infeasibility of the corresponding row.
919 //
920 // Note that the slack columns will be treated as normal singleton columns.
921 std::vector<ColIndex> singleton_column;
922 DenseRow cost_variation(num_cols_, 0.0);
923 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
924 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
925 for (ColIndex col(0); col < num_cols_; ++col) {
926 if (compact_matrix_.column(col).num_entries() != 1) continue;
927 if (lower_bounds[col] == upper_bounds[col]) continue;
928 const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
929 if (variable_values_.Get(col) == lower_bounds[col]) {
930 cost_variation[col] = objective_[col] / std::abs(slope);
931 } else {
932 cost_variation[col] = -objective_[col] / std::abs(slope);
933 }
934 singleton_column.push_back(col);
935 }
936 if (singleton_column.empty()) return;
937
938 // Sort the singleton columns for the case where many of them correspond to
939 // the same row (equivalent to a piecewise-linear objective on this variable).
940 // Negative cost_variation first since moving the singleton variable away from
941 // its current bound means the least decrease in the objective function for
942 // the same "error" variation.
943 ColumnComparator comparator(cost_variation);
944 std::sort(singleton_column.begin(), singleton_column.end(), comparator);
945 DCHECK_LE(cost_variation[singleton_column.front()],
946 cost_variation[singleton_column.back()]);
947
948 // Use a singleton column to "absorb" the error when possible to avoid
949 // introducing unneeded artificial variables. Note that with scaling on, the
950 // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
951 // them) and that the SingletonColumnSignPreprocessor makes them all positive.
952 // However, this code works for any coefficient value.
953 const DenseRow& variable_values = variable_values_.GetDenseRow();
954 for (const ColIndex col : singleton_column) {
955 const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
956
957 // If no singleton columns have entered the basis for this row, choose the
958 // first one. It will be the one with the least decrease in the objective
959 // function when it leaves the basis.
960 if ((*basis)[row] == kInvalidCol) {
961 (*basis)[row] = col;
962 }
963
964 // If there is already no error in this row (i.e. it is primal-feasible),
965 // there is nothing to do.
966 if (error_[row] == 0.0) continue;
967
968 // In this case, all the infeasibility can be "absorbed" and this variable
969 // may not be at one of its bound anymore, so we have to use it in the
970 // basis.
971 const Fractional coeff =
972 compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
973 const Fractional new_value = variable_values[col] + error_[row] / coeff;
974 if (new_value >= lower_bounds[col] && new_value <= upper_bounds[col]) {
975 error_[row] = 0.0;
976
977 // Use this variable in the initial basis.
978 (*basis)[row] = col;
979 continue;
980 }
981
982 // The idea here is that if the singleton column cannot be used to "absorb"
983 // all error_[row], if it is boxed, it can still be used to make the
984 // infeasibility smaller (with a bound flip).
985 const Fractional box_width = variables_info_.GetBoundDifference(col);
986 DCHECK_NE(box_width, 0.0);
987 DCHECK_NE(error_[row], 0.0);
988 const Fractional error_sign = error_[row] / coeff;
989 if (variable_values[col] == lower_bounds[col] && error_sign > 0.0) {
990 DCHECK(IsFinite(box_width));
991 error_[row] -= coeff * box_width;
992 SetNonBasicVariableStatusAndDeriveValue(col,
994 continue;
995 }
996 if (variable_values[col] == upper_bounds[col] && error_sign < 0.0) {
997 DCHECK(IsFinite(box_width));
998 error_[row] += coeff * box_width;
999 SetNonBasicVariableStatusAndDeriveValue(col,
1001 continue;
1002 }
1003 }
1004}
1005
1006bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
1007 const LinearProgram& lp, bool lp_is_in_equation_form,
1008 bool* only_change_is_new_rows, bool* only_change_is_new_cols,
1009 ColIndex* num_new_cols) {
1010 SCOPED_TIME_STAT(&function_stats_);
1011 DCHECK(only_change_is_new_rows != nullptr);
1012 DCHECK(only_change_is_new_cols != nullptr);
1013 DCHECK(num_new_cols != nullptr);
1014 DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
1015 DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
1016
1017 // This works whether the lp is in equation form (with slack) or not.
1018 const bool old_part_of_matrix_is_unchanged =
1020 num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
1021
1022 // This is the only adaptation we need for the test below.
1023 const ColIndex lp_first_slack =
1024 lp_is_in_equation_form ? lp.GetFirstSlackVariable() : lp.num_variables();
1025
1026 // Test if the matrix is unchanged, and if yes, just returns true. Note that
1027 // this doesn't check the columns corresponding to the slack variables,
1028 // because they were checked by lp.IsInEquationForm() when Solve() was called.
1029 if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
1030 lp_first_slack == first_slack_col_) {
1031 // Tricky: if the parameters "use_transposed_matrix" changed since last call
1032 // we want to reflect the current state. We use the empty transposed matrix
1033 // to detect that. Recomputing the transpose when the matrix is empty is not
1034 // really a big overhead.
1035 if (parameters_.use_transposed_matrix()) {
1036 if (transposed_matrix_.IsEmpty()) {
1037 transposed_matrix_.PopulateFromTranspose(compact_matrix_);
1038 }
1039 } else {
1040 transposed_matrix_.Reset(RowIndex(0));
1041 }
1042 return true;
1043 }
1044
1045 // Check if the new matrix can be derived from the old one just by adding
1046 // new rows (i.e new constraints).
1047 *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
1048 lp.num_constraints() > num_rows_ &&
1049 lp_first_slack == first_slack_col_;
1050
1051 // Check if the new matrix can be derived from the old one just by adding
1052 // new columns (i.e new variables).
1053 *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
1054 lp.num_constraints() == num_rows_ &&
1055 lp_first_slack > first_slack_col_;
1056 *num_new_cols = *only_change_is_new_cols ? lp_first_slack - first_slack_col_
1057 : ColIndex(0);
1058
1059 // Initialize first_slack_.
1060 first_slack_col_ = lp_first_slack;
1061
1062 // Initialize the new dimensions.
1063 num_rows_ = lp.num_constraints();
1064 num_cols_ = lp_first_slack + RowToColIndex(lp.num_constraints());
1065
1066 // Populate compact_matrix_ and transposed_matrix_ if needed.
1067 if (lp_is_in_equation_form) {
1068 // TODO(user): This can be sped up by removing the MatrixView, but then
1069 // this path will likely go away.
1070 compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
1071 } else {
1072 compact_matrix_.PopulateFromSparseMatrixAndAddSlacks(lp.GetSparseMatrix());
1073 }
1074 if (parameters_.use_transposed_matrix()) {
1075 transposed_matrix_.PopulateFromTranspose(compact_matrix_);
1076 } else {
1077 transposed_matrix_.Reset(RowIndex(0));
1078 }
1079 return false;
1080}
1081
1082// Preconditions: This should only be called if there are only new variable
1083// in the lp.
1084bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
1085 const LinearProgram& lp, bool lp_is_in_equation_form,
1086 ColIndex num_new_cols) {
1087 SCOPED_TIME_STAT(&function_stats_);
1088 DCHECK_LE(num_new_cols, first_slack_col_);
1089 const ColIndex first_new_col(first_slack_col_ - num_new_cols);
1090
1091 // Check the original variable bounds.
1092 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1093 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1094 for (ColIndex col(0); col < first_new_col; ++col) {
1095 if (lower_bounds[col] != lp.variable_lower_bounds()[col] ||
1096 upper_bounds[col] != lp.variable_upper_bounds()[col]) {
1097 return false;
1098 }
1099 }
1100
1101 // Check that each new variable has a bound of zero.
1102 for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
1103 if (lp.variable_lower_bounds()[col] != 0.0 &&
1104 lp.variable_upper_bounds()[col] != 0.0) {
1105 return false;
1106 }
1107 }
1108
1109 // Check that the slack bounds are unchanged.
1110 if (lp_is_in_equation_form) {
1111 for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
1112 if (lower_bounds[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
1113 upper_bounds[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
1114 return false;
1115 }
1116 }
1117 } else {
1118 DCHECK_EQ(num_rows_, lp.num_constraints());
1119 for (RowIndex row(0); row < num_rows_; ++row) {
1120 const ColIndex col = first_slack_col_ + RowToColIndex(row);
1121 if (lower_bounds[col - num_new_cols] !=
1122 -lp.constraint_upper_bounds()[row] ||
1123 upper_bounds[col - num_new_cols] !=
1124 -lp.constraint_lower_bounds()[row]) {
1125 return false;
1126 }
1127 }
1128 }
1129 return true;
1130}
1131
1132bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
1133 const LinearProgram& lp) {
1134 SCOPED_TIME_STAT(&function_stats_);
1135
1136 bool objective_is_unchanged = true;
1137 objective_.resize(num_cols_, 0.0);
1138
1139 // This function work whether the lp is in equation form (with slack) or
1140 // without, since the objective of the slacks are always zero.
1141 DCHECK_GE(num_cols_, lp.num_variables());
1142
1143 const auto obj_coeffs = lp.objective_coefficients().const_view();
1144 for (ColIndex col(obj_coeffs.size()); col < num_cols_; ++col) {
1145 if (objective_[col] != 0.0) {
1146 objective_is_unchanged = false;
1147 objective_[col] = 0.0;
1148 }
1149 }
1150
1151 if (lp.IsMaximizationProblem()) {
1152 // Note that we use the minimization version of the objective internally.
1153 for (ColIndex col(0); col < obj_coeffs.size(); ++col) {
1154 const Fractional coeff = -obj_coeffs[col];
1155 if (objective_[col] != coeff) {
1156 objective_is_unchanged = false;
1157 objective_[col] = coeff;
1158 }
1159 }
1160 objective_offset_ = -lp.objective_offset();
1161 objective_scaling_factor_ = -lp.objective_scaling_factor();
1162 } else {
1163 for (ColIndex col(0); col < obj_coeffs.size(); ++col) {
1164 const Fractional coeff = obj_coeffs[col];
1165 if (objective_[col] != coeff) {
1166 objective_is_unchanged = false;
1167 objective_[col] = coeff;
1168 }
1169 }
1170 objective_offset_ = lp.objective_offset();
1171 objective_scaling_factor_ = lp.objective_scaling_factor();
1172 }
1173
1174 return objective_is_unchanged;
1175}
1176
1177void RevisedSimplex::InitializeObjectiveLimit() {
1178 objective_limit_reached_ = false;
1179 DCHECK(std::isfinite(objective_offset_));
1180 DCHECK(std::isfinite(objective_scaling_factor_));
1181 DCHECK_NE(0.0, objective_scaling_factor_);
1182
1183 // This sets dual_objective_limit_ and then primal_objective_limit_.
1184 for (const bool set_dual : {true, false}) {
1185 // NOTE(user): If objective_scaling_factor_ is negative, the optimization
1186 // direction was reversed (during preprocessing or inside revised simplex),
1187 // i.e., the original problem is maximization. In such case the _meaning_ of
1188 // the lower and upper limits is swapped. To this end we must change the
1189 // signs of limits, which happens automatically when calculating shifted
1190 // limits. We must also use upper (resp. lower) limit in place of lower
1191 // (resp. upper) limit when calculating the final objective_limit_.
1192 //
1193 // Choose lower limit if using the dual simplex and scaling factor is
1194 // negative or if using the primal simplex and scaling is nonnegative, upper
1195 // limit otherwise.
1196 const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
1197 ? parameters_.objective_lower_limit()
1198 : parameters_.objective_upper_limit();
1199 const Fractional shifted_limit =
1200 limit / objective_scaling_factor_ - objective_offset_;
1201 if (set_dual) {
1202 dual_objective_limit_ = shifted_limit;
1203 } else {
1204 primal_objective_limit_ = shifted_limit;
1205 }
1206 }
1207}
1208
1209// This implementation starts with an initial matrix B equal to the identity
1210// matrix (modulo a column permutation). For that it uses either the slack
1211// variables or the singleton columns present in the problem. Afterwards, the
1212// fixed slacks in the basis are exchanged with normal columns of A if possible
1213// by the InitialBasis class.
1214Status RevisedSimplex::CreateInitialBasis() {
1215 SCOPED_TIME_STAT(&function_stats_);
1216
1217 // Initialize the variable values and statuses.
1218 // Note that for the dual algorithm, boxed variables will be made
1219 // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
1220 // really matter at which of their two finite bounds they start.
1221 variables_info_.InitializeToDefaultStatus();
1222 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
1223
1224 // Start by using an all-slack basis.
1225 RowToColMapping basis(num_rows_, kInvalidCol);
1226 for (RowIndex row(0); row < num_rows_; ++row) {
1227 basis[row] = SlackColIndex(row);
1228 }
1229
1230 // If possible, for the primal simplex we replace some slack variables with
1231 // some singleton columns present in the problem.
1232 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1233 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1234 if (!parameters_.use_dual_simplex() &&
1235 parameters_.initial_basis() != GlopParameters::MAROS &&
1236 parameters_.exploit_singleton_column_in_initial_basis()) {
1237 // For UseSingletonColumnInInitialBasis() to work better, we change
1238 // the value of the boxed singleton column with a non-zero cost to the best
1239 // of their two bounds.
1240 for (ColIndex col(0); col < num_cols_; ++col) {
1241 if (compact_matrix_.column(col).num_entries() != 1) continue;
1242 const VariableStatus status = variables_info_.GetStatusRow()[col];
1243 const Fractional objective = objective_[col];
1244 if (objective > 0 && IsFinite(lower_bounds[col]) &&
1246 SetNonBasicVariableStatusAndDeriveValue(col,
1248 } else if (objective < 0 && IsFinite(upper_bounds[col]) &&
1250 SetNonBasicVariableStatusAndDeriveValue(col,
1252 }
1253 }
1254
1255 // Compute the primal infeasibility of the initial variable values in
1256 // error_.
1257 ComputeVariableValuesError();
1258
1259 // TODO(user): A better but slightly more complex algorithm would be to:
1260 // - Ignore all singleton columns except the slacks during phase I.
1261 // - For this, change the slack variable bounds accordingly.
1262 // - At the end of phase I, restore the slack variable bounds and perform
1263 // the same algorithm to start with feasible and "optimal" values of the
1264 // singleton columns.
1265 basis.assign(num_rows_, kInvalidCol);
1266 UseSingletonColumnInInitialBasis(&basis);
1267
1268 // Eventually complete the basis with fixed slack columns.
1269 for (RowIndex row(0); row < num_rows_; ++row) {
1270 if (basis[row] == kInvalidCol) {
1271 basis[row] = SlackColIndex(row);
1272 }
1273 }
1274 }
1275
1276 // Use an advanced initial basis to remove the fixed variables from the basis.
1277 if (parameters_.initial_basis() == GlopParameters::NONE) {
1278 return InitializeFirstBasis(basis);
1279 }
1280 if (parameters_.initial_basis() == GlopParameters::MAROS) {
1281 InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1282 upper_bounds, variables_info_.GetTypeRow());
1283 if (parameters_.use_dual_simplex()) {
1284 // This dual version only uses zero-cost columns to complete the
1285 // basis.
1286 initial_basis.GetDualMarosBasis(num_cols_, &basis);
1287 } else {
1288 initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1289 }
1290 int number_changed = 0;
1291 for (RowIndex row(0); row < num_rows_; ++row) {
1292 if (basis[row] != SlackColIndex(row)) {
1293 number_changed++;
1294 }
1295 }
1296 VLOG(1) << "Number of Maros basis changes: " << number_changed;
1297 } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1298 parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1299 // First unassign the fixed variables from basis.
1300 int num_fixed_variables = 0;
1301 for (RowIndex row(0); row < basis.size(); ++row) {
1302 const ColIndex col = basis[row];
1303 if (lower_bounds[col] == upper_bounds[col]) {
1304 basis[row] = kInvalidCol;
1305 ++num_fixed_variables;
1306 }
1307 }
1308
1309 if (num_fixed_variables == 0) {
1310 SOLVER_LOG(logger_, "Crash is set to ", parameters_.initial_basis(),
1311 " but there is no equality rows to remove from initial all "
1312 "slack basis. Starting from there.");
1313 } else {
1314 // Then complete the basis with an advanced initial basis algorithm.
1315 SOLVER_LOG(logger_, "Trying to remove ", num_fixed_variables,
1316 " fixed variables from the initial basis.");
1317 InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1318 upper_bounds, variables_info_.GetTypeRow());
1319
1320 if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1321 if (parameters_.use_scaling()) {
1322 initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1323 } else {
1324 VLOG(1) << "Bixby initial basis algorithm requires the problem "
1325 << "to be scaled. Skipping Bixby's algorithm.";
1326 }
1327 } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1328 // Note the use of num_cols_ here because this algorithm
1329 // benefits from treating fixed slack columns like any other column.
1330 if (parameters_.use_dual_simplex()) {
1331 // This dual version only uses zero-cost columns to complete the
1332 // basis.
1333 initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1334 } else {
1335 initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1336 }
1337
1338 const Status status = InitializeFirstBasis(basis);
1339 if (status.ok()) {
1340 return status;
1341 } else {
1342 SOLVER_LOG(
1343 logger_,
1344 "Advanced basis algo failed, Reverting to all slack basis.");
1345
1346 for (RowIndex row(0); row < num_rows_; ++row) {
1347 basis[row] = SlackColIndex(row);
1348 }
1349 }
1350 }
1351 }
1352 } else {
1353 LOG(WARNING) << "Unsupported initial_basis parameters: "
1354 << parameters_.initial_basis();
1355 }
1356
1357 return InitializeFirstBasis(basis);
1358}
1359
// Installs the given basis as basis_, factorizes it, and derives the variable
// statuses and values from it. Returns an error if the factorized basis looks
// numerically too ill-conditioned.
Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
  basis_ = basis;

  // For each row which does not have a basic column, assign it to the
  // corresponding slack column.
  basis_.resize(num_rows_, kInvalidCol);
  for (RowIndex row(0); row < num_rows_; ++row) {
    if (basis_[row] == kInvalidCol) {
      basis_[row] = SlackColIndex(row);
    }
  }

  GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
  PermuteBasis();

  // Test that the upper bound on the condition number of basis is not too high.
  // The number was not computed by any rigorous analysis, we just prefer to
  // revert to the all slack basis if the condition number of our heuristic
  // first basis seems bad. See for instance on cond11.mps, where we get an
  // infinity upper bound.
  const Fractional condition_number_ub =
      basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
  if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
    const std::string error_message =
        absl::StrCat("The matrix condition number upper bound is too high: ",
                     condition_number_ub);
    SOLVER_LOG(logger_, error_message);
    return Status(Status::ERROR_LU, error_message);
  }

  // Everything is okay, finish the initialization.
  // Note this is done only after the condition-number check so that
  // variables_info_ is not left half-updated on failure.
  for (RowIndex row(0); row < num_rows_; ++row) {
    variables_info_.UpdateToBasicStatus(basis_[row]);
  }
  DCHECK(BasisIsConsistent());

  variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
  variable_values_.RecomputeBasicVariableValues();

  if (logger_->LoggingIsEnabled()) {
    // TODO(user): Maybe return an error status if this is too high. Note
    // however that if we want to do that, we need to reset variables_info_ to a
    // consistent state.
    const Fractional tolerance = parameters_.primal_feasibility_tolerance();
    if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
      SOLVER_LOG(
          logger_,
          "The primal residual of the initial basis is above the tolerance, ",
          variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
    }
  }
  return Status::OK();
}
1413
// Loads lp into the internal structures, detecting what changed since the
// previous solve so that as much state as possible (basis, norms, reduced
// costs) can be reused for a warm start.
Status RevisedSimplex::Initialize(const LinearProgram& lp) {
  parameters_ = initial_parameters_;
  PropagateParameters();

  // We accept both kind of input.
  //
  // TODO(user): Ideally there should be no need to ever put the slack in the
  // LinearProgram. That take extra memory (one big SparseColumn per slack) and
  // just add visible overhead in incremental solve when one wants to add/remove
  // constraints. But for historical reason, we handle both for now.
  const bool lp_is_in_equation_form = lp.IsInEquationForm();

  // Calling InitializeMatrixAndTestIfUnchanged() first is important because
  // this is where num_rows_ and num_cols_ are computed.
  //
  // Note that these functions can't depend on use_dual_simplex() since we may
  // change it below.
  ColIndex num_new_cols(0);
  bool only_change_is_new_rows = false;
  bool only_change_is_new_cols = false;
  const bool matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
      lp, lp_is_in_equation_form, &only_change_is_new_rows,
      &only_change_is_new_cols, &num_new_cols);
  // only_new_bounds is the condition under which the primal simplex can warm
  // start after columns were appended: the new columns must all start at zero.
  const bool only_new_bounds =
      only_change_is_new_cols && num_new_cols > 0 &&
      OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
          lp, lp_is_in_equation_form, num_new_cols);

  // TODO(user): move objective with ReducedCosts class.
  const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);

  const bool bounds_are_unchanged =
      lp_is_in_equation_form
          ? variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
                lp.variable_lower_bounds(), lp.variable_upper_bounds())
          : variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
                lp.variable_lower_bounds(), lp.variable_upper_bounds(),
                lp.constraint_lower_bounds(), lp.constraint_upper_bounds());

  // If parameters_.allow_simplex_algorithm_change() is true and we already have
  // a primal (resp. dual) feasible solution, then we use the primal (resp.
  // dual) algorithm since there is a good chance that it will be faster.
  if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
    if (objective_is_unchanged && !bounds_are_unchanged) {
      parameters_.set_use_dual_simplex(true);
      PropagateParameters();
    }
    if (bounds_are_unchanged && !objective_is_unchanged) {
      parameters_.set_use_dual_simplex(false);
      PropagateParameters();
    }
  }

  InitializeObjectiveLimit();

  // Computes the variable name as soon as possible for logging.
  // TODO(user): do we really need to store them? we could just compute them
  // on the fly since we do not need the speed.
  if (VLOG_IS_ON(2)) {
    SetVariableNames();
  }

  // Warm-start? This is supported only if the solution_state_ is non empty,
  // i.e., this revised simplex i) was already used to solve a problem, or
  // ii) the solution state was provided externally. Note that the
  // solution_state_ may have nothing to do with the current problem, e.g.,
  // objective, matrix, and/or bounds had changed. So we support several
  // scenarios of warm-start depending on how did the problem change and which
  // simplex algorithm is used (primal or dual).
  bool solve_from_scratch = true;

  // Try to perform a "quick" warm-start with no matrix factorization involved.
  if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
    if (!parameters_.use_dual_simplex()) {
      // With primal simplex, always clear dual norms and dual pricing.
      // Incrementality is supported only if only change to the matrix and
      // bounds is adding new columns (objective may change), and that all
      // new columns have a bound equal to zero.
      dual_edge_norms_.Clear();
      dual_pricing_vector_.clear();
      if (matrix_is_unchanged && bounds_are_unchanged) {
        // TODO(user): Do not do that if objective_is_unchanged. Currently
        // this seems to break something. Investigate.
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      } else if (only_change_is_new_cols && only_new_bounds) {
        variables_info_.InitializeFromBasisState(first_slack_col_, num_new_cols,
                                                 solution_state_);
        variable_values_.ResetAllNonBasicVariableValues(
            variable_starting_values_);

        // Shift the old slack indices in basis_: the appended columns were
        // inserted just before the slacks.
        const ColIndex first_new_col(first_slack_col_ - num_new_cols);
        for (ColIndex& col_ref : basis_) {
          if (col_ref >= first_new_col) {
            col_ref += num_new_cols;
          }
        }

        // Make sure the primal edge norm are recomputed from scratch.
        // TODO(user): only the norms of the new columns actually need to be
        // computed.
        primal_edge_norms_.Clear();
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      }
    } else {
      // With dual simplex, always clear primal norms. Incrementality is
      // supported only if the objective remains the same (the matrix may
      // contain new rows and the bounds may change).
      primal_edge_norms_.Clear();
      if (objective_is_unchanged) {
        if (matrix_is_unchanged) {
          if (!bounds_are_unchanged) {
            variables_info_.InitializeFromBasisState(
                first_slack_col_, ColIndex(0), solution_state_);
            variable_values_.ResetAllNonBasicVariableValues(
                variable_starting_values_);
            variable_values_.RecomputeBasicVariableValues();
          }
          solve_from_scratch = false;
        } else if (only_change_is_new_rows) {
          // For the dual-simplex, we also perform a warm start if a couple of
          // new rows where added.
          variables_info_.InitializeFromBasisState(
              first_slack_col_, ColIndex(0), solution_state_);
          dual_edge_norms_.ResizeOnNewRows(num_rows_);

          // TODO(user): The reduced costs do not really need to be recomputed.
          // We just need to initialize the ones of the new slack variables to
          // 0.
          reduced_costs_.ClearAndRemoveCostShifts();
          dual_pricing_vector_.clear();

          // Note that this needs to be done after the Clear() calls above.
          if (InitializeFirstBasis(basis_).ok()) {
            solve_from_scratch = false;
          }
        }
      }
    }
  }

  return FinishInitialization(solve_from_scratch);
}
1558
// Completes Initialize(): if no quick warm start was possible, tries to build
// a basis from the saved variable statuses; failing that, creates a fresh
// initial basis from scratch.
Status RevisedSimplex::FinishInitialization(bool solve_from_scratch) {
  // If we couldn't perform a "quick" warm start above, we can at least try to
  // reuse the variable statuses.
  if (solve_from_scratch && !solution_state_.IsEmpty()) {
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();

    // If an external basis has been provided or if the matrix changed, we need
    // to perform more work, e.g., factorize the proposed basis and validate it.
    variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0),
                                             solution_state_);

    // Use the set of basic columns as a "hint" to construct the first basis.
    std::vector<ColIndex> candidates;
    for (const ColIndex col : variables_info_.GetIsBasicBitRow()) {
      candidates.push_back(col);
    }
    SOLVER_LOG(logger_, "The warm-start state contains ", candidates.size(),
               " candidates for the basis (num_rows = ", num_rows_.value(),
               ").");

    // Optimization: Try to factorize it right away if we have the correct
    // number of element. Ideally the other path below would no require a
    // "double" factorization effort, so this would not be needed.
    if (candidates.size() == num_rows_) {
      basis_.clear();
      for (const ColIndex col : candidates) {
        basis_.push_back(col);
      }

      // TODO(user): Depending on the error here, there is no point doing extra
      // work below. This is the case when we fail because of a bad initial
      // condition number for instance.
      if (InitializeFirstBasis(basis_).ok()) {
        solve_from_scratch = false;
      }
    }

    if (solve_from_scratch) {
      basis_ = basis_factorization_.ComputeInitialBasis(candidates);
      const int num_super_basic =
          variables_info_.ChangeUnusedBasicVariablesToFree(basis_);
      const int num_snapped = variables_info_.SnapFreeVariablesToBound(
          parameters_.crossover_bound_snapping_distance(),
          variable_starting_values_);
      if (logger_->LoggingIsEnabled()) {
        // NOTE(review): the first log line looks like it is missing the
        // num_super_basic count between the two string pieces — confirm
        // against upstream.
        SOLVER_LOG(logger_, "The initial basis did not use ",
                   " BASIC columns from the initial state and used ",
                   (num_rows_ - (candidates.size() - num_super_basic)).value(),
                   " slack variables that were not marked BASIC.");
        if (num_snapped > 0) {
          SOLVER_LOG(logger_, num_snapped,
                     " of the FREE variables where moved to their bound.");
        }
      }

      if (InitializeFirstBasis(basis_).ok()) {
        solve_from_scratch = false;
      } else {
        SOLVER_LOG(logger_,
                   "RevisedSimplex is not using the warm start "
                   "basis because it is not factorizable.");
      }
    }
  }

  if (solve_from_scratch) {
    SOLVER_LOG(logger_, "Starting basis: create from scratch.");
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();
    GLOP_RETURN_IF_ERROR(CreateInitialBasis());
  } else {
    SOLVER_LOG(logger_, "Starting basis: incremental solve.");
  }
  DCHECK(BasisIsConsistent());

  transpose_was_changed_ = false;
  return Status::OK();
}
1644
1645void RevisedSimplex::DisplayBasicVariableStatistics() {
1646 SCOPED_TIME_STAT(&function_stats_);
1647
1648 int num_fixed_variables = 0;
1649 int num_free_variables = 0;
1650 int num_variables_at_bound = 0;
1651 int num_slack_variables = 0;
1652 int num_infeasible_variables = 0;
1653
1654 const DenseRow& variable_values = variable_values_.GetDenseRow();
1655 const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1656 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1657 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1658 const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1659 for (RowIndex row(0); row < num_rows_; ++row) {
1660 const ColIndex col = basis_[row];
1661 const Fractional value = variable_values[col];
1662 if (variable_types[col] == VariableType::UNCONSTRAINED) {
1663 ++num_free_variables;
1664 }
1665 if (value > upper_bounds[col] + tolerance ||
1666 value < lower_bounds[col] - tolerance) {
1667 ++num_infeasible_variables;
1668 }
1669 if (col >= first_slack_col_) {
1670 ++num_slack_variables;
1671 }
1672 if (lower_bounds[col] == upper_bounds[col]) {
1673 ++num_fixed_variables;
1674 } else if (variable_values[col] == lower_bounds[col] ||
1675 variable_values[col] == upper_bounds[col]) {
1676 ++num_variables_at_bound;
1677 }
1678 }
1679
1680 SOLVER_LOG(logger_, "The matrix with slacks has ",
1681 compact_matrix_.num_rows().value(), " rows, ",
1682 compact_matrix_.num_cols().value(), " columns, ",
1683 compact_matrix_.num_entries().value(), " entries.");
1684 SOLVER_LOG(logger_, "Number of basic infeasible variables: ",
1685 num_infeasible_variables);
1686 SOLVER_LOG(logger_, "Number of basic slack variables: ", num_slack_variables);
1687 SOLVER_LOG(logger_,
1688 "Number of basic variables at bound: ", num_variables_at_bound);
1689 SOLVER_LOG(logger_, "Number of basic fixed variables: ", num_fixed_variables);
1690 SOLVER_LOG(logger_, "Number of basic free variables: ", num_free_variables);
1691 SOLVER_LOG(logger_, "Number of super-basic variables: ",
1692 ComputeNumberOfSuperBasicVariables());
1693}
1694
1695void RevisedSimplex::SaveState() {
1696 DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
1697 solution_state_.statuses = variables_info_.GetStatusRow();
1698 solution_state_has_been_set_externally_ = false;
1699}
1700
1701RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1702 DenseBooleanColumn contains_data(num_rows_, false);
1703 for (ColIndex col(0); col < num_cols_; ++col) {
1704 for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1705 contains_data[e.row()] = true;
1706 }
1707 }
1708 RowIndex num_empty_rows(0);
1709 for (RowIndex row(0); row < num_rows_; ++row) {
1710 if (!contains_data[row]) {
1711 ++num_empty_rows;
1712 VLOG(2) << "Row " << row << " is empty.";
1713 }
1714 }
1715 return num_empty_rows;
1716}
1717
1718ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1719 ColIndex num_empty_cols(0);
1720 for (ColIndex col(0); col < num_cols_; ++col) {
1721 if (compact_matrix_.column(col).IsEmpty()) {
1722 ++num_empty_cols;
1723 VLOG(2) << "Column " << col << " is empty.";
1724 }
1725 }
1726 return num_empty_cols;
1727}
1728
1729int RevisedSimplex::ComputeNumberOfSuperBasicVariables() const {
1730 const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
1731 int num_super_basic = 0;
1732 for (ColIndex col(0); col < num_cols_; ++col) {
1733 if (variable_statuses[col] == VariableStatus::FREE &&
1734 variable_values_.Get(col) != 0.0) {
1735 ++num_super_basic;
1736 }
1737 }
1738 return num_super_basic;
1739}
1740
1741void RevisedSimplex::CorrectErrorsOnVariableValues() {
1742 SCOPED_TIME_STAT(&function_stats_);
1743 DCHECK(basis_factorization_.IsRefactorized());
1744
1745 // TODO(user): The primal residual error does not change if we take degenerate
1746 // steps or if we do not change the variable values. No need to recompute it
1747 // in this case.
1748 const Fractional primal_residual =
1749 variable_values_.ComputeMaximumPrimalResidual();
1750
1751 // If the primal_residual is within the tolerance, no need to recompute
1752 // the basic variable values with a better precision.
1753 if (primal_residual >= parameters_.harris_tolerance_ratio() *
1754 parameters_.primal_feasibility_tolerance()) {
1755 variable_values_.RecomputeBasicVariableValues();
1756 VLOG(1) << "Primal infeasibility (bounds error) = "
1757 << variable_values_.ComputeMaximumPrimalInfeasibility()
1758 << ", Primal residual |A.x - b| = "
1759 << variable_values_.ComputeMaximumPrimalResidual();
1760 }
1761}
1762
1763void RevisedSimplex::ComputeVariableValuesError() {
1764 SCOPED_TIME_STAT(&function_stats_);
1765 error_.AssignToZero(num_rows_);
1766 const DenseRow& variable_values = variable_values_.GetDenseRow();
1767 for (ColIndex col(0); col < num_cols_; ++col) {
1768 const Fractional value = variable_values[col];
1769 compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1770 }
1771}
1772
1773void RevisedSimplex::ComputeDirection(ColIndex col) {
1774 SCOPED_TIME_STAT(&function_stats_);
1775 DCHECK_COL_BOUNDS(col);
1776 basis_factorization_.RightSolveForProblemColumn(col, &direction_);
1777 Fractional norm = 0.0;
1778 if (direction_.non_zeros.empty()) {
1779 // We still compute the direction non-zeros because our code relies on it.
1780 const RowIndex num_rows = num_rows_;
1781 const DenseColumn::ConstView direction = direction_.values.const_view();
1782 for (RowIndex row(0); row < num_rows; ++row) {
1783 const Fractional value = direction[row];
1784 if (value != 0.0) {
1785 direction_.non_zeros.push_back(row);
1786 norm = std::max(norm, std::abs(value));
1787 }
1788 }
1789 } else {
1790 for (const auto e : direction_) {
1791 norm = std::max(norm, std::abs(e.coefficient()));
1792 }
1793 }
1794 direction_infinity_norm_ = norm;
1795 IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
1796 num_rows_ == 0 ? 0.0
1797 : static_cast<double>(direction_.non_zeros.size()) /
1798 static_cast<double>(num_rows_.value())));
1799}
1800
1801Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
1802 SCOPED_TIME_STAT(&function_stats_);
1803 compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
1804 for (const auto e : direction_) {
1805 compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
1806 &error_);
1807 }
1808 return InfinityNorm(error_);
1809}
1810
1811template <bool is_entering_reduced_cost_positive>
1812Fractional RevisedSimplex::GetRatio(const DenseRow& lower_bounds,
1813 const DenseRow& upper_bounds,
1814 RowIndex row) const {
1815 const ColIndex col = basis_[row];
1816 const Fractional direction = direction_[row];
1817 const Fractional value = variable_values_.Get(col);
1818 DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1819 DCHECK_NE(direction, 0.0);
1820 if (is_entering_reduced_cost_positive) {
1821 if (direction > 0.0) {
1822 return (upper_bounds[col] - value) / direction;
1823 } else {
1824 return (lower_bounds[col] - value) / direction;
1825 }
1826 } else {
1827 if (direction > 0.0) {
1828 return (value - lower_bounds[col]) / direction;
1829 } else {
1830 return (value - upper_bounds[col]) / direction;
1831 }
1832 }
1833}
1834
// First pass of the Harris ratio test. Computes the (relaxed) Harris ratio
// and fills leaving_candidates with every row whose unrelaxed ratio is not
// larger than it. The template parameter is the sign of the entering reduced
// cost, which determines the direction the entering variable moves.
template <bool is_entering_reduced_cost_positive>
Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
    Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
  SCOPED_TIME_STAT(&function_stats_);
  // Amount by which a basic variable is allowed to go past its bound during
  // the ratio test (the classic Harris relaxation).
  const Fractional harris_tolerance =
      parameters_.harris_tolerance_ratio() *
      parameters_.primal_feasibility_tolerance();
  // Used below to keep the returned ratio bounded away from zero so that even
  // a degenerate pivot yields a small positive step.
  const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
                                   parameters_.primal_feasibility_tolerance();

  // Initially, we can skip any variable with a ratio greater than
  // bound_flip_ratio since it seems to be always better to choose the
  // bound-flip over such leaving variable.
  Fractional harris_ratio = bound_flip_ratio;
  leaving_candidates->Clear();

  // If the basis is refactorized, then we should have everything with a good
  // precision, so we only consider "acceptable" pivots. Otherwise we consider
  // all the entries, and if the algorithm return a pivot that is too small, we
  // will refactorize and recompute the relevant quantities.
  const Fractional threshold = basis_factorization_.IsRefactorized()
                                   ? parameters_.minimum_acceptable_pivot()
                                   : parameters_.ratio_test_zero_threshold();

  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
  for (const auto e : direction_) {
    const Fractional magnitude = std::abs(e.coefficient());
    if (magnitude <= threshold) continue;
    const Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(
        lower_bounds, upper_bounds, e.row());
    if (ratio <= harris_ratio) {
      leaving_candidates->SetCoefficient(e.row(), ratio);

      // The second max() makes sure harris_ratio is lower bounded by a small
      // positive value. The more classical approach is to bound it by 0.0 but
      // since we will always perform a small positive step, we allow any
      // variable to go a bit more out of bound (even if it is past the harris
      // tolerance). This increases the number of candidates and allows us to
      // choose a more numerically stable pivot.
      //
      // Note that at least lower bounding it by 0.0 is really important on
      // numerically difficult problems because it helps in the choice of a
      // stable pivot.
      harris_ratio = std::min(harris_ratio,
                              std::max(minimum_delta / magnitude,
                                       ratio + harris_tolerance / magnitude));
    }
  }
  return harris_ratio;
}
1886
1887namespace {
1888
1889// Returns true if the candidate ratio is supposed to be more stable than the
1890// current ratio (or if the two are equal).
1891// The idea here is to take, by order of preference:
1892// - the minimum positive ratio in order to intoduce a primal infeasibility
1893// which is as small as possible.
1894// - or the least negative one in order to have the smallest bound shift
1895// possible on the leaving variable.
1896bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1897 if (current >= 0.0) {
1898 return candidate >= 0.0 && candidate <= current;
1899 } else {
1900 return candidate >= current;
1901 }
1902}
1903
1904} // namespace
1905
1906// Ratio-test or Quotient-test. Choose the row of the leaving variable.
1907// Known as CHUZR or CHUZRO in FORTRAN codes.
1908Status RevisedSimplex::ChooseLeavingVariableRow(
1909 ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1910 RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
1911 SCOPED_TIME_STAT(&function_stats_);
1912 GLOP_RETURN_ERROR_IF_NULL(refactorize);
1913 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1914 GLOP_RETURN_ERROR_IF_NULL(step_length);
1915 DCHECK_COL_BOUNDS(entering_col);
1916 DCHECK_NE(0.0, reduced_cost);
1917
1918 // A few cases will cause the test to be recomputed from the beginning.
1919 int stats_num_leaving_choices = 0;
1920 equivalent_leaving_choices_.clear();
1921 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1922 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1923 while (true) {
1924 stats_num_leaving_choices = 0;
1925
1926 // We initialize current_ratio with the maximum step the entering variable
1927 // can take (bound-flip). Note that we do not use tolerance here.
1928 const Fractional entering_value = variable_values_.Get(entering_col);
1929 Fractional current_ratio =
1930 (reduced_cost > 0.0) ? entering_value - lower_bounds[entering_col]
1931 : upper_bounds[entering_col] - entering_value;
1932 DCHECK_GT(current_ratio, 0.0);
1933
1934 // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
1935 // actually computes the minimum leaving ratio of all the variables. This is
1936 // the same as the 'classic' ratio test.
1937 const Fractional harris_ratio =
1938 (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
1939 current_ratio, &leaving_candidates_)
1940 : ComputeHarrisRatioAndLeavingCandidates<false>(
1941 current_ratio, &leaving_candidates_);
1942
1943 // If the bound-flip is a viable solution (i.e. it doesn't move the basic
1944 // variable too much out of bounds), we take it as it is always stable and
1945 // fast.
1946 if (current_ratio <= harris_ratio) {
1947 *leaving_row = kInvalidRow;
1948 *step_length = current_ratio;
1949 break;
1950 }
1951
1952 // Second pass of the Harris ratio test. Amongst the variables with 'ratio
1953 // <= harris_ratio', we choose the leaving row with the largest coefficient.
1954 //
1955 // This has a big impact, because picking a leaving variable with a small
1956 // direction_[row] is the main source of Abnormal LU errors.
1957 Fractional pivot_magnitude = 0.0;
1958 stats_num_leaving_choices = 0;
1959 *leaving_row = kInvalidRow;
1960 equivalent_leaving_choices_.clear();
1961 for (const SparseColumn::Entry e : leaving_candidates_) {
1962 const Fractional ratio = e.coefficient();
1963 if (ratio > harris_ratio) continue;
1964 ++stats_num_leaving_choices;
1965 const RowIndex row = e.row();
1966
1967 // If the magnitudes are the same, we choose the leaving variable with
1968 // what is probably the more stable ratio, see
1969 // IsRatioMoreOrEquallyStable().
1970 const Fractional candidate_magnitude = std::abs(direction_[row]);
1971 if (candidate_magnitude < pivot_magnitude) continue;
1972 if (candidate_magnitude == pivot_magnitude) {
1973 if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
1974 if (ratio == current_ratio) {
1975 DCHECK_NE(kInvalidRow, *leaving_row);
1976 equivalent_leaving_choices_.push_back(row);
1977 continue;
1978 }
1979 }
1980 equivalent_leaving_choices_.clear();
1981 current_ratio = ratio;
1982 pivot_magnitude = candidate_magnitude;
1983 *leaving_row = row;
1984 }
1985
1986 // Break the ties randomly.
1987 if (!equivalent_leaving_choices_.empty()) {
1988 equivalent_leaving_choices_.push_back(*leaving_row);
1989 *leaving_row =
1990 equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1991 0, equivalent_leaving_choices_.size() - 1)(random_)];
1992 }
1993
1994 // Since we took care of the bound-flip at the beginning, at this point
1995 // we have a valid leaving row.
1996 DCHECK_NE(kInvalidRow, *leaving_row);
1997
1998 // A variable already outside one of its bounds +/- tolerance is considered
1999 // at its bound and its ratio is zero. Not doing this may lead to a step
2000 // that moves the objective in the wrong direction. We may want to allow
2001 // such steps, but then we will need to check that it doesn't break the
2002 // bounds of the other variables.
2003 if (current_ratio <= 0.0) {
2004 // Instead of doing a zero step, we do a small positive step. This
2005 // helps on degenerate problems.
2006 const Fractional minimum_delta =
2007 parameters_.degenerate_ministep_factor() *
2008 parameters_.primal_feasibility_tolerance();
2009 *step_length = minimum_delta / pivot_magnitude;
2010 } else {
2011 *step_length = current_ratio;
2012 }
2013
2014 // Note(user): Testing the pivot at each iteration is useful for debugging
2015 // an LU factorization problem. Remove the false if you need to investigate
2016 // this, it makes sure that this will be compiled away.
2017 if (/* DISABLES CODE */ (false)) {
2018 TestPivot(entering_col, *leaving_row);
2019 }
2020
2021 // We try various "heuristics" to avoid a small pivot.
2022 //
2023 // The smaller 'direction_[*leaving_row]', the less precise
2024 // it is. So we want to avoid pivoting by such a row. Small pivots lead to
2025 // ill-conditioned bases or even to matrices that are not a basis at all if
2026 // the actual (infinite-precision) coefficient is zero.
2027 //
2028 // TODO(user): We may have to choose another entering column if
2029 // we cannot prevent pivoting by a small pivot.
2030 // (Chvatal, p.115, about epsilon2.)
2031 if (pivot_magnitude <
2032 parameters_.small_pivot_threshold() * direction_infinity_norm_) {
2033 // The first countermeasure is to recompute everything to the best
2034 // precision we can in the hope of avoiding such a choice. Note that this
2035 // helps a lot on the Netlib problems.
2036 if (!basis_factorization_.IsRefactorized()) {
2037 VLOG(1) << "Refactorizing to avoid pivoting by "
2038 << direction_[*leaving_row]
2039 << " direction_infinity_norm_ = " << direction_infinity_norm_
2040 << " reduced cost = " << reduced_cost;
2041 *refactorize = true;
2042 return Status::OK();
2043 }
2044
2045 // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
2046 // we kwnow that this pivot will still have an acceptable magnitude.
2047 //
2048 // TODO(user): An issue left to fix is that if there is no such pivot at
2049 // all, then we will report unbounded even if this is not really the case.
2050 // As of 2018/07/18, this happens on l30.mps.
2051 VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
2052 << " direction_infinity_norm_ = " << direction_infinity_norm_
2053 << " reduced cost = " << reduced_cost;
2054 DCHECK_GE(std::abs(direction_[*leaving_row]),
2055 parameters_.minimum_acceptable_pivot());
2056 IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
2057 }
2058 break;
2059 }
2060
2061 // Update the target bound.
2062 if (*leaving_row != kInvalidRow) {
2063 const bool is_reduced_cost_positive = (reduced_cost > 0.0);
2064 const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
2065 *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
2066 ? upper_bounds[basis_[*leaving_row]]
2067 : lower_bounds[basis_[*leaving_row]];
2068 }
2069
2070 // Stats.
2072 ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
2073 if (!equivalent_leaving_choices_.empty()) {
2074 ratio_test_stats_.num_perfect_ties.Add(
2075 equivalent_leaving_choices_.size());
2076 }
2077 if (*leaving_row != kInvalidRow) {
2078 ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
2079 }
2080 });
2081 return Status::OK();
2082}
2083
2084namespace {
2085
2086// Store a row with its ratio, coefficient magnitude and target bound. This is
2087// used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
2088// details.
2089struct BreakPoint {
2090 BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
2091 Fractional _target_bound)
2092 : row(_row),
2093 ratio(_ratio),
2094 coeff_magnitude(_coeff_magnitude),
2095 target_bound(_target_bound) {}
2096
2097 // We want to process the breakpoints by increasing ratio and decreasing
2098 // coefficient magnitude (if the ratios are the same). Returns false if "this"
2099 // is before "other" in a priority queue.
2100 bool operator<(const BreakPoint& other) const {
2101 if (ratio == other.ratio) {
2102 if (coeff_magnitude == other.coeff_magnitude) {
2103 return row > other.row;
2104 }
2105 return coeff_magnitude < other.coeff_magnitude;
2106 }
2107 return ratio > other.ratio;
2108 }
2109
2110 RowIndex row;
2111 Fractional ratio;
2112 Fractional coeff_magnitude;
2113 Fractional target_bound;
2114};
2115
2116} // namespace
2117
// Primal phase-I ratio test: chooses the leaving row among the breakpoints at
// which a basic variable changes primal-feasibility status, preferring larger
// (more stable) pivot magnitudes as long as the sum of infeasibilities keeps
// decreasing. Outputs a row (or kInvalidRow for a bound-flip), the step
// length, and the bound the leaving variable moves to.
void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
    ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
    RowIndex* leaving_row, Fractional* step_length,
    Fractional* target_bound) const {
  SCOPED_TIME_STAT(&function_stats_);
  RETURN_IF_NULL(refactorize);
  RETURN_IF_NULL(leaving_row);
  RETURN_IF_NULL(step_length);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_NE(0.0, reduced_cost);
  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();

  // We initialize current_ratio with the maximum step the entering variable
  // can take (bound-flip). Note that we do not use tolerance here.
  const Fractional entering_value = variable_values_.Get(entering_col);
  Fractional current_ratio = (reduced_cost > 0.0)
                                 ? entering_value - lower_bounds[entering_col]
                                 : upper_bounds[entering_col] - entering_value;
  DCHECK_GT(current_ratio, 0.0);

  std::vector<BreakPoint> breakpoints;
  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
  for (const auto e : direction_) {
    // Sign-adjust so that `direction` is the movement of the basic variable
    // for a positive step of the entering variable.
    const Fractional direction =
        reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
    const Fractional magnitude = std::abs(direction);
    if (magnitude < tolerance) continue;

    // Computes by how much we can add 'direction' to the basic variable value
    // with index 'row' until it changes of primal feasibility status. That is
    // from infeasible to feasible or from feasible to infeasible. Note that the
    // transition infeasible->feasible->infeasible is possible. We use
    // tolerances here, but when the step will be performed, it will move the
    // variable to the target bound (possibly taking a small negative step).
    //
    // Note(user): The negative step will only happen when the leaving variable
    // was slightly infeasible (less than tolerance). Moreover, the overall
    // infeasibility will not necessarily increase since it doesn't take into
    // account all the variables with an infeasibility smaller than the
    // tolerance, and here we will at least improve the one of the leaving
    // variable.
    const ColIndex col = basis_[e.row()];
    DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));

    const Fractional value = variable_values_.Get(col);
    const Fractional lower_bound = lower_bounds[col];
    const Fractional upper_bound = upper_bounds[col];
    const Fractional to_lower = (lower_bound - tolerance - value) / direction;
    const Fractional to_upper = (upper_bound + tolerance - value) / direction;

    // Enqueue the possible transitions. Note that the second tests exclude the
    // case where to_lower or to_upper are infinite.
    if (to_lower >= 0.0 && to_lower < current_ratio) {
      breakpoints.push_back(
          BreakPoint(e.row(), to_lower, magnitude, lower_bound));
    }
    if (to_upper >= 0.0 && to_upper < current_ratio) {
      breakpoints.push_back(
          BreakPoint(e.row(), to_upper, magnitude, upper_bound));
    }
  }

  // Order the breakpoints by increasing ratio and decreasing coefficient
  // magnitude (if the ratios are the same).
  std::make_heap(breakpoints.begin(), breakpoints.end());

  // Select the last breakpoint that still improves the infeasibility and has
  // the largest coefficient magnitude.
  Fractional improvement = std::abs(reduced_cost);
  Fractional best_magnitude = 0.0;
  *leaving_row = kInvalidRow;
  while (!breakpoints.empty()) {
    const BreakPoint top = breakpoints.front();
    // TODO(user): consider using >= here. That will lead to bigger ratio and
    // hence a better impact on the infeasibility. The drawback is that more
    // effort may be needed to update the reduced costs.
    //
    // TODO(user): Use a random tie breaking strategy for BreakPoint with
    // same ratio and same coefficient magnitude? Koberstein explains in his PhD
    // that it helped on the dual-simplex.
    if (top.coeff_magnitude > best_magnitude) {
      *leaving_row = top.row;
      current_ratio = top.ratio;
      best_magnitude = top.coeff_magnitude;
      *target_bound = top.target_bound;
    }

    // As long as the sum of primal infeasibilities is decreasing, we look for
    // pivots that are numerically more stable.
    improvement -= top.coeff_magnitude;
    if (improvement <= 0.0) break;
    std::pop_heap(breakpoints.begin(), breakpoints.end());
    breakpoints.pop_back();
  }

  // Try to avoid a small pivot by refactorizing.
  if (*leaving_row != kInvalidRow) {
    const Fractional threshold =
        parameters_.small_pivot_threshold() * direction_infinity_norm_;
    if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
      *refactorize = true;
      return;
    }
  }
  *step_length = current_ratio;
}
2225
2226// This implements the pricing step for the dual simplex.
2227Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
2228 Fractional* cost_variation,
2229 Fractional* target_bound) {
2230 SCOPED_TIME_STAT(&function_stats_);
2231 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2232 GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2233 GLOP_RETURN_ERROR_IF_NULL(target_bound);
2234
2235 // This is not supposed to happen, but better be safe.
2236 if (dual_prices_.Size() == 0) {
2237 variable_values_.RecomputeDualPrices(
2238 parameters_.dual_price_prioritize_norm());
2239 }
2240
2241 // Return right away if there is no leaving variable.
2242 // Fill cost_variation and target_bound otherwise.
2243 *leaving_row = dual_prices_.GetMaximum();
2244 if (*leaving_row == kInvalidRow) return Status::OK();
2245
2246 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2247 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2248 const ColIndex leaving_col = basis_[*leaving_row];
2249 const Fractional value = variable_values_.Get(leaving_col);
2250 if (value < lower_bounds[leaving_col]) {
2251 *cost_variation = lower_bounds[leaving_col] - value;
2252 *target_bound = lower_bounds[leaving_col];
2253 DCHECK_GT(*cost_variation, 0.0);
2254 } else {
2255 *cost_variation = upper_bounds[leaving_col] - value;
2256 *target_bound = upper_bounds[leaving_col];
2257 DCHECK_LT(*cost_variation, 0.0);
2258 }
2259 return Status::OK();
2260}
2261
2262namespace {
2263
2264// Returns true if a basic variable with given cost and type is to be considered
2265// as a leaving candidate for the dual phase I.
2266bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
2267 Fractional threshold) {
2268 if (cost == 0.0) return false;
2271 (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
2272 (type == VariableType::LOWER_BOUNDED && cost > threshold);
2273}
2274
2275} // namespace
2276
2277// Important: The norm should be updated before this is called.
2278template <bool use_dense_update>
2279void RevisedSimplex::OnDualPriceChange(DenseColumn::ConstView squared_norm,
2280 RowIndex row, VariableType type,
2281 Fractional threshold) {
2282 const Fractional price = dual_pricing_vector_[row];
2283 const bool is_candidate =
2284 IsDualPhaseILeavingCandidate(price, type, threshold);
2285 if (is_candidate) {
2286 if (use_dense_update) {
2287 dual_prices_.DenseAddOrUpdate(row, Square(price) / squared_norm[row]);
2288 } else {
2289 dual_prices_.AddOrUpdate(row, Square(price) / squared_norm[row]);
2290 }
2291 } else {
2292 dual_prices_.Remove(row);
2293 }
2294}
2295
// Incrementally updates the dual phase-I pricing vector and prices after the
// basis change (leaving_row, entering_col), instead of recomputing them from
// scratch.
void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
                                           ColIndex entering_col) {
  SCOPED_TIME_STAT(&function_stats_);

  // If the prices are going to be recomputed, there is nothing to do. See the
  // logic at the beginning of DualPhaseIChooseLeavingVariableRow() which must
  // be in sync with this one.
  //
  // TODO(user): Move the logic in a single class, so it is easier to enforce
  // invariant.
  if (reduced_costs_.AreReducedCostsRecomputed() ||
      dual_edge_norms_.NeedsBasisRefactorization() ||
      dual_pricing_vector_.empty()) {
    return;
  }

  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
  const Fractional threshold = parameters_.ratio_test_zero_threshold();

  // Note that because the norm are also updated only on the position of the
  // direction, scaled_dual_pricing_vector_ will be up to date.
  const DenseColumn::ConstView squared_norms =
      dual_edge_norms_.GetEdgeSquaredNorms();

  // Convert the dual_pricing_vector_ from the old basis into the new one (which
  // is the same as multiplying it by an Eta matrix corresponding to the
  // direction).
  const Fractional step =
      dual_pricing_vector_[leaving_row] / direction_[leaving_row];
  for (const auto e : direction_) {
    dual_pricing_vector_[e.row()] -= e.coefficient() * step;
    OnDualPriceChange(squared_norms, e.row(), variable_type[basis_[e.row()]],
                      threshold);
  }
  // The leaving row's entry is overwritten (not updated) by the pivot step.
  dual_pricing_vector_[leaving_row] = step;

  // The entering_col which was dual-infeasible is now dual-feasible, so we
  // have to remove it from the infeasibility sum.
  dual_pricing_vector_[leaving_row] -=
      dual_infeasibility_improvement_direction_[entering_col];
  if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
    --num_dual_infeasible_positions_;
  }
  dual_infeasibility_improvement_direction_[entering_col] = 0.0;

  // The leaving variable will also be dual-feasible.
  dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;

  // Update the leaving row entering candidate status.
  // Note that the entering variable's type is used here since it is the one
  // that now occupies this basis position.
  OnDualPriceChange(squared_norms, leaving_row, variable_type[entering_col],
                    threshold);
}
2348
// Updates the dual phase-I machinery for the given columns whose reduced cost
// may have changed: refreshes each column's dual-infeasibility improvement
// direction (+1/-1/0), the count of dual-infeasible positions, and then
// propagates the accumulated change through a right solve into
// dual_pricing_vector_ and dual_prices_.
template <typename Cols>
void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
    const Cols& cols) {
  SCOPED_TIME_STAT(&function_stats_);
  bool something_to_do = false;
  const DenseBitRow::ConstView can_decrease =
      variables_info_.GetCanDecreaseBitRow().const_view();
  const DenseBitRow::ConstView can_increase =
      variables_info_.GetCanIncreaseBitRow().const_view();
  const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
  const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
  auto improvement_direction = dual_infeasibility_improvement_direction_.view();
  for (const ColIndex col : cols) {
    const Fractional reduced_cost = reduced_costs[col];
    // Sign in which this column's reduced cost must move to restore dual
    // feasibility; 0.0 when the column is already dual-feasible.
    const Fractional sign =
        (can_increase[col] && reduced_cost < -tolerance) ? 1.0
        : (can_decrease[col] && reduced_cost > tolerance) ? -1.0
                                                          : 0.0;
    if (sign != improvement_direction[col]) {
      if (sign == 0.0) {
        --num_dual_infeasible_positions_;
      } else if (improvement_direction[col] == 0.0) {
        ++num_dual_infeasible_positions_;
      }
      // Lazily prepare the scratchpad the first time a change is detected.
      if (!something_to_do) {
        initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
        initially_all_zero_scratchpad_.ClearSparseMask();
        initially_all_zero_scratchpad_.non_zeros.clear();
        something_to_do = true;
      }

      // We add a factor 10 because of the scattered access.
      num_update_price_operations_ +=
          10 * compact_matrix_.column(col).num_entries().value();
      compact_matrix_.ColumnAddMultipleToSparseScatteredColumn(
          col, sign - improvement_direction[col],
          &initially_all_zero_scratchpad_);
      improvement_direction[col] = sign;
    }
  }
  if (something_to_do) {
    initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
    initially_all_zero_scratchpad_.ClearSparseMask();
    const DenseColumn::ConstView squared_norms =
        dual_edge_norms_.GetEdgeSquaredNorms();

    const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
    const Fractional threshold = parameters_.ratio_test_zero_threshold();
    basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
    if (initially_all_zero_scratchpad_.non_zeros.empty()) {
      // Dense result: scan all rows and restore the scratchpad to zero at the
      // end with a single assignment.
      dual_prices_.StartDenseUpdates();
      for (RowIndex row(0); row < num_rows_; ++row) {
        if (initially_all_zero_scratchpad_[row] == 0.0) continue;
        dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
        OnDualPriceChange</*use_dense_update=*/true>(
            squared_norms, row, variable_type[basis_[row]], threshold);
      }
      initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
    } else {
      // Sparse result: only touch the non-zero positions, zeroing each one as
      // it is consumed so the scratchpad is all-zero again afterwards.
      for (const auto e : initially_all_zero_scratchpad_) {
        dual_pricing_vector_[e.row()] += e.coefficient();
        OnDualPriceChange(squared_norms, e.row(),
                          variable_type[basis_[e.row()]], threshold);
        initially_all_zero_scratchpad_[e.row()] = 0.0;
      }
    }
    initially_all_zero_scratchpad_.non_zeros.clear();
  }
}
2418
2419Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
2420 RowIndex* leaving_row, Fractional* cost_variation,
2421 Fractional* target_bound) {
2422 SCOPED_TIME_STAT(&function_stats_);
2423 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2424 GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2425 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2426 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2427
2428 // dual_infeasibility_improvement_direction_ is zero for dual-feasible
2429 // positions and contains the sign in which the reduced cost of this column
2430 // needs to move to improve the feasibility otherwise (+1 or -1).
2431 //
2432 // Its current value was the one used to compute dual_pricing_vector_ and
2433 // was updated accordingly by DualPhaseIUpdatePrice().
2434 //
2435 // If more variables changed of dual-feasibility status during the last
2436 // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
2437 // take them into account.
2438 if (reduced_costs_.AreReducedCostsRecomputed() ||
2439 dual_edge_norms_.NeedsBasisRefactorization() ||
2440 dual_pricing_vector_.empty()) {
2441 // Recompute everything from scratch.
2442 num_dual_infeasible_positions_ = 0;
2443 dual_pricing_vector_.AssignToZero(num_rows_);
2444 dual_prices_.ClearAndResize(num_rows_);
2445 dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
2446 DualPhaseIUpdatePriceOnReducedCostChange(
2447 variables_info_.GetIsRelevantBitRow());
2448 } else {
2449 // Update row is still equal to the row used during the last iteration
2450 // to update the reduced costs.
2451 DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
2452 }
2453
2454 // If there is no dual-infeasible position, we are done.
2455 *leaving_row = kInvalidRow;
2456 if (num_dual_infeasible_positions_ == 0) return Status::OK();
2457
2458 *leaving_row = dual_prices_.GetMaximum();
2459
2460 // Returns right away if there is no leaving variable or fill the other
2461 // return values otherwise.
2462 if (*leaving_row == kInvalidRow) return Status::OK();
2463 *cost_variation = dual_pricing_vector_[*leaving_row];
2464 const ColIndex leaving_col = basis_[*leaving_row];
2465 if (*cost_variation < 0.0) {
2466 *target_bound = upper_bounds[leaving_col];
2467 } else {
2468 *target_bound = lower_bounds[leaving_col];
2469 }
2470 DCHECK(IsFinite(*target_bound));
2471 return Status::OK();
2472}
2473
// Flips boxed non-basic variables onto the bound that is dual feasible for
// their current reduced cost: a variable sitting at its upper bound with a
// reduced cost above the tolerance is moved to the other bound, and
// symmetrically for a reduced cost below minus the tolerance. The values of
// the basic variables are then patched for the flipped columns (fully
// recomputed only when update_basic_values is true).
2474 template <typename BoxedVariableCols>
2475 void RevisedSimplex::MakeBoxedVariableDualFeasible(
2476 const BoxedVariableCols& cols, bool update_basic_values) {
2477 SCOPED_TIME_STAT(&function_stats_);
2478 std::vector<ColIndex> changed_cols;
2479
2480 // It is important to flip bounds within a tolerance because of precision
2481 // errors. Otherwise, this leads to cycling on many of the Netlib problems
2482 // since this is called at each iteration (because of the bound-flipping ratio
2483 // test).
2484 //
2485 // TODO(user): During an iteration, we might want to switch with a lower
2486 // tolerance bounds flip that were deemed good so that we can more easily make
2487 // progress?
2488 const Fractional threshold = reduced_costs_.GetDualFeasibilityTolerance();
2489
2490 const DenseRow& variable_values = variable_values_.GetDenseRow();
2491 const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
2492 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2493 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2494 const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2495 for (const ColIndex col : cols) {
2496 const Fractional reduced_cost = reduced_costs[col];
2497 const VariableStatus status = variable_status[col];
2498 DCHECK(variables_info_.GetTypeRow()[col] ==
2500 // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2501 DCHECK(variable_values[col] == lower_bounds[col] ||
2502 variable_values[col] == upper_bounds[col] ||
2503 status == VariableStatus::BASIC);
// Dual feasibility at the upper bound requires a non-positive reduced cost
// (and a non-negative one at the lower bound); flip when violated beyond
// the tolerance.
2504 if (reduced_cost > threshold && status == VariableStatus::AT_UPPER_BOUND) {
2505 variables_info_.UpdateToNonBasicStatus(col,
2507 changed_cols.push_back(col);
2508 } else if (reduced_cost < -threshold &&
2510 variables_info_.UpdateToNonBasicStatus(col,
2512 changed_cols.push_back(col);
2513 }
2514 }
2515
// Propagate the bound flips to the basic variable values in a single pass.
2516 if (!changed_cols.empty()) {
2517 iteration_stats_.num_dual_flips.Add(changed_cols.size());
2518 variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2519 update_basic_values);
2520 }
2521 }
2522
2523Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2524 RowIndex leaving_row, Fractional target_bound) {
2525 SCOPED_TIME_STAT(&function_stats_);
2526
2527 // We just want the leaving variable to go to its target_bound.
2528 const ColIndex leaving_col = basis_[leaving_row];
2529 const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2530 Fractional unscaled_step = leaving_variable_value - target_bound;
2531
2532 // In Chvatal p 157 update_[entering_col] is used instead of
2533 // direction_[leaving_row], but the two quantities are actually the
2534 // same. This is because update_[col] is the value at leaving_row of
2535 // the right inverse of col and direction_ is the right inverse of the
2536 // entering_col. Note that direction_[leaving_row] is probably more
2537 // precise.
2538 // TODO(user): use this to check precision and trigger recomputation.
2539 return unscaled_step / direction_[leaving_row];
2540}
2541
2542bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2543 VLOG(1) << "Test pivot.";
2544 SCOPED_TIME_STAT(&function_stats_);
2545 const ColIndex leaving_col = basis_[leaving_row];
2546 basis_[leaving_row] = entering_col;
2547
2548 // TODO(user): If 'is_ok' is true, we could use the computed lu in
2549 // basis_factorization_ rather than recompute it during UpdateAndPivot().
2550 CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2551 const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2552 basis_[leaving_row] = leaving_col;
2553 return is_ok;
2554}
2555
2556 // Note that this function is an optimization and that if it was doing nothing
2557 // the algorithm will still be correct and work. Using it does change the pivot
2558 // taken during the simplex method though.
// Applies the column permutation produced by the last basis factorization to
// basis_ (and to the phase-I pricing vector when present), notifies the
// dependent classes, then resets the factorization's permutation to the
// identity so subsequent solves are consistent with the permuted basis_.
2559 void RevisedSimplex::PermuteBasis() {
2560 SCOPED_TIME_STAT(&function_stats_);
2561
2562 // Fetch the current basis column permutation and return if it is empty which
2563 // means the permutation is the identity.
2564 const ColumnPermutation& col_perm =
2565 basis_factorization_.GetColumnPermutation();
2566 if (col_perm.empty()) return;
2567
2568 // Permute basis_.
2569 ApplyColumnPermutationToRowIndexedVector(col_perm.const_view(), &basis_,
2570 &tmp_basis_);
2571
2572 // Permute dual_pricing_vector_ if needed.
2573 if (!dual_pricing_vector_.empty()) {
2574 // TODO(user): We should permute dual_prices_ too. Since we recompute
2575 // everything on each basis factorization, this doesn't matter for now.
2576 ApplyColumnPermutationToRowIndexedVector(col_perm.const_view(),
2577 &dual_pricing_vector_,
2578 &tmp_dual_pricing_vector_);
2579 }
2580
2581 // Notify the other classes.
2582 reduced_costs_.UpdateDataOnBasisPermutation();
2583 dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);
2584
2585 // Finally, remove the column permutation from all subsequent solves since
2586 // it has been taken into account in basis_.
2587 basis_factorization_.SetColumnPermutationToIdentity();
2588 }
2589
// Pivots entering_col into the basis in place of the variable at leaving_row.
// The leaving variable receives the non-basic status implied by target_bound,
// and the LU factorization is either incrementally updated or, when the pivot
// value disagrees too much between its two independent computations, forced
// to be recomputed from scratch.
2590 Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2591 RowIndex leaving_row,
2592 Fractional target_bound) {
2593 SCOPED_TIME_STAT(&function_stats_);
2594
2595 // Tricky and a bit hacky.
2596 //
2597 // The basis update code assumes that we already computed the left inverse of
2598 // the leaving row, otherwise it will just refactorize the basis. This left
2599 // inverse is needed by update_row_.ComputeUpdateRow(), so in most case it
2600 // will already be computed. However, in some situation we don't need the
2601 // full update row, so just the left inverse can be computed.
2602 //
2603 // TODO(user): Ideally this shouldn't be needed if we are going to refactorize
2604 // the basis anyway. So we should know that before hand which is currently
2605 // hard to do.
2606 Fractional pivot_from_update_row;
2607 if (update_row_.IsComputedFor(leaving_row)) {
2608 pivot_from_update_row = update_row_.GetCoefficient(entering_col);
2609 } else {
2610 // We only need the left inverse and the update row position at the
2611 // entering_col to check precision.
2612 update_row_.ComputeUnitRowLeftInverse(leaving_row);
2613 pivot_from_update_row = compact_matrix_.ColumnScalarProduct(
2614 entering_col, update_row_.GetUnitRowLeftInverse().values);
2615 }
2616
// Choose the non-basic status the leaving variable will take, based on its
// bounds and on which bound target_bound designates.
2617 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2618 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2619 const ColIndex leaving_col = basis_[leaving_row];
2620 const VariableStatus leaving_variable_status =
2621 lower_bounds[leaving_col] == upper_bounds[leaving_col]
2623 : target_bound == lower_bounds[leaving_col]
// Record by how much the leaving variable misses its target bound; this
// "bound shift" is a measure of numerical drift.
2626 if (variable_values_.Get(leaving_col) != target_bound) {
2627 ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2628 target_bound);
2629 }
2630 UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2631
2632 // Test precision by comparing two ways to get the "pivot".
2633 const Fractional pivot_from_direction = direction_[leaving_row];
2634 const Fractional diff =
2635 std::abs(pivot_from_update_row - pivot_from_direction);
2636 if (diff > parameters_.refactorization_threshold() *
2637 (1.0 + std::min(std::abs(pivot_from_update_row),
2638 std::abs(pivot_from_direction)))) {
2639 VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2640 << " diff = " << diff;
2641 // TODO(user): We need to be careful when we modify parameter like this
2642 // since it will not be reset until the next SetParameters() call.
// Imprecision soon after a factorization suggests an unstable LU, so make
// its pivoting more conservative before refactorizing.
2643 if (basis_factorization_.NumUpdates() < 10) {
2644 Fractional threshold = parameters_.lu_factorization_pivot_threshold();
2645 threshold = std::min(threshold * 1.5, 0.9);
2646 VLOG(1) << "Increasing LU pivot threshold " << threshold;
2647 parameters_.set_lu_factorization_pivot_threshold(threshold);
2648 basis_factorization_.SetParameters(parameters_);
2649 }
2650
2651 last_refactorization_reason_ = RefactorizationReason::IMPRECISE_PIVOT;
2652 GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2653 } else {
2655 basis_factorization_.Update(entering_col, leaving_row, direction_));
2656 }
// A fresh factorization may carry a column permutation; fold it into basis_.
2657 if (basis_factorization_.IsRefactorized()) {
2658 PermuteBasis();
2659 }
2660 return Status::OK();
2661 }
2662
2663Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2664 SCOPED_TIME_STAT(&function_stats_);
2665 if (*refactorize && !basis_factorization_.IsRefactorized()) {
2666 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2667 update_row_.Invalidate();
2668 PermuteBasis();
2669 }
2670 *refactorize = false;
2671 return Status::OK();
2672}
2673
// Records the per-column scale used by Polish() to measure how far a
// variable is from integrality; 0.0 (the resize default) means "do not count
// this column". The vector grows on demand so any ColIndex can be set.
// NOTE(review): the enclosing function signature (presumably
// SetIntegralityScale(ColIndex col, Fractional scale)) is not visible in
// this extract — confirm against the header.
2675 if (col >= integrality_scale_.size()) {
2676 integrality_scale_.resize(col + 1, 0.0);
2677 }
2678 integrality_scale_[col] = scale;
2679 }
2680
// Heuristic polishing pass: performs a few pivots (at most 5, drawn from at
// most 10 random trials) among non-basic variables whose reduced cost is
// near zero, so the objective is (essentially) unchanged while the
// fractionality of the basic solution — as measured by integrality_scale_ —
// decreases. Presumably used to obtain more integral LP solutions; confirm
// with callers.
2681 Status RevisedSimplex::Polish(TimeLimit* time_limit) {
2683 Cleanup update_deterministic_time_on_return(
2684 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2685
2686 // Get all non-basic variables with a reduced costs close to zero.
2687 // Note that because we only choose entering candidate with a cost of zero,
2688 // this set will not change (modulo epsilons).
2689 const DenseRow::ConstView rc = reduced_costs_.GetReducedCosts();
2690 std::vector<ColIndex> candidates;
2691 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
2692 if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
2693 if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
2694 }
2695
2696 bool refactorize = false;
2697 int num_pivots = 0;
2698 Fractional total_gain = 0.0;
// At most 10 candidate trials and at most 5 accepted pivots per call.
2699 for (int i = 0; i < 10; ++i) {
2700 AdvanceDeterministicTime(time_limit);
2701 if (time_limit->LimitReached()) break;
2702 if (num_pivots >= 5) break;
2703 if (candidates.empty()) break;
2704
2705 // Pick a random one and remove it from the list.
2706 const int index =
2707 std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
2708 const ColIndex entering_col = candidates[index];
2709 std::swap(candidates[index], candidates.back());
2710 candidates.pop_back();
2711
2712 // We need the entering variable to move in the correct direction.
2713 Fractional fake_rc = 1.0;
2714 if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
2715 CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
2716 fake_rc = -1.0;
2717 }
2718
2719 // Refactorize if needed.
2720 if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2721 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2722
2723 // Compute the direction and by how much we can move along it.
2724 ComputeDirection(entering_col);
2725 Fractional step_length;
2726 RowIndex leaving_row;
2727 Fractional target_bound;
2728 bool local_refactorize = false;
2730 ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
2731 &leaving_row, &step_length, &target_bound));
2732
// Skip candidates whose ratio test is unusable: refactorization requested,
// unbounded/tiny step, or a dangerously small pivot.
2733 if (local_refactorize) continue;
2734 if (step_length == kInfinity || step_length == -kInfinity) continue;
2735 if (std::abs(step_length) <= 1e-6) continue;
2736 if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
2737 continue;
2738 }
2739 const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
2740
2741 // Evaluate if pivot reduce the fractionality of the basis.
2742 //
2743 // TODO(user): Count with more weight variable with a small domain, i.e.
2744 // binary variable, compared to a variable in [0, 1k] ?
// get_diff() returns the change in distance-to-integrality for one column:
// negative means the pivot moves that variable closer to an integer multiple
// of 1/integrality_scale_[col].
2745 const auto get_diff = [this](ColIndex col, Fractional old_value,
2746 Fractional new_value) {
2747 if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
2748 return 0.0;
2749 }
2750 const Fractional s = integrality_scale_[col];
2751 return (std::abs(new_value * s - std::round(new_value * s)) -
2752 std::abs(old_value * s - std::round(old_value * s)));
2753 };
// Total fractionality change: the entering variable plus every basic
// variable touched by the direction.
2754 Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
2755 variable_values_.Get(entering_col) + step);
2756 for (const auto e : direction_) {
2757 const ColIndex col = basis_[e.row()];
2758 const Fractional old_value = variable_values_.Get(col);
2759 const Fractional new_value = old_value - e.coefficient() * step;
2760 diff += get_diff(col, old_value, new_value);
2761 }
2762
2763 // Ignore low decrease in integrality.
2764 if (diff > -1e-2) continue;
2765 total_gain -= diff;
2766
2767 // We perform the change.
2768 num_pivots++;
2769 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2770
2771 // This is a bound flip of the entering column.
2772 if (leaving_row == kInvalidRow) {
2773 if (step > 0.0) {
2774 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2776 } else if (step < 0.0) {
2777 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2779 }
2780 continue;
2781 }
2782
2783 // Perform the pivot.
2784 const ColIndex leaving_col = basis_[leaving_row];
2785 update_row_.ComputeUpdateRow(leaving_row);
2786
2787 // Note that this will only do work if the norms are computed.
2788 //
2789 // TODO(user): We should probably move all the "update" in a function so
2790 // that all "iterations" function can just reuse the same code. Everything
2791 // that is currently not "cleared" should be updated. If one does not want
2792 // that, then it is easy to call Clear() on the quantities that do not needs
2793 // to be kept in sync with the current basis.
2794 primal_edge_norms_.UpdateBeforeBasisPivot(
2795 entering_col, leaving_col, leaving_row, direction_, &update_row_);
2796 dual_edge_norms_.UpdateBeforeBasisPivot(
2797 entering_col, leaving_row, direction_,
2798 update_row_.GetUnitRowLeftInverse());
2799
2800 // TODO(user): Rather than maintaining this, it is probably better to
2801 // recompute it in one go after Polish() is done. We don't use the reduced
2802 // costs here as we just assume that the set of candidates does not change.
2803 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2804 &update_row_);
2805
// Degenerate pivot: the leaving variable does not actually move toward
// target_bound, so its value must not be snapped to the bound.
2806 const Fractional dir = -direction_[leaving_row] * step;
2807 const bool is_degenerate =
2808 (dir == 0.0) ||
2809 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2810 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2811 if (!is_degenerate) {
2812 variable_values_.Set(leaving_col, target_bound);
2813 }
2815 UpdateAndPivot(entering_col, leaving_row, target_bound));
2816 }
2817
2818 VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
2819 return Status::OK();
2820 }
2821
2822// Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2823// x an n-vector.
2824//
2825// x is split in two parts x_B and x_N (B standing for basis).
2826// In the same way, A is split in A_B (also known as B) and A_N, and
2827// c is split into c_B and c_N.
2828//
2829// The goal is to minimize c_B.x_B + c_N.x_N
2830// subject to B.x_B + A_N.x_N = 0
2831// and x_lower <= x <= x_upper.
2832//
2833// To minimize c.x, at each iteration a variable from x_N is selected to
2834// enter the basis, and a variable from x_B is selected to leave the basis.
2835// To avoid explicit inversion of B, the algorithm solves two sub-systems:
2836// y.B = c_B and B.d = a (a being the entering column).
// Primal simplex main loop. In phase I (phase_ == Phase::FEASIBILITY) the
// problem objective is temporarily replaced by an infeasibility-reducing
// objective maintained by UpdatePrimalPhaseICosts(); in phase II the real
// objective is used. The loop ends on optimality, primal
// infeasibility/unboundedness, an iteration/time/objective limit, or error.
2837 Status RevisedSimplex::PrimalMinimize(TimeLimit* time_limit) {
2839 Cleanup update_deterministic_time_on_return(
2840 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2841 num_consecutive_degenerate_iterations_ = 0;
2842 bool refactorize = false;
2843 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
2844
2845 // At this point, we are not sure the prices are always up to date, so
2846 // lets always reset them for the first iteration below.
2847 primal_prices_.ForceRecomputation();
2848
2849 if (phase_ == Phase::FEASIBILITY) {
2850 // Initialize the primal phase-I objective.
2851 // Note that this temporarily erases the problem objective.
2852 objective_.AssignToZero(num_cols_);
2853 variable_values_.UpdatePrimalPhaseICosts(
2854 util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2855 reduced_costs_.ResetForNewObjective();
2856 }
2857
// Each pass of this loop performs at most one pivot or bound flip.
2858 while (true) {
2859 AdvanceDeterministicTime(time_limit);
2860 if (time_limit->LimitReached()) break;
2861
2862 // TODO(user): we may loop a bit more than the actual number of iteration.
2863 // fix.
2865 ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2866
2867 // Trigger a refactorization if one of the class we use request it.
2868 if (!refactorize && reduced_costs_.NeedsBasisRefactorization()) {
2869 last_refactorization_reason_ = RefactorizationReason::RC;
2870 refactorize = true;
2871 }
2872 if (!refactorize && primal_edge_norms_.NeedsBasisRefactorization()) {
2873 last_refactorization_reason_ = RefactorizationReason::NORM;
2874 refactorize = true;
2875 }
2876 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2877
2878 if (basis_factorization_.IsRefactorized()) {
2879 CorrectErrorsOnVariableValues();
2880 DisplayIterationInfo(/*primal=*/true, last_refactorization_reason_);
2881 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
2882
2883 if (phase_ == Phase::FEASIBILITY) {
2884 // Since the variable values may have been recomputed, we need to
2885 // recompute the primal infeasible variables and update their costs.
2886 if (variable_values_.UpdatePrimalPhaseICosts(
2887 util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2888 &objective_)) {
2889 reduced_costs_.ResetForNewObjective();
2890 }
2891 }
2892
2893 // Computing the objective at each iteration takes time, so we just
2894 // check the limit when the basis is refactorized.
2895 if (phase_ == Phase::OPTIMIZATION &&
2896 ComputeObjectiveValue() < primal_objective_limit_) {
2897 VLOG(1) << "Stopping the primal simplex because"
2898 << " the objective limit " << primal_objective_limit_
2899 << " has been reached.";
2900 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2901 objective_limit_reached_ = true;
2902 return Status::OK();
2903 }
2904 } else if (phase_ == Phase::FEASIBILITY) {
2905 // Note that direction_.non_zeros contains the positions of the basic
2906 // variables whose values were updated during the last iteration.
2907 if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2908 &objective_)) {
2909 reduced_costs_.ResetForNewObjective();
2910 }
2911 }
2912
// Pricing: choose the entering column. kInvalidCol means no candidate, i.e.
// the current basis looks optimal/feasible.
2913 const ColIndex entering_col = primal_prices_.GetBestEnteringColumn();
2914 if (entering_col == kInvalidCol) {
2915 if (reduced_costs_.AreReducedCostsPrecise() &&
2916 basis_factorization_.IsRefactorized()) {
2917 if (phase_ == Phase::FEASIBILITY) {
2918 const Fractional primal_infeasibility =
2919 variable_values_.ComputeMaximumPrimalInfeasibility();
2920 if (primal_infeasibility <
2921 parameters_.primal_feasibility_tolerance()) {
2922 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2923 } else {
2924 VLOG(1) << "Infeasible problem! infeasibility = "
2925 << primal_infeasibility;
2926 problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2927 }
2928 } else {
2929 problem_status_ = ProblemStatus::OPTIMAL;
2930 }
2931 break;
2932 }
2933
// Only conclude once everything is precise: force a clean recomputation
// and loop once more to confirm.
2934 VLOG(1) << "Optimal reached, double checking...";
2935 reduced_costs_.MakeReducedCostsPrecise();
2936 refactorize = true;
2937 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
2938 continue;
2939 }
2940
2941 DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2942
2943 // Solve the system B.d = a with a the entering column.
2944 ComputeDirection(entering_col);
2945
2946 // This might trigger a recomputation on the next iteration. If it returns
2947 // false, we will also try to see if there is not another more promising
2948 // entering column.
2949 if (!primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
2950 direction_)) {
2951 primal_prices_.RecomputePriceAt(entering_col);
2952 continue;
2953 }
2954 const Fractional reduced_cost =
2955 reduced_costs_.TestEnteringReducedCostPrecision(entering_col,
2956 direction_);
2957
2958 // The test might have changed the reduced cost of the entering_col.
2959 // If it is no longer a valid entering candidate, we loop.
2960 primal_prices_.RecomputePriceAt(entering_col);
2961 if (!reduced_costs_.IsValidPrimalEnteringCandidate(entering_col)) {
2962 reduced_costs_.MakeReducedCostsPrecise();
2963 VLOG(1) << "Skipping col #" << entering_col
2964 << " whose reduced cost is no longer valid under precise reduced "
2965 "cost: "
2966 << reduced_cost;
2967 continue;
2968 }
2969
2970 // This test takes place after the check for optimality/feasibility because
2971 // when running with 0 iterations, we still want to report
2972 // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2973 // case at the beginning of the algorithm.
2974 if (num_iterations_ == parameters_.max_number_of_iterations()) break;
2975
// Ratio test: pick the leaving row and the admissible step length.
2976 Fractional step_length;
2977 RowIndex leaving_row;
2978 Fractional target_bound;
2979 if (phase_ == Phase::FEASIBILITY) {
2980 PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2981 &refactorize, &leaving_row,
2982 &step_length, &target_bound);
2983 } else {
2985 ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2986 &leaving_row, &step_length, &target_bound));
2987 }
2988 if (refactorize) {
2989 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
2990 continue;
2991 }
2992
// An infinite step means the ratio test found no blocking variable:
// unbounded in phase II, unless the data is still imprecise.
2993 if (step_length == kInfinity || step_length == -kInfinity) {
2994 // On a validated input, we shouldn't have a length of -infinity even
2995 // though it can be slightly negative in some settings.
2996 DCHECK_NE(step_length, -kInfinity);
2997 if (!basis_factorization_.IsRefactorized() ||
2998 !reduced_costs_.AreReducedCostsPrecise()) {
2999 VLOG(1) << "Infinite step length, double checking...";
3000 reduced_costs_.MakeReducedCostsPrecise();
3001 refactorize = true;
3002 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
3003 continue;
3004 }
3005 if (phase_ == Phase::FEASIBILITY) {
3006 // This shouldn't happen by construction.
3007 VLOG(1) << "Unbounded feasibility problem !?";
3008 problem_status_ = ProblemStatus::ABNORMAL;
3009 } else {
3010 problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
3011 solution_primal_ray_.AssignToZero(num_cols_);
3012 for (RowIndex row(0); row < num_rows_; ++row) {
3013 const ColIndex col = basis_[row];
3014 solution_primal_ray_[col] = -direction_[row];
3015 }
3016 solution_primal_ray_[entering_col] = 1.0;
3017 if (reduced_cost > 0.0) {
3018 ChangeSign(&solution_primal_ray_);
3019 }
3020 }
3021 break;
3022 }
3023
3024 Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
3025 if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
3026 // For phase-I we currently always set the leaving variable to its exact
3027 // bound even if by doing so we may take a small step in the wrong
3028 // direction and may increase the overall infeasibility.
3029 //
3030 // TODO(user): Investigate alternatives even if this seems to work well in
3031 // practice. Note that the final returned solution will have the property
3032 // that all non-basic variables are at their exact bound, so it is nice
3033 // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
3034 // this property cannot be found.
3035 step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
3036 }
3037
3038 // Store the leaving_col before basis_ change.
3039 const ColIndex leaving_col =
3040 (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
3041
3042 // An iteration is called 'degenerate' if the leaving variable is already
3043 // primal-infeasible and we make it even more infeasible or if we do a zero
3044 // step.
3045 bool is_degenerate = false;
3046 if (leaving_row != kInvalidRow) {
3047 Fractional dir = -direction_[leaving_row] * step;
3048 is_degenerate =
3049 (dir == 0.0) ||
3050 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
3051 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
3052
3053 // If the iteration is not degenerate, the leaving variable should go to
3054 // its exact target bound (it is how the step is computed).
3055 if (!is_degenerate) {
3056 DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
3057 target_bound));
3058 }
3059 }
3060
3061 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
3062 if (leaving_row != kInvalidRow) {
3063 // Important: the norm must be updated before the reduced_cost.
3064 primal_edge_norms_.UpdateBeforeBasisPivot(
3065 entering_col, basis_[leaving_row], leaving_row, direction_,
3066 &update_row_);
3067 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
3068 direction_, &update_row_);
3069 primal_prices_.UpdateBeforeBasisPivot(entering_col, &update_row_);
3070 if (!is_degenerate) {
3071 // On a non-degenerate iteration, the leaving variable should be at its
3072 // exact bound. This corrects an eventual small numerical error since
3073 // 'value + direction * step' where step is
3074 // '(target_bound - value) / direction'
3075 // may be slighlty different from target_bound.
3076 variable_values_.Set(leaving_col, target_bound);
3077 }
3079 UpdateAndPivot(entering_col, leaving_row, target_bound));
3081 if (is_degenerate) {
3082 timer.AlsoUpdate(&iteration_stats_.degenerate);
3083 } else {
3084 timer.AlsoUpdate(&iteration_stats_.normal);
3085 }
3086 });
3087 } else {
3088 // Bound flip. This makes sure that the flipping variable is at its bound
3089 // and has the correct status.
3091 variables_info_.GetTypeRow()[entering_col]);
3092 if (step > 0.0) {
3093 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3095 } else if (step < 0.0) {
3096 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3098 }
3099 primal_prices_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
3100 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
3101 }
3102
3103 if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
3104 // Set the leaving variable to its exact bound.
3105 variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3106
3107 // Change the objective value of the leaving variable to zero.
3108 reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
3109 &objective_[leaving_col]);
3110 primal_prices_.RecomputePriceAt(leaving_col);
3111 }
3112
3113 // Stats about consecutive degenerate iterations.
3114 if (step_length == 0.0) {
3115 num_consecutive_degenerate_iterations_++;
3116 } else {
3117 if (num_consecutive_degenerate_iterations_ > 0) {
3118 iteration_stats_.degenerate_run_size.Add(
3119 num_consecutive_degenerate_iterations_);
3120 num_consecutive_degenerate_iterations_ = 0;
3121 }
3122 }
3123 ++num_iterations_;
3124 }
// Flush a degenerate run that was still open when the loop exited.
3125 if (num_consecutive_degenerate_iterations_ > 0) {
3126 iteration_stats_.degenerate_run_size.Add(
3127 num_consecutive_degenerate_iterations_);
3128 }
3129 return Status::OK();
3130 }
3131
3132// TODO(user): Two other approaches for the phase I described in Koberstein's
3133// PhD thesis seem worth trying at some point:
3134// - The subproblem approach, which enables one to use a normal phase II dual,
3135// but requires an efficient bound-flipping ratio test since the new problem
3136// has all its variables boxed. This one is implemented now, but require
3137// a bit more tuning.
3138// - Pan's method, which is really fast but have no theoretical guarantee of
3139// terminating and thus needs to use one of the other methods as a fallback if
3140// it fails to make progress.
3141//
3142// Note that the returned status applies to the primal problem!
3143Status RevisedSimplex::DualMinimize(bool feasibility_phase,
3144 TimeLimit* time_limit) {
3145 Cleanup update_deterministic_time_on_return(
3146 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
3147 num_consecutive_degenerate_iterations_ = 0;
3148 bool refactorize = false;
3149 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
3150
3151 bound_flip_candidates_.clear();
3152
3153 // Leaving variable.
3154 RowIndex leaving_row;
3155 Fractional cost_variation;
3156 Fractional target_bound;
3157
3158 // Entering variable.
3159 ColIndex entering_col;
3160
3161 while (true) {
3162 AdvanceDeterministicTime(time_limit);
3163 if (time_limit->LimitReached()) break;
3164
3165 // TODO(user): we may loop a bit more than the actual number of iteration.
3166 // fix.
3168 ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
3169
3170 // Trigger a refactorization if one of the class we use request it.
3171 //
3172 // TODO(user): Estimate when variable values are imprecise and refactor too.
3173 const bool old_refactorize_value = refactorize;
3174 if (!refactorize && reduced_costs_.NeedsBasisRefactorization()) {
3175 last_refactorization_reason_ = RefactorizationReason::RC;
3176 refactorize = true;
3177 }
3178 if (!refactorize && dual_edge_norms_.NeedsBasisRefactorization()) {
3179 last_refactorization_reason_ = RefactorizationReason::NORM;
3180 refactorize = true;
3181 }
3182 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
3183
3184 // If the basis is refactorized, we recompute all the values in order to
3185 // have a good precision.
3186 if (basis_factorization_.IsRefactorized()) {
3187 // We do not want to recompute the reduced costs too often, this is
3188 // because that may break the overall direction taken by the last steps
3189 // and may lead to less improvement on degenerate problems.
3190 //
3191 // For now, we just recompute them if refactorize was set during the
3192 // loop and not because of normal refactorization.
3193 //
3194 // During phase-I, we do want the reduced costs to be as precise as
3195 // possible. TODO(user): Investigate why and fix the TODO in
3196 // PermuteBasis().
3197 //
3198 // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
3199 // do recompute them, it is better to do that first.
3200 if (feasibility_phase || old_refactorize_value) {
3201 reduced_costs_.MakeReducedCostsPrecise();
3202 }
3203
3204 // TODO(user): Make RecomputeBasicVariableValues() do nothing
3205 // if it was already recomputed on a refactorized basis. This is the
3206 // same behavior as MakeReducedCostsPrecise().
3207 //
3208 // TODO(user): Do not recompute the variable values each time we
3209 // refactorize the matrix, like for the reduced costs? That may lead to
3210 // a worse behavior than keeping the "imprecise" version and only
3211 // recomputing it when its precision is above a threshold.
3212 if (!feasibility_phase) {
3213 MakeBoxedVariableDualFeasible(
3214 variables_info_.GetNonBasicBoxedVariables(),
3215 /*update_basic_values=*/false);
3216 variable_values_.RecomputeBasicVariableValues();
3217 variable_values_.RecomputeDualPrices(
3218 parameters_.dual_price_prioritize_norm());
3219
3220 // Computing the objective at each iteration takes time, so we just
3221 // check the limit when the basis is refactorized.
3222 //
3223 // Hack: We need phase_ here and not the local feasibility_phase
3224 // variable because this must not be checked for the dual phase I algo
3225 // that use the same code as the dual phase II (i.e. the local
3226 // feasibility_phase will be false).
3227 if (phase_ == Phase::OPTIMIZATION &&
3228 dual_objective_limit_ != kInfinity &&
3229 ComputeObjectiveValue() > dual_objective_limit_) {
3230 SOLVER_LOG(logger_,
3231 "Stopping the dual simplex because"
3232 " the objective limit ",
3233 dual_objective_limit_, " has been reached.");
3234 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3235 objective_limit_reached_ = true;
3236 return Status::OK();
3237 }
3238 }
3239
3240 DisplayIterationInfo(/*primal=*/false, last_refactorization_reason_);
3241 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
3242 } else {
3243 // Updates from the previous iteration that can be skipped if we
3244 // recompute everything (see other case above).
3245 if (!feasibility_phase) {
3246 // Make sure the boxed variables are dual-feasible before choosing the
3247 // leaving variable row.
3248 MakeBoxedVariableDualFeasible(bound_flip_candidates_,
3249 /*update_basic_values=*/true);
3250 bound_flip_candidates_.clear();
3251
3252 // The direction_.non_zeros contains the positions for which the basic
3253 // variable value was changed during the previous iterations.
3254 variable_values_.UpdateDualPrices(direction_.non_zeros);
3255 }
3256 }
3257
3258 if (feasibility_phase) {
3259 GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
3260 &leaving_row, &cost_variation, &target_bound));
3261 } else {
3262 GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
3263 &leaving_row, &cost_variation, &target_bound));
3264 }
3265 if (leaving_row == kInvalidRow) {
3266 // TODO(user): integrate this with the main "re-optimization" loop.
3267 // Also distinguish cost perturbation and shifts?
3268 if (!basis_factorization_.IsRefactorized() ||
3269 reduced_costs_.HasCostShift()) {
3270 VLOG(1) << "Optimal reached, double checking.";
3271 reduced_costs_.ClearAndRemoveCostShifts();
3272 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3273 refactorize = true;
3274 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
3275 continue;
3276 }
3277 if (feasibility_phase) {
3278 // Note that since the basis is refactorized, the variable values
3279 // will be recomputed at the beginning of the second phase. The boxed
3280 // variable values will also be corrected by
3281 // MakeBoxedVariableDualFeasible().
3282 if (num_dual_infeasible_positions_ == 0) {
3283 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3284 } else {
3285 VLOG(1) << "DUAL infeasible in dual phase I.";
3286 problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
3287 }
3288 } else {
3289 problem_status_ = ProblemStatus::OPTIMAL;
3290 }
3291 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3292 return Status::OK();
3293 }
3294
3295 update_row_.ComputeUnitRowLeftInverse(leaving_row);
3296 if (!dual_edge_norms_.TestPrecision(leaving_row,
3297 update_row_.GetUnitRowLeftInverse())) {
3298 // We rechoose a potentially different leaving row. Note that if we choose
3299 // the same, we shouldn't go back here since the norm will now pass the
3300 // test.
3301 if (feasibility_phase) {
3302 const Fractional price = dual_pricing_vector_[leaving_row];
3303 const DenseColumn::ConstView squared_norms =
3304 dual_edge_norms_.GetEdgeSquaredNorms();
3305 dual_prices_.AddOrUpdate(leaving_row,
3306 Square(price) / squared_norms[leaving_row]);
3307 } else {
3308 variable_values_.UpdateDualPrices({leaving_row});
3309 }
3310 continue;
3311 }
3312 update_row_.ComputeUpdateRow(leaving_row);
3313
3314 if (feasibility_phase) {
3315 GLOP_RETURN_IF_ERROR(entering_variable_.DualPhaseIChooseEnteringColumn(
3316 reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3317 &entering_col));
3318 } else {
3319 GLOP_RETURN_IF_ERROR(entering_variable_.DualChooseEnteringColumn(
3320 reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3321 &bound_flip_candidates_, &entering_col));
3322 }
3323
3324 // No entering_col: dual unbounded (i.e. primal infeasible).
3325 if (entering_col == kInvalidCol) {
3326 if (!reduced_costs_.AreReducedCostsPrecise()) {
3327 VLOG(1) << "No entering column. Double checking...";
3328 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3329 refactorize = true;
3330 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
3331 continue;
3332 }
3333 DCHECK(basis_factorization_.IsRefactorized());
3334 if (feasibility_phase) {
3335 // This shouldn't happen by construction.
3336 VLOG(1) << "Unbounded dual feasibility problem !?";
3337 problem_status_ = ProblemStatus::ABNORMAL;
3338 } else {
3339 problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
3340 solution_dual_ray_ =
3341 Transpose(update_row_.GetUnitRowLeftInverse().values);
3342 update_row_.ComputeFullUpdateRow(leaving_row,
3343 &solution_dual_ray_row_combination_);
3344 if (cost_variation < 0) {
3345 ChangeSign(&solution_dual_ray_);
3346 ChangeSign(&solution_dual_ray_row_combination_);
3347 }
3348 }
3349 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3350 return Status::OK();
3351 }
3352
3353 // If the coefficient is too small, we recompute the reduced costs if not
3354 // already done. This is an extra heuristic to avoid computing the direction
3355 // If the pivot is small. But the real recomputation step is just below.
3356 const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
3357 if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
3358 !reduced_costs_.AreReducedCostsPrecise()) {
3359 VLOG(1) << "Trying not to pivot by " << entering_coeff;
3360 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3361 refactorize = true;
3362 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
3363 continue;
3364 }
3365
3366 ComputeDirection(entering_col);
3367
3368 // If the pivot is small compared to others in the direction_ vector we try
3369 // to recompute everything. If we cannot, then note that
3370 // DualChooseEnteringColumn() should guaranteed that the pivot is not too
3371 // small when everything has already been recomputed.
3372 if (std::abs(direction_[leaving_row]) <
3373 parameters_.small_pivot_threshold() * direction_infinity_norm_) {
3374 if (!reduced_costs_.AreReducedCostsPrecise()) {
3375 VLOG(1) << "Trying not pivot by " << entering_coeff << " ("
3376 << direction_[leaving_row]
3377 << ") because the direction has a norm of "
3378 << direction_infinity_norm_;
3379 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3380 refactorize = true;
3381 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
3382 continue;
3383 }
3384 }
3385
3386 // This is to avoid crashes below. It seems to happen rarely on a few miplib
3387 // problem like rmine11.pb.gz in multithread.
3388 //
3389 // TODO(user): Try to recover instead.
3390 if (std::abs(direction_[leaving_row]) <= 1e-20) {
3391 const std::string error_message = absl::StrCat(
3392 "trying to pivot with number too small: ", direction_[leaving_row]);
3393 SOLVER_LOG(logger_, error_message);
3394 return Status(Status::ERROR_LU, error_message);
3395 }
3396
3397 // This test takes place after the check for optimality/feasibility because
3398 // when running with 0 iterations, we still want to report
3399 // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
3400 // case at the beginning of the algorithm.
3401 if (num_iterations_ == parameters_.max_number_of_iterations()) {
3402 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3403 return Status::OK();
3404 }
3405
3406 // Before we update the reduced costs, if its sign is already dual
3407 // infeasible and the update direction will make it worse we make sure the
3408 // reduced cost is 0.0 so UpdateReducedCosts() will not take a step that
3409 // goes in the wrong direction (a few experiments seems to indicate that
3410 // this is not a good idea). See comment at the top of UpdateReducedCosts().
3411 //
3412 // Note that ShiftCostIfNeeded() actually shifts the cost a bit more in
3413 // order to do a non-zero step. This helps on degenerate problems. Like the
3414 // pertubation, we will remove all these shifts at the end.
3415 const bool increasing_rc_is_needed =
3416 (cost_variation > 0.0) == (entering_coeff > 0.0);
3417 reduced_costs_.ShiftCostIfNeeded(increasing_rc_is_needed, entering_col);
3418
3420 if (reduced_costs_.StepIsDualDegenerate(increasing_rc_is_needed,
3421 entering_col)) {
3422 timer.AlsoUpdate(&iteration_stats_.degenerate);
3423 } else {
3424 timer.AlsoUpdate(&iteration_stats_.normal);
3425 }
3426 });
3427
3428 // Update basis. Note that direction_ is already computed.
3429 //
3430 // TODO(user): this is pretty much the same in the primal or dual code.
3431 // We just need to know to what bound the leaving variable will be set to.
3432 // Factorize more common code?
3433 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
3434 &update_row_);
3435 dual_edge_norms_.UpdateBeforeBasisPivot(
3436 entering_col, leaving_row, direction_,
3437 update_row_.GetUnitRowLeftInverse());
3438
3439 // During phase I, we do not need the basic variable values at all.
3440 // Important: The norm should be updated before that.
3441 Fractional primal_step = 0.0;
3442 if (feasibility_phase) {
3443 DualPhaseIUpdatePrice(leaving_row, entering_col);
3444 } else {
3445 primal_step =
3446 ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
3447 variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
3448 }
3449
3450 // It is important to do the actual pivot after the update above!
3451 const ColIndex leaving_col = basis_[leaving_row];
3453 UpdateAndPivot(entering_col, leaving_row, target_bound));
3454
3455 // This makes sure the leaving variable is at its exact bound. Tests
3456 // indicate that this makes everything more stable. Note also that during
3457 // the feasibility phase, the variable values are not used, but that the
3458 // correct non-basic variable value are needed at the end.
3459 variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3460
3461 ++num_iterations_;
3462 }
3463 return Status::OK();
3464}
3465
3466Status RevisedSimplex::PrimalPush(TimeLimit* time_limit) {
3468 Cleanup update_deterministic_time_on_return(
3469 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
3470 bool refactorize = false;
3471
3472 // We clear all the quantities that we don't update so they will be recomputed
3473 // later if needed.
3474 primal_edge_norms_.Clear();
3475 dual_edge_norms_.Clear();
3476 update_row_.Invalidate();
3477 reduced_costs_.ClearAndRemoveCostShifts();
3478
3479 std::vector<ColIndex> super_basic_cols;
3480 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3481 if (variables_info_.GetStatusRow()[col] == VariableStatus::FREE &&
3482 variable_values_.Get(col) != 0) {
3483 super_basic_cols.push_back(col);
3484 }
3485 }
3486
3487 while (!super_basic_cols.empty()) {
3488 AdvanceDeterministicTime(time_limit);
3489 if (time_limit->LimitReached()) break;
3490
3492 ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
3493 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
3494 if (basis_factorization_.IsRefactorized()) {
3495 CorrectErrorsOnVariableValues();
3496 DisplayIterationInfo(/*primal=*/true);
3497 }
3498
3499 // TODO(user): Select at random like in Polish().
3500 ColIndex entering_col = super_basic_cols.back();
3501
3502 DCHECK(variables_info_.GetCanDecreaseBitRow()[entering_col]);
3503 DCHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
3504
3505 // Decide which direction to send the entering column.
3506 // UNCONSTRAINED variables go towards zero. Other variables go towards their
3507 // closest bound. We assume that we're at an optimal solution, so all FREE
3508 // variables have approximately zero reduced cost, which means that the
3509 // objective value won't change from moving this column into the basis.
3510 // TODO(user): As an improvement for variables with two bounds, try both
3511 // and pick one that doesn't require a basis change (if possible), otherwise
3512 // pick the closer bound.
3513 Fractional fake_rc;
3514 const Fractional entering_value = variable_values_.Get(entering_col);
3515 if (variables_info_.GetTypeRow()[entering_col] ==
3517 if (entering_value > 0) {
3518 fake_rc = 1.0;
3519 } else {
3520 fake_rc = -1.0;
3521 }
3522 } else {
3523 const Fractional diff_ub =
3524 variables_info_.GetVariableUpperBounds()[entering_col] -
3525 entering_value;
3526 const Fractional diff_lb =
3527 entering_value -
3528 variables_info_.GetVariableLowerBounds()[entering_col];
3529 if (diff_lb <= diff_ub) {
3530 fake_rc = 1.0;
3531 } else {
3532 fake_rc = -1.0;
3533 }
3534 }
3535
3536 // Solve the system B.d = a with a the entering column.
3537 ComputeDirection(entering_col);
3538
3539 Fractional step_length;
3540 RowIndex leaving_row;
3541 Fractional target_bound;
3542
3543 GLOP_RETURN_IF_ERROR(ChooseLeavingVariableRow(entering_col, fake_rc,
3544 &refactorize, &leaving_row,
3545 &step_length, &target_bound));
3546
3547 if (refactorize) continue;
3548
3549 // At this point, we know the iteration will finish or stop with an error.
3550 super_basic_cols.pop_back();
3551
3552 if (step_length == kInfinity || step_length == -kInfinity) {
3553 if (variables_info_.GetTypeRow()[entering_col] ==
3555 step_length = std::fabs(entering_value);
3556 } else {
3557 VLOG(1) << "Infinite step for bounded variable ?!";
3558 problem_status_ = ProblemStatus::ABNORMAL;
3559 break;
3560 }
3561 }
3562
3563 const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
3564
3565 // Store the leaving_col before basis_ change.
3566 const ColIndex leaving_col =
3567 (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
3568
3569 // An iteration is called 'degenerate' if the leaving variable is already
3570 // primal-infeasible and we make it even more infeasible or if we do a zero
3571 // step.
3572 // TODO(user): Test setting the step size to zero for degenerate steps.
3573 // We don't need to force a positive step because each super-basic variable
3574 // is pivoted in exactly once.
3575 bool is_degenerate = false;
3576 if (leaving_row != kInvalidRow) {
3577 Fractional dir = -direction_[leaving_row] * step;
3578 is_degenerate =
3579 (dir == 0.0) ||
3580 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
3581 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
3582
3583 // If the iteration is not degenerate, the leaving variable should go to
3584 // its exact target bound (it is how the step is computed).
3585 if (!is_degenerate) {
3586 DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
3587 target_bound));
3588 }
3589 }
3590
3591 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
3592 if (leaving_row != kInvalidRow) {
3593 if (!is_degenerate) {
3594 // On a non-degenerate iteration, the leaving variable should be at its
3595 // exact bound. This corrects an eventual small numerical error since
3596 // 'value + direction * step' where step is
3597 // '(target_bound - value) / direction'
3598 // may be slighlty different from target_bound.
3599 variable_values_.Set(leaving_col, target_bound);
3600 }
3602 UpdateAndPivot(entering_col, leaving_row, target_bound));
3604 if (is_degenerate) {
3605 timer.AlsoUpdate(&iteration_stats_.degenerate);
3606 } else {
3607 timer.AlsoUpdate(&iteration_stats_.normal);
3608 }
3609 });
3610 } else {
3611 // Snap the super-basic variable to its bound. Note that
3612 // variable_values_.UpdateOnPivoting() should already be close to that but
3613 // here we make sure it is exact and remove any small numerical errors.
3614 if (variables_info_.GetTypeRow()[entering_col] ==
3616 variable_values_.Set(entering_col, 0.0);
3617 } else if (step > 0.0) {
3618 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3620 } else if (step < 0.0) {
3621 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3623 }
3624 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
3625 }
3626
3627 ++num_iterations_;
3628 }
3629
3630 if (!super_basic_cols.empty()) {
3631 SOLVER_LOG(logger_, "Push terminated early with ", super_basic_cols.size(),
3632 " super-basic variables remaining.");
3633 }
3634
3635 // TODO(user): What status should be returned if the time limit is hit?
3636 // If the optimization phase finished, then OPTIMAL is technically correct
3637 // but also misleading.
3638
3639 return Status::OK();
3640}
3641
3642ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
3643 DCHECK_ROW_BOUNDS(row);
3644 return first_slack_col_ + RowToColIndex(row);
3645}
3646
3648 std::string result;
3649 result.append(iteration_stats_.StatString());
3650 result.append(ratio_test_stats_.StatString());
3651 result.append(entering_variable_.StatString());
3652 result.append(dual_prices_.StatString());
3653 result.append(reduced_costs_.StatString());
3654 result.append(variable_values_.StatString());
3655 result.append(primal_edge_norms_.StatString());
3656 result.append(dual_edge_norms_.StatString());
3657 result.append(update_row_.StatString());
3658 result.append(basis_factorization_.StatString());
3659 result.append(function_stats_.StatString());
3660 return result;
3661}
3662
3663void RevisedSimplex::DisplayAllStats() {
3664 if (absl::GetFlag(FLAGS_simplex_display_stats)) {
3665 absl::FPrintF(stderr, "%s", StatString());
3666 absl::FPrintF(stderr, "%s", GetPrettySolverStats());
3667 }
3668}
3669
3670Fractional RevisedSimplex::ComputeObjectiveValue() const {
3671 SCOPED_TIME_STAT(&function_stats_);
3672 return PreciseScalarProduct(objective_,
3673 Transpose(variable_values_.GetDenseRow()));
3674}
3675
3676Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
3677 SCOPED_TIME_STAT(&function_stats_);
3678 const Fractional sum = PreciseScalarProduct(
3679 objective_, Transpose(variable_values_.GetDenseRow()));
3680 return objective_scaling_factor_ * (sum + objective_offset_);
3681}
3682
3683void RevisedSimplex::SetParameters(const GlopParameters& parameters) {
3684 SCOPED_TIME_STAT(&function_stats_);
3685 deterministic_random_.seed(parameters.random_seed());
3686 absl_random_ = absl::BitGen(absl::SeedSeq({parameters.random_seed()}));
3687 initial_parameters_ = parameters;
3688 parameters_ = parameters;
3689 PropagateParameters();
3690}
3691
// Propagates parameters_ to every internal component that keeps its own copy
// of the solver parameters.
void RevisedSimplex::PropagateParameters() {
  SCOPED_TIME_STAT(&function_stats_);
  basis_factorization_.SetParameters(parameters_);
  entering_variable_.SetParameters(parameters_);
  reduced_costs_.SetParameters(parameters_);
  dual_edge_norms_.SetParameters(parameters_);
  primal_edge_norms_.SetParameters(parameters_);
  update_row_.SetParameters(parameters_);
}
3701
// Logs a one-line progress message for the current phase_ (feasibility,
// optimization, or push). The message is tagged with the reason that
// triggered the last refactorization, unless it was a routine one (DEFAULT).
void RevisedSimplex::DisplayIterationInfo(bool primal,
                                          RefactorizationReason reason) {
  if (!logger_->LoggingIsEnabled()) return;
  const std::string first_word = primal ? "Primal " : "Dual ";

  // We display the info on each re-factorization, and it is nice to show what
  // triggered the issue. Note that we don't display normal refactorization when
  // we decide that it is worth it for the solve time or we reach the fixed
  // refactorization period.
  std::string info;
  if (reason != RefactorizationReason::DEFAULT) {
    switch (reason) {
      // DEFAULT is unreachable here (filtered above) but keeps the switch
      // exhaustive.
      case RefactorizationReason::DEFAULT:
        info = " [default]";
        break;
      case RefactorizationReason::SMALL_PIVOT:
        info = " [small pivot]";
        break;
      case RefactorizationReason::IMPRECISE_PIVOT:
        info = " [imprecise pivot]";
        break;
      case RefactorizationReason::NORM:
        info = " [norms]";
        break;
      case RefactorizationReason::RC:
        info = " [reduced costs]";
        break;
      case RefactorizationReason::VAR_VALUES:
        info = " [var values]";
        break;
      case RefactorizationReason::FINAL_CHECK:
        info = " [check]";
        break;
    }
  }

  switch (phase_) {
    case Phase::FEASIBILITY: {
      const int64_t iter = num_iterations_;
      std::string name;
      Fractional objective;
      if (parameters_.use_dual_simplex()) {
        if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
          objective = reduced_costs_.ComputeSumOfDualInfeasibilities();
        } else {
          // The internal objective of the transformed problem is the negation
          // of the sum of the dual infeasibility of the original problem.
          objective = -PreciseScalarProduct(
              objective_, Transpose(variable_values_.GetDenseRow()));
        }
        name = "sum_dual_infeasibilities";
      } else {
        objective = variable_values_.ComputeSumOfPrimalInfeasibilities();
        name = "sum_primal_infeasibilities";
      }

      SOLVER_LOG(logger_, first_word, "feasibility phase, iteration # ", iter,
                 ", ", name, " = ", absl::StrFormat("%.15E", objective), info);
      break;
    }
    case Phase::OPTIMIZATION: {
      // Iteration count local to this phase.
      const int64_t iter = num_iterations_ - num_feasibility_iterations_;
      // Note that in the dual phase II, ComputeObjectiveValue() is also
      // computing the dual objective even if it uses the variable values.
      // This is because if we modify the bounds to make the problem
      // primal-feasible, we are at the optimal and hence the two objectives
      // are the same.
      const Fractional objective = ComputeInitialProblemObjectiveValue();
      SOLVER_LOG(logger_, first_word, "optimization phase, iteration # ", iter,
                 ", objective = ", absl::StrFormat("%.15E", objective), info);
      break;
    }
    case Phase::PUSH: {
      // Iteration count local to the push phase.
      const int64_t iter = num_iterations_ - num_feasibility_iterations_ -
                           num_optimization_iterations_;
      SOLVER_LOG(logger_, first_word, "push phase, iteration # ", iter,
                 ", remaining_variables_to_push = ",
                 ComputeNumberOfSuperBasicVariables(), info);
    }
  }
}
3783
// Logs a summary of the current solution quality: the problem status, the
// maximum primal/dual infeasibilities and the maximum primal/dual residuals.
// No-op when logging is disabled.
void RevisedSimplex::DisplayErrors() {
  if (!logger_->LoggingIsEnabled()) return;
  SOLVER_LOG(logger_,
             "Current status: ", GetProblemStatusString(problem_status_));
  SOLVER_LOG(logger_, "Primal infeasibility (bounds) = ",
             variable_values_.ComputeMaximumPrimalInfeasibility());
  SOLVER_LOG(logger_, "Primal residual |A.x - b| = ",
             variable_values_.ComputeMaximumPrimalResidual());
  SOLVER_LOG(logger_, "Dual infeasibility (reduced costs) = ",
             reduced_costs_.ComputeMaximumDualInfeasibility());
  SOLVER_LOG(logger_, "Dual residual |c_B - y.B| = ",
             reduced_costs_.ComputeMaximumDualResidual());
}
3797
3798namespace {
3799
3800std::string StringifyMonomialWithFlags(const Fractional a,
3801 absl::string_view x) {
3802 return StringifyMonomial(
3803 a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3804}
3805
3806// Returns a string representing the rational approximation of x or a decimal
3807// approximation of x according to
3808// absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions).
3809std::string StringifyWithFlags(const Fractional x) {
3810 return Stringify(x,
3811 absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3812}
3813
3814} // namespace
3815
3816std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
3817 std::string output;
3818 VariableType variable_type = variables_info_.GetTypeRow()[col];
3819 VariableStatus variable_status = variables_info_.GetStatusRow()[col];
3820 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3821 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3822 absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
3823 variable_name_[col],
3824 StringifyWithFlags(variable_values_.Get(col)),
3825 GetVariableStatusString(variable_status),
3826 GetVariableTypeString(variable_type),
3827 StringifyWithFlags(lower_bounds[col]),
3828 StringifyWithFlags(upper_bounds[col]));
3829 return output;
3830}
3831
3832void RevisedSimplex::DisplayInfoOnVariables() const {
3833 if (VLOG_IS_ON(3)) {
3834 for (ColIndex col(0); col < num_cols_; ++col) {
3835 const Fractional variable_value = variable_values_.Get(col);
3836 const Fractional objective_coefficient = objective_[col];
3837 const Fractional objective_contribution =
3838 objective_coefficient * variable_value;
3839 VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3840 << StringifyWithFlags(variable_value) << " * "
3841 << StringifyWithFlags(objective_coefficient)
3842 << "(obj) = " << StringifyWithFlags(objective_contribution);
3843 }
3844 VLOG(3) << "------";
3845 }
3846}
3847
3848void RevisedSimplex::DisplayVariableBounds() {
3849 if (VLOG_IS_ON(3)) {
3850 const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
3851 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3852 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3853 for (ColIndex col(0); col < num_cols_; ++col) {
3854 switch (variable_type[col]) {
3856 break;
3858 VLOG(3) << variable_name_[col]
3859 << " >= " << StringifyWithFlags(lower_bounds[col]) << ";";
3860 break;
3862 VLOG(3) << variable_name_[col]
3863 << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3864 break;
3866 VLOG(3) << StringifyWithFlags(lower_bounds[col])
3867 << " <= " << variable_name_[col]
3868 << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3869 break;
3871 VLOG(3) << variable_name_[col] << " = "
3872 << StringifyWithFlags(lower_bounds[col]) << ";";
3873 break;
3874 default: // This should never happen.
3875 LOG(DFATAL) << "Column " << col << " has no meaningful status.";
3876 break;
3877 }
3878 }
3879 }
3880}
3881
3882util_intops::StrongVector<RowIndex, SparseRow>
3884 util_intops::StrongVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3885 for (ColIndex col(0); col < num_cols_; ++col) {
3886 ComputeDirection(col);
3887 for (const auto e : direction_) {
3888 if (column_scales == nullptr) {
3889 dictionary[e.row()].SetCoefficient(col, e.coefficient());
3890 continue;
3891 }
3892 const Fractional numerator =
3893 col < column_scales->size() ? (*column_scales)[col] : 1.0;
3894 const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3895 ? (*column_scales)[GetBasis(e.row())]
3896 : 1.0;
3897 dictionary[e.row()].SetCoefficient(
3898 col, direction_[e.row()] * (numerator / denominator));
3899 }
3900 }
3901 return dictionary;
3902}
3903
3905 const LinearProgram& linear_program, const BasisState& state) {
3906 LoadStateForNextSolve(state);
3907 Status status = Initialize(linear_program);
3908 if (status.ok()) {
3909 variable_values_.RecomputeBasicVariableValues();
3910 solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3911 }
3912}
3913
3914void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3915 if (VLOG_IS_ON(3)) {
3916 // This function has a complexity in O(num_non_zeros_in_matrix).
3917 DisplayInfoOnVariables();
3918
3919 std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3920 const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
3921 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3922 absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3923 variable_name_[col]));
3924 }
3925 VLOG(3) << output << ";";
3926
3927 const RevisedSimplexDictionary dictionary(nullptr, this);
3928 RowIndex r(0);
3929 for (const SparseRow& row : dictionary) {
3930 output.clear();
3931 ColIndex basic_col = basis_[r];
3932 absl::StrAppend(&output, variable_name_[basic_col], " = ",
3933 StringifyWithFlags(variable_values_.Get(basic_col)));
3934 for (const SparseRowEntry e : row) {
3935 if (e.col() != basic_col) {
3936 absl::StrAppend(&output,
3937 StringifyMonomialWithFlags(e.coefficient(),
3938 variable_name_[e.col()]));
3939 }
3940 }
3941 VLOG(3) << output << ";";
3942 }
3943 VLOG(3) << "------";
3944 DisplayVariableBounds();
3945 ++r;
3946 }
3947}
3948
3949void RevisedSimplex::DisplayProblem() const {
3950 // This function has a complexity in O(num_rows * num_cols *
3951 // num_non_zeros_in_row).
3952 if (VLOG_IS_ON(3)) {
3953 DisplayInfoOnVariables();
3954 std::string output = "min: ";
3955 bool has_objective = false;
3956 for (ColIndex col(0); col < num_cols_; ++col) {
3957 const Fractional coeff = objective_[col];
3958 has_objective |= (coeff != 0.0);
3959 absl::StrAppend(&output,
3960 StringifyMonomialWithFlags(coeff, variable_name_[col]));
3961 }
3962 if (!has_objective) {
3963 absl::StrAppend(&output, " 0");
3964 }
3965 VLOG(3) << output << ";";
3966 for (RowIndex row(0); row < num_rows_; ++row) {
3967 output = "";
3968 for (ColIndex col(0); col < num_cols_; ++col) {
3969 absl::StrAppend(&output,
3970 StringifyMonomialWithFlags(
3971 compact_matrix_.column(col).LookUpCoefficient(row),
3972 variable_name_[col]));
3973 }
3974 VLOG(3) << output << " = 0;";
3975 }
3976 VLOG(3) << "------";
3977 }
3978}
3979
3980void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3981 DCHECK(time_limit != nullptr);
3982 const double current_deterministic_time = DeterministicTime();
3983 const double deterministic_time_delta =
3984 current_deterministic_time - last_deterministic_time_update_;
3985 time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3986 last_deterministic_time_update_ = current_deterministic_time;
3987}
3988
3989#undef DCHECK_COL_BOUNDS
3990#undef DCHECK_ROW_BOUNDS
3991
3992} // namespace glop
3993} // namespace operations_research
bool LoggingIsEnabled() const
Returns true iff logging is enabled.
Definition logging.h:49
void SetParameters(const GlopParameters &parameters)
Sets the parameters for this component.
void SetTimeLimit(TimeLimit *time_limit)
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
void SetParameters(const GlopParameters &parameters)
Sets the parameters.
const DenseRow & objective_coefficients() const
Returns the objective coefficients (or cost) of variables as a row vector.
Definition lp_data.h:231
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
void SetParameters(const GlopParameters &parameters)
Sets the pricing parameters. This does not change the pricing rule.
Fractional GetVariableValue(ColIndex col) const
void SetIntegralityScale(ColIndex col, Fractional scale)
ConstraintStatus GetConstraintStatus(RowIndex row) const
void LoadStateForNextSolve(const BasisState &state)
Uses the given state as a warm-start for the next Solve() call.
Fractional GetDualValue(RowIndex row) const
Fractional GetReducedCost(ColIndex col) const
Fractional GetConstraintActivity(RowIndex row) const
RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales)
const DenseRow & GetDualRayRowCombination() const
This is the "dual ray" linear combination of the matrix rows.
RowIndex GetProblemNumRows() const
Getters to retrieve all the information computed by the last Solve().
void ComputeBasicVariablesForState(const LinearProgram &linear_program, const BasisState &state)
std::string StatString()
Returns statistics about this class as a string.
const BasisFactorization & GetBasisFactorization() const
VariableStatus GetVariableStatus(ColIndex col) const
void SetParameters(const GlopParameters &parameters)
Sets or gets the algorithm parameters to be used on the next Solve().
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
ABSL_MUST_USE_RESULT Status MinimizeFromTransposedMatrixWithSlack(const DenseRow &objective, Fractional objective_scaling_factor, Fractional objective_offset, TimeLimit *time_limit)
void SetStartingVariableValuesForNextSolve(const DenseRow &values)
static Status OK()
Improves readability but identical to 0-arg constructor.
Definition status.h:55
@ ERROR_LU
The LU factorization of the current basis couldn't be computed.
Definition status.h:34
StrictITISpan< RowIndex, const Fractional > ConstView
Definition lp_types.h:291
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
Fractional Get(ColIndex col) const
Getters for the variable values.
const DenseBitRow & GetCanDecreaseBitRow() const
const DenseBitRow & GetCanIncreaseBitRow() const
const DenseBitRow & GetNotBasicBitRow() const
const DenseBitRow & GetIsRelevantBitRow() const
time_limit
Definition solve.cc:22
constexpr ColIndex kInvalidCol(-1)
constexpr double kInfinity
Infinity for type Fractional.
Definition lp_types.h:87
std::string GetVariableTypeString(VariableType variable_type)
Returns the string representation of the VariableType enum.
Definition lp_types.cc:56
StrictITIVector< RowIndex, ColIndex > RowToColMapping
Definition lp_types.h:394
Fractional Square(Fractional f)
Definition lp_utils.h:41
StrictITIVector< RowIndex, bool > DenseBooleanColumn
Column of booleans.
Definition lp_types.h:383
Permutation< ColIndex > ColumnPermutation
std::string Stringify(const Fractional x, bool fraction)
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
Definition lp_utils.h:111
Bitset64< ColIndex > DenseBitRow
Row of bits.
Definition lp_types.h:375
const DenseRow & Transpose(const DenseColumn &col)
Similar comment as the other Transpose() implementation above.
Definition lp_utils.h:199
std::string GetVariableStatusString(VariableStatus status)
Returns the string representation of the VariableStatus enum.
Definition lp_types.cc:75
Index ColToIntIndex(ColIndex col)
Get the integer index corresponding to the col.
Definition lp_types.h:60
constexpr const uint64_t kDeterministicSeed
ColIndex RowToColIndex(RowIndex row)
Get the ColIndex corresponding to the column # row.
Definition lp_types.h:54
bool IsFinite(Fractional value)
Definition lp_types.h:94
constexpr RowIndex kInvalidRow(-1)
VariableType
Different types of variables.
Definition lp_types.h:178
bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, const SparseMatrix &matrix_a, const CompactSparseMatrix &matrix_b)
StrictITIVector< ColIndex, VariableType > VariableTypeRow
Row of variable types.
Definition lp_types.h:369
Fractional InfinityNorm(const DenseColumn &v)
Returns the maximum of the coefficients of 'v'.
Definition lp_utils.cc:151
void ChangeSign(StrictITIVector< IndexType, Fractional > *data)
Changes the sign of all the entries in the given vector.
Definition lp_utils.h:312
StrictITIVector< ColIndex, VariableStatus > VariableStatusRow
Row of variable statuses.
Definition lp_types.h:372
std::string StringifyMonomial(const Fractional a, absl::string_view x, bool fraction)
StrictITIVector< RowIndex, Fractional > DenseColumn
Column-vector types. Column-vector types are indexed by a row index.
Definition lp_types.h:380
StrictITIVector< ColIndex, Fractional > DenseRow
Row-vector types. Row-vector types are indexed by a column index.
Definition lp_types.h:351
ProblemStatus
Different statuses for a given problem.
Definition lp_types.h:105
@ ABNORMAL
An error occurred during the solving process.
Definition lp_types.h:158
@ INIT
The solver didn't had a chance to prove anything.
Definition lp_types.h:146
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Returns the ConstraintStatus corresponding to a given VariableStatus.
Definition lp_types.cc:113
static double DeterministicTimeForFpOperations(int64_t n)
Definition lp_types.h:433
void ApplyColumnPermutationToRowIndexedVector(StrictITISpan< ColIndex, const ColIndex > col_perm, RowIndexedVector *v, RowIndexedVector *tmp)
std::string GetProblemStatusString(ProblemStatus problem_status)
Returns the string representation of the ProblemStatus enum.
Definition lp_types.cc:23
In SWIG mode, we don't want anything besides these top-level includes.
util_intops::StrongVector< RowEntryIndex, SubsetIndex > SparseRow
DisabledScopedTimeDistributionUpdater ScopedTimeDistributionUpdater
Definition stats.h:414
STL namespace.
#define RETURN_IF_NULL(x)
#define DCHECK_ROW_BOUNDS(row)
ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false, "Display numbers as fractions.")
#define DCHECK_COL_BOUNDS(col)
#define IF_STATS_ENABLED(instructions)
Definition stats.h:417
#define SCOPED_TIME_STAT(stats)
Definition stats.h:418
#define GLOP_RETURN_IF_ERROR(function_call)
Macro to simplify error propagation between function returning Status.
Definition status.h:71
#define GLOP_RETURN_ERROR_IF_NULL(arg)
Macro to check that a pointer argument is not null.
Definition status.h:86
#define SOLVER_LOG(logger,...)
Definition logging.h:109