Google OR-Tools v9.14 — a fast and portable software suite for combinatorial optimization.
Doxygen source listing for revised_simplex.cc (viewer navigation chrome removed; the
embedded numbers at the start of each code line are the original file's line numbers).
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19#include <cstdlib>
20#include <functional>
21#include <string>
22#include <utility>
23#include <vector>
24
25#include "absl/flags/flag.h"
26#include "absl/log/check.h"
27#include "absl/log/vlog_is_on.h"
28#include "absl/random/bit_gen_ref.h"
29#include "absl/random/random.h"
30#include "absl/random/seed_sequences.h"
31#include "absl/strings/match.h"
32#include "absl/strings/str_cat.h"
33#include "absl/strings/str_format.h"
34#include "absl/strings/string_view.h"
40#include "ortools/glop/status.h"
54#include "ortools/util/stats.h"
56
// Debugging / experimentation command-line flags. They are read with
// absl::GetFlag() inside SolveInternal() and the display helpers below; they
// are deliberately not part of GlopParameters.
57ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false,
58 "Display numbers as fractions.");
59ABSL_FLAG(bool, simplex_stop_after_first_basis, false,
60 "Stop after first basis has been computed.");
61ABSL_FLAG(bool, simplex_stop_after_feasibility, false,
62 "Stop after first phase has been completed.");
63ABSL_FLAG(bool, simplex_display_stats, false, "Display algorithm statistics.");
64
65namespace operations_research {
66namespace glop {
67namespace {
68
// Calls the given closure upon destruction. It can be used to ensure that a
// closure is executed whenever a function returns (a minimal scope guard).
class Cleanup {
 public:
  explicit Cleanup(std::function<void()> closure)
      : closure_(std::move(closure)) {}

  // Runs the closure, if any. The empty-closure guard avoids invoking an
  // empty std::function (which throws std::bad_function_call) from a
  // destructor, where the exception would call std::terminate.
  ~Cleanup() {
    if (closure_) closure_();
  }

  // Copying a scope guard would run the closure twice, so disallow it.
  Cleanup(const Cleanup&) = delete;
  Cleanup& operator=(const Cleanup&) = delete;

 private:
  std::function<void()> closure_;  // Action to run on destruction.
};
80} // namespace
81
// Debug-only bounds checks on column/row indices. Both macros expect the
// corresponding member (num_cols_ / num_rows_) of the enclosing class to be
// in scope at the expansion site.
82#define DCHECK_COL_BOUNDS(col) \
83 { \
84 DCHECK_LE(0, col); \
85 DCHECK_GT(num_cols_, col); \
86 }
87
88// TODO(user): Remove this function.
89#define DCHECK_ROW_BOUNDS(row) \
90 { \
91 DCHECK_LE(0, row); \
92 DCHECK_GT(num_rows_, row); \
93 }
94
// Fixed seed so runs are reproducible by default. NOTE(review): the usage
// site is outside this excerpt — presumably the deterministic RNG; confirm.
95constexpr const uint64_t kDeterministicSeed = 42;
96
97namespace {
98
99bool UseAbslRandom(const GlopParameters& parameters) {
100 return parameters.use_absl_random();
101}
102
103} // namespace
104
// NOTE(review): this listing elides original line 105 — the line above this
// member-initializer list, presumably the RevisedSimplex constructor
// signature. The initializers wire the solver sub-components together around
// the shared state (compact_matrix_, basis_, variables_info_, the RNG, ...),
// then SetParameters() propagates the default parameters to all of them.
106 : problem_status_(ProblemStatus::INIT),
107 objective_(),
108 basis_(),
109 variable_name_(),
110 direction_(),
111 error_(),
112 random_(absl::BitGenRef(deterministic_random_)),
113 basis_factorization_(&compact_matrix_, &basis_),
114 variables_info_(compact_matrix_),
115 primal_edge_norms_(compact_matrix_, variables_info_,
116 basis_factorization_),
117 dual_edge_norms_(basis_factorization_),
118 dual_prices_(random_),
119 variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
120 basis_factorization_, &dual_edge_norms_, &dual_prices_),
121 update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
122 basis_factorization_),
123 reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
124 basis_factorization_, random_),
125 entering_variable_(variables_info_, random_, &reduced_costs_),
126 primal_prices_(random_, variables_info_, &primal_edge_norms_,
127 &reduced_costs_),
128 iteration_stats_(),
129 ratio_test_stats_(),
130 function_stats_("SimplexFunctionStats"),
131 parameters_(),
132 test_lu_() {
133 SetParameters(parameters_);
134}
135
// NOTE(review): the signature line (original line 136) is elided from this
// listing. The body clears the saved basis statuses and any user-provided
// starting values kept for the next solve.
137 SCOPED_TIME_STAT(&function_stats_);
138 solution_state_.statuses.clear();
139 variable_starting_values_.clear();
140}
141
// NOTE(review): the signature line (original line 142) is elided from this
// listing; from the body, the function takes a basis state `state` and
// installs it as the externally-provided solution state.
143 // We avoid marking the state as set externally if it is the same as the
144 // current one.
145 //
146 // TODO(user): Add comparison operator.
147 if (state.statuses == solution_state_.statuses) return;
148
149 solution_state_ = state;
150 solution_state_has_been_set_externally_ = true;
151}
152
// NOTE(review): the first signature line (original line 153) is elided from
// this listing. Stores user-provided starting values for the next solve.
154 const DenseRow& values) {
155 variable_starting_values_ = values;
156}
157
// NOTE(review): the first signature line (original line 158) is elided from
// this listing. This overload re-solves after the bounds/objective were
// mutated in place: it refreshes dimensions from the transposed matrix when
// needed, installs the new objective, and either takes a fast incremental
// path (bounds-only change with a reusable basis) or fully re-initializes.
159 const DenseRow& objective, Fractional objective_scaling_factor,
160 Fractional objective_offset, TimeLimit* time_limit) {
161 const double start_time = time_limit->GetElapsedTime();
162 default_logger_.EnableLogging(parameters_.log_search_progress());
163 default_logger_.SetLogToStdOut(parameters_.log_to_stdout());
164 parameters_ = initial_parameters_;
165 PropagateParameters();
166
167 // The source of truth is the transposed matrix.
168 if (transpose_was_changed_) {
169 compact_matrix_.PopulateFromTranspose(transposed_matrix_);
170 num_rows_ = compact_matrix_.num_rows();
171 num_cols_ = compact_matrix_.num_cols();
172 first_slack_col_ = num_cols_ - RowToColIndex(num_rows_);
173 }
174
175 DCHECK_EQ(num_cols_, objective.size());
176
177 // Copy objective
178 objective_scaling_factor_ = objective_scaling_factor;
179 objective_offset_ = objective_offset;
180 const bool objective_is_unchanged = objective_ == objective;
181 objective_ = objective;
182 InitializeObjectiveLimit();
183
184 // Initialize variable infos from the mutated bounds.
185 variables_info_.InitializeFromMutatedState();
186
187 if (objective_is_unchanged && parameters_.use_dual_simplex() &&
188 !transpose_was_changed_ && !solution_state_has_been_set_externally_ &&
189 !solution_state_.IsEmpty()) {
190 // Fast track if we just changed variable bounds.
191 primal_edge_norms_.Clear();
192 variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0),
193 solution_state_);
194 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
195 variable_values_.RecomputeBasicVariableValues();
196 return SolveInternal(start_time, false, objective, time_limit);
197 } else {
198 GLOP_RETURN_IF_ERROR(FinishInitialization(true));
199 }
200
// Non-fast-track path: initialization done above, solve from scratch.
201 return SolveInternal(start_time, false, objective, time_limit);
202}
203
// NOTE(review): original line 204 (the signature, taking a linear program
// `lp` and `time_limit`) and line 212 (the objective-coefficients argument of
// the SolveInternal call) are elided from this listing.
205 const double start_time = time_limit->GetElapsedTime();
206 default_logger_.EnableLogging(parameters_.log_search_progress());
207 default_logger_.SetLogToStdOut(parameters_.log_to_stdout());
208
209 DCHECK(lp.IsCleanedUp());
210 GLOP_RETURN_IF_ERROR(Initialize(lp));
211 return SolveInternal(start_time, lp.IsMaximizationProblem(),
213}
214
// Runs the simplex phases (feasibility, optimization, optional push) on the
// already-initialized internal state, then fills the solution members
// (problem_status_, solution_objective_value_, dual values, reduced costs).
// `objective_coefficients` restores the objective after a primal phase I;
// `is_maximization_problem` flips signs for the caller's convention.
// The Cleanup guard below advances the deterministic time on every return.
// NOTE(review): this listing elides a few original source lines (219, 274,
// 409, 423) — visible as gaps in the embedded line numbering below.
215ABSL_MUST_USE_RESULT Status RevisedSimplex::SolveInternal(
216 double start_time, bool is_maximization_problem,
217 const DenseRow& objective_coefficients, TimeLimit* time_limit) {
218 SCOPED_TIME_STAT(&function_stats_);
// NOTE(review): original line 219 is elided from this listing.
220 Cleanup update_deterministic_time_on_return(
221 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
222
223 SOLVER_LOG(logger_, "");
224 primal_edge_norms_.SetTimeLimit(time_limit);
225 dual_edge_norms_.SetTimeLimit(time_limit);
226
227 if (logger_->LoggingIsEnabled()) {
228 DisplayBasicVariableStatistics();
229 }
230
// Reset all per-solve counters and state before starting.
231 dual_infeasibility_improvement_direction_.clear();
232 update_row_.Invalidate();
233 test_lu_.Clear();
234 problem_status_ = ProblemStatus::INIT;
235 phase_ = Phase::FEASIBILITY;
236 num_iterations_ = 0;
237 num_feasibility_iterations_ = 0;
238 num_optimization_iterations_ = 0;
239 num_push_iterations_ = 0;
240 feasibility_time_ = 0.0;
241 optimization_time_ = 0.0;
242 push_time_ = 0.0;
243 total_time_ = 0.0;
244
245 // In case we abort because of an error, we cannot assume that the current
246 // solution state will be in sync with all our internal data structure. In
247 // case we abort without resetting it, setting this allow us to still use the
248 // previous state info, but we will double-check everything.
249 solution_state_has_been_set_externally_ = true;
250
251 if (VLOG_IS_ON(2)) {
252 ComputeNumberOfEmptyRows();
253 ComputeNumberOfEmptyColumns();
254 DisplayProblem();
255 }
256 if (absl::GetFlag(FLAGS_simplex_stop_after_first_basis)) {
257 DisplayAllStats();
258 return Status::OK();
259 }
260
261 const bool use_dual = parameters_.use_dual_simplex();
262
263 // TODO(user): Avoid doing the first phase checks when we know from the
264 // incremental solve that the solution is already dual or primal feasible.
265 SOLVER_LOG(logger_, "");
266 primal_edge_norms_.SetPricingRule(parameters_.feasibility_rule());
267 if (use_dual) {
268 if (parameters_.perturb_costs_in_dual_simplex()) {
269 reduced_costs_.PerturbCosts();
270 }
271
272 if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
273 variables_info_.MakeBoxedVariableRelevant(false);
// NOTE(review): original line 274 (the start of this wrapped call —
// presumably GLOP_RETURN_IF_ERROR) is elided from this listing.
275 DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
276
277 if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
278 // Note(user): In most cases, the matrix will already be refactorized
279 // and both Refactorize() and PermuteBasis() will do nothing. However,
280 // if the time limit is reached during the first phase, this might not
281 // be the case and RecomputeBasicVariableValues() below DCHECKs that the
282 // matrix is refactorized. This is not required, but we currently only
283 // want to recompute values from scratch when the matrix was just
284 // refactorized to maximize precision.
285 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
286 PermuteBasis();
287
288 variables_info_.MakeBoxedVariableRelevant(true);
289 reduced_costs_.MakeReducedCostsPrecise();
290
291 // This is needed to display errors properly.
292 MakeBoxedVariableDualFeasible(
293 variables_info_.GetNonBasicBoxedVariables(),
294 /*update_basic_values=*/false);
295 variable_values_.RecomputeBasicVariableValues();
296 }
297 } else {
298 // Test initial dual infeasibility, ignoring boxed variables. We currently
299 // refactorize/recompute the reduced costs if not already done.
300 // TODO(user): Not ideal in an incremental setting.
301 reduced_costs_.MakeReducedCostsPrecise();
302 bool refactorize = reduced_costs_.NeedsBasisRefactorization();
303 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
304
305 const Fractional initial_infeasibility =
306 reduced_costs_.ComputeMaximumDualInfeasibilityOnNonBoxedVariables();
307 if (initial_infeasibility <
308 reduced_costs_.GetDualFeasibilityTolerance()) {
309 SOLVER_LOG(logger_, "Initial basis is dual feasible.");
310 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
311 MakeBoxedVariableDualFeasible(
312 variables_info_.GetNonBasicBoxedVariables(),
313 /*update_basic_values=*/false);
314 variable_values_.RecomputeBasicVariableValues();
315 } else {
316 // Transform problem and recompute variable values.
317 variables_info_.TransformToDualPhaseIProblem(
318 reduced_costs_.GetDualFeasibilityTolerance(),
319 reduced_costs_.GetReducedCosts());
320 DenseRow zero; // We want the FREE variable at zero here.
321 variable_values_.ResetAllNonBasicVariableValues(zero);
322 variable_values_.RecomputeBasicVariableValues();
323
324 // Optimize.
325 DisplayErrors();
326 GLOP_RETURN_IF_ERROR(DualMinimize(false, time_limit));
327
328 // Restore original problem and recompute variable values. Note that we
329 // need the reduced cost on the fixed positions here.
330 variables_info_.EndDualPhaseI(
331 reduced_costs_.GetDualFeasibilityTolerance(),
332 reduced_costs_.GetFullReducedCosts());
333 variable_values_.ResetAllNonBasicVariableValues(
334 variable_starting_values_);
335 variable_values_.RecomputeBasicVariableValues();
336
337 // TODO(user): Note that if there was cost shifts, we just keep them
338 // until the end of the optim.
339 //
340 // TODO(user): What if slightly infeasible? we shouldn't really stop.
341 // Call primal ? use higher tolerance ? It seems we can always kind of
342 // continue and deal with the issue later. Find a way other than this +
343 // 1e-6 hack.
344 if (problem_status_ == ProblemStatus::OPTIMAL) {
345 if (reduced_costs_.ComputeMaximumDualInfeasibility() <
346 reduced_costs_.GetDualFeasibilityTolerance() + 1e-6) {
347 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
348 } else {
349 SOLVER_LOG(logger_, "Infeasible after first phase.");
350 problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
351 }
352 }
353 }
354 }
355 } else {
356 GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
357
358 // After the primal phase I, we need to restore the objective.
359 if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
360 objective_ = objective_coefficients;
361 if (is_maximization_problem) {
362 for (Fractional& value : objective_) {
363 value = -value;
364 }
365 }
366 objective_.resize(num_cols_, 0.0); // For the slack.
367 reduced_costs_.ResetForNewObjective();
368 }
369 }
370
371 DisplayErrors();
372
373 phase_ = Phase::OPTIMIZATION;
374 feasibility_time_ = time_limit->GetElapsedTime() - start_time;
375 primal_edge_norms_.SetPricingRule(parameters_.optimization_rule());
376 num_feasibility_iterations_ = num_iterations_;
377
378 // Because of shifts or perturbations, we may need to re-run a dual simplex
379 // after the primal simplex finished, or the opposite.
380 //
381 // We alter between solving with primal and dual Phase II algorithm as long as
382 // time limit permits *and* we did not yet achieve the desired precision.
383 // I.e., we run iteration i if the solution from iteration i-1 was not precise
384 // after we removed the bound and cost shifts and perturbations.
385 //
386 // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
387 // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
388 // (i.e., these statuses are not necesserily a consequence of hitting a time
389 // limit).
390 SOLVER_LOG(logger_, "");
391 for (int num_optims = 0;
392 // We want to enter the loop when both num_optims and num_iterations_ are
393 // *equal* to the corresponding limits (to return a meaningful status
394 // when the limits are set to 0).
395 num_optims <= parameters_.max_number_of_reoptimizations() &&
396 !objective_limit_reached_ &&
397 (num_iterations_ == 0 ||
398 num_iterations_ < parameters_.max_number_of_iterations()) &&
399 !time_limit->LimitReached() &&
400 !absl::GetFlag(FLAGS_simplex_stop_after_feasibility) &&
401 (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
402 problem_status_ == ProblemStatus::DUAL_FEASIBLE);
403 ++num_optims) {
404 if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
405 // Run the primal simplex.
406 GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
407 } else {
408 // Run the dual simplex.
// NOTE(review): original line 409 (the start of this wrapped call —
// presumably GLOP_RETURN_IF_ERROR) is elided from this listing.
410 DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
411 }
412
413 // PrimalMinimize() or DualMinimize() always double check the result with
414 // maximum precision by refactoring the basis before exiting (except if an
415 // iteration or time limit was reached).
416 DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
417 problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
418 basis_factorization_.IsRefactorized());
419
420 // If SetIntegralityScale() was called, we perform a polish operation.
421 if (!integrality_scale_.empty() &&
422 problem_status_ == ProblemStatus::OPTIMAL) {
// NOTE(review): original line 423 (the body of this if — presumably the
// polish call) is elided from this listing.
424 }
425
426 // Remove the bound and cost shifts (or perturbations).
427 //
428 // Note(user): Currently, we never do both at the same time, so we could
429 // be a bit faster here, but then this is quick anyway.
430 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
431 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
432 PermuteBasis();
433 variable_values_.RecomputeBasicVariableValues();
434 reduced_costs_.ClearAndRemoveCostShifts();
435
436 DisplayErrors();
437
438 // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
439 // status by checking with the other phase I that the problem is really
440 // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
441 // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
442 // OPTIMAL and the dual does not have issues on this problem.
443 //
444 // TODO(user): There is another issue on infeas/qual.mps. I think we should
445 // just check the dual ray, not really the current solution dual
446 // feasibility.
447 if (problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
448 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
449 if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
450 variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
451 variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
452 SOLVER_LOG(logger_,
453 "PRIMAL_UNBOUNDED was reported, but the residual and/or "
454 "dual infeasibility is above the tolerance");
455 if (parameters_.change_status_to_imprecise()) {
456 problem_status_ = ProblemStatus::IMPRECISE;
457 }
458 break;
459 }
460
461 // All of our tolerance are okay, but the dual ray might be fishy. This
462 // happens on l30.mps and on L1_sixm250obs.mps.gz. If the ray do not
463 // seems good enough, we might actually just be at the optimal and have
464 // trouble going down to our relatively low default tolerances.
465 //
466 // The difference bettween optimal and unbounded can be thin. Say you
467 // have a free variable with no constraint and a cost of epsilon,
468 // depending on epsilon and your tolerance, this will either cause the
469 // problem to be unbounded, or can be ignored.
470 //
471 // Here, we compute what is the cost gain if we move from the current
472 // value with the ray up to the bonds + tolerance. If this gain is < 1,
473 // it is hard to claim we are really unbounded. This is a quick
474 // heuristic to error on the side of optimality rather than
475 // unboundedness.
476 Fractional max_magnitude = 0.0;
477 Fractional min_distance = kInfinity;
478 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
479 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
480 Fractional cost_delta = 0.0;
481 for (ColIndex col(0); col < num_cols_; ++col) {
482 cost_delta += solution_primal_ray_[col] * objective_[col];
483 if (solution_primal_ray_[col] > 0 && upper_bounds[col] != kInfinity) {
484 const Fractional value = variable_values_.Get(col);
485 const Fractional distance = (upper_bounds[col] - value + tolerance) /
486 solution_primal_ray_[col];
487 min_distance = std::min(distance, min_distance);
488 max_magnitude = std::max(solution_primal_ray_[col], max_magnitude);
489 }
490 if (solution_primal_ray_[col] < 0 && lower_bounds[col] != -kInfinity) {
491 const Fractional value = variable_values_.Get(col);
492 const Fractional distance = (value - lower_bounds[col] + tolerance) /
493 -solution_primal_ray_[col];
494 min_distance = std::min(distance, min_distance);
495 max_magnitude = std::max(-solution_primal_ray_[col], max_magnitude);
496 }
497 }
498 SOLVER_LOG(logger_, "Primal unbounded ray: max blocking magnitude = ",
499 max_magnitude, ", min distance to bound + ", tolerance, " = ",
500 min_distance, ", ray cost delta = ", cost_delta);
501 if (min_distance * std::abs(cost_delta) < 1 &&
502 reduced_costs_.ComputeMaximumDualInfeasibility() <= tolerance) {
503 SOLVER_LOG(logger_,
504 "PRIMAL_UNBOUNDED was reported, but the tolerance are good "
505 "and the unbounded ray is not great.");
506 SOLVER_LOG(logger_,
507 "The difference between unbounded and optimal can depend "
508 "on a slight change of tolerance, trying to see if we are "
509 "at OPTIMAL after postsolve.");
510 problem_status_ = ProblemStatus::OPTIMAL;
511 }
512 break;
513 }
514 if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
515 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
516 if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
517 variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
518 reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
519 SOLVER_LOG(logger_,
520 "DUAL_UNBOUNDED was reported, but the residual and/or "
521 "dual infeasibility is above the tolerance");
522 if (parameters_.change_status_to_imprecise()) {
523 problem_status_ = ProblemStatus::IMPRECISE;
524 }
525 }
526
527 // Validate the dual ray that prove primal infeasibility.
528 //
529 // By taking the linear combination of the constraint, we should arrive
530 // to an infeasible <= 0 constraint using the variable bounds.
531 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
532 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
533 Fractional implied_lb = 0.0;
534 Fractional error = 0.0;
535 for (ColIndex col(0); col < num_cols_; ++col) {
536 const Fractional coeff = solution_dual_ray_row_combination_[col];
537 if (coeff > 0) {
538 if (lower_bounds[col] == -kInfinity) {
539 error = std::max(error, coeff);
540 } else {
541 implied_lb += coeff * lower_bounds[col];
542 }
543 } else if (coeff < 0) {
544 if (upper_bounds[col] == kInfinity) {
545 error = std::max(error, -coeff);
546 } else {
547 implied_lb += coeff * upper_bounds[col];
548 }
549 }
550 }
551 SOLVER_LOG(logger_, "Dual ray error=", error,
552 " infeasibility=", implied_lb);
553 if (implied_lb < tolerance || error > tolerance) {
554 SOLVER_LOG(logger_,
555 "DUAL_UNBOUNDED was reported, but the dual ray is not "
556 "proving infeasibility with high enough tolerance");
557 if (parameters_.change_status_to_imprecise()) {
558 problem_status_ = ProblemStatus::IMPRECISE;
559 }
560 }
561 break;
562 }
563
564 // Change the status, if after the shift and perturbation removal the
565 // problem is not OPTIMAL anymore.
566 if (problem_status_ == ProblemStatus::OPTIMAL) {
567 const Fractional solution_tolerance =
568 parameters_.solution_feasibility_tolerance();
569 const Fractional primal_residual =
570 variable_values_.ComputeMaximumPrimalResidual();
571 const Fractional dual_residual =
572 reduced_costs_.ComputeMaximumDualResidual();
573 if (primal_residual > solution_tolerance ||
574 dual_residual > solution_tolerance) {
575 SOLVER_LOG(logger_,
576 "OPTIMAL was reported, yet one of the residuals is "
577 "above the solution feasibility tolerance after the "
578 "shift/perturbation are removed.");
579 if (parameters_.change_status_to_imprecise()) {
580 problem_status_ = ProblemStatus::IMPRECISE;
581 }
582 } else {
583 // We use the "precise" tolerances here to try to report the best
584 // possible solution. Note however that we cannot really hope for an
585 // infeasibility lower than its corresponding residual error. Note that
586 // we already adapt the tolerance like this during the simplex
587 // execution.
588 const Fractional primal_tolerance =
589 std::max(primal_residual,
590 Fractional(parameters_.primal_feasibility_tolerance()));
591 const Fractional dual_tolerance =
592 std::max(dual_residual,
593 Fractional(parameters_.dual_feasibility_tolerance()));
594 const Fractional primal_infeasibility =
595 variable_values_.ComputeMaximumPrimalInfeasibility();
596 const Fractional dual_infeasibility =
597 reduced_costs_.ComputeMaximumDualInfeasibility();
598 if (primal_infeasibility > primal_tolerance &&
599 dual_infeasibility > dual_tolerance) {
600 SOLVER_LOG(logger_,
601 "OPTIMAL was reported, yet both of the infeasibility "
602 "are above the tolerance after the "
603 "shift/perturbation are removed.");
604 if (parameters_.change_status_to_imprecise()) {
605 problem_status_ = ProblemStatus::IMPRECISE;
606 }
607 } else if (primal_infeasibility > primal_tolerance) {
608 if (num_optims == parameters_.max_number_of_reoptimizations()) {
609 SOLVER_LOG(logger_,
610 "The primal infeasibility is still higher than the "
611 "requested internal tolerance, but the maximum "
612 "number of optimization is reached.");
613 break;
614 }
615 SOLVER_LOG(logger_, "");
616 SOLVER_LOG(logger_, "Re-optimizing with dual simplex ... ");
617 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
618 } else if (dual_infeasibility > dual_tolerance) {
619 if (num_optims == parameters_.max_number_of_reoptimizations()) {
620 SOLVER_LOG(logger_,
621 "The dual infeasibility is still higher than the "
622 "requested internal tolerance, but the maximum "
623 "number of optimization is reached.");
624 break;
625 }
626 SOLVER_LOG(logger_, "");
627 SOLVER_LOG(logger_, "Re-optimizing with primal simplex ... ");
628 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
629 }
630 }
631 }
632 }
633
634 // Check that the return status is "precise".
635 //
636 // TODO(user): we currently skip the DUAL_INFEASIBLE status because the
637 // quantities are not up to date in this case.
638 if (parameters_.change_status_to_imprecise() &&
639 problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
640 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
641 if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
642 reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
643 problem_status_ = ProblemStatus::IMPRECISE;
644 } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
645 problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
646 problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
647 if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
648 problem_status_ = ProblemStatus::IMPRECISE;
649 }
650 } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
651 problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
652 problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
653 if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
654 problem_status_ = ProblemStatus::IMPRECISE;
655 }
656 }
657 }
658
659 total_time_ = time_limit->GetElapsedTime() - start_time;
660 optimization_time_ = total_time_ - feasibility_time_;
661 num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
662
663 // If the user didn't provide starting variable values, then there is no need
664 // to check for super-basic variables.
665 if (!variable_starting_values_.empty()) {
666 const int num_super_basic = ComputeNumberOfSuperBasicVariables();
667 if (num_super_basic > 0) {
668 SOLVER_LOG(logger_,
669 "Num super-basic variables left after optimize phase: ",
670 num_super_basic);
671 if (parameters_.push_to_vertex()) {
672 if (problem_status_ == ProblemStatus::OPTIMAL) {
673 SOLVER_LOG(logger_, "");
674 phase_ = Phase::PUSH;
675 GLOP_RETURN_IF_ERROR(PrimalPush(time_limit));
676 // TODO(user): We should re-check for feasibility at this point and
677 // apply clean-up as needed.
678 } else {
679 SOLVER_LOG(logger_,
680 "Skipping push phase because optimize didn't succeed.");
681 }
682 }
683 }
684 }
685
686 total_time_ = time_limit->GetElapsedTime() - start_time;
687 push_time_ = total_time_ - feasibility_time_ - optimization_time_;
688 num_push_iterations_ = num_iterations_ - num_feasibility_iterations_ -
689 num_optimization_iterations_;
690
691 // Store the result for the solution getters.
692 solution_objective_value_ = ComputeInitialProblemObjectiveValue();
693 solution_dual_values_ = reduced_costs_.GetDualValues();
694 solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
695 SaveState();
696
697 if (is_maximization_problem) {
698 ChangeSign(&solution_dual_values_);
699 ChangeSign(&solution_reduced_costs_);
700 }
701
702 // If the problem is unbounded, set the objective value to +/- infinity.
703 if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
704 problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
705 solution_objective_value_ =
706 (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
707 : -kInfinity;
708 if (is_maximization_problem) {
709 solution_objective_value_ = -solution_objective_value_;
710 }
711 }
712
713 variable_starting_values_.clear();
714 DisplayAllStats();
715 return Status::OK();
716}
717
// Solution accessors. NOTE(review): most signature lines in this section
// (original lines 718, 722, 726, 734, 738, 742, 746, 750, 756, 762, 766-772,
// 775, 779, 784, 791) are elided from this listing; only the bodies remain.
// The first three fragments return problem_status_, solution_objective_value_
// and num_iterations_ respectively.
719 return problem_status_;
720}
721
723 return solution_objective_value_;
724}
725
727 return num_iterations_;
728}
729
730RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
731
732ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
733
// Current value of a variable, then its reduced cost, the full reduced-cost
// row, a dual value by row, and a variable status by column.
735 return variable_values_.Get(col);
736}
737
739 return solution_reduced_costs_[col];
740}
741
743 return solution_reduced_costs_;
744}
745
747 return solution_dual_values_[row];
748}
749
751 return variables_info_.GetStatusRow()[col];
752}
753
754const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
755
757 // Note the negative sign since the slack variable is such that
758 // constraint_activity + slack_value = 0.
759 return -variable_values_.Get(SlackColIndex(row));
760}
761
// NOTE(review): the switch/mapping over the slack status (original lines
// 766-772) is mostly elided here; only two closing braces survive.
763 // The status of the given constraint is the same as the status of the
764 // associated slack variable with a change of sign.
765 const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
768 }
771 }
773}
774
// Unbounded-ray accessors: each DCHECKs the matching unbounded status.
776 DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED);
777 return solution_primal_ray_;
778}
780 DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
781 return solution_dual_ray_;
782}
783
785 DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
786 return solution_dual_ray_row_combination_;
787}
788
789ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
790
792 DCHECK(basis_factorization_.GetColumnPermutation().empty());
793 return basis_factorization_;
794}
795
// Formats a short multi-line, human-readable summary of the last solve:
// status, timings, iteration counts per phase, and whether the
// stop-after-first-basis flag was set.
796std::string RevisedSimplex::GetPrettySolverStats() const {
797 return absl::StrFormat(
798 "Problem status : %s\n"
799 "Solving time : %-6.4g\n"
800 "Number of iterations : %u\n"
801 "Time for solvability (first phase) : %-6.4g\n"
802 "Number of iterations for solvability : %u\n"
803 "Time for optimization : %-6.4g\n"
804 "Number of iterations for optimization : %u\n"
805 "Stop after first basis : %d\n",
806 GetProblemStatusString(problem_status_), total_time_, num_iterations_,
807 feasibility_time_, num_feasibility_iterations_, optimization_time_,
808 num_optimization_iterations_,
809 absl::GetFlag(FLAGS_simplex_stop_after_first_basis));
810}
811
// NOTE(review): the signature line (original line 812, presumably
// RevisedSimplex::DeterministicTime) is elided from this listing. Sums the
// deterministic-time estimates of the sub-components plus the price updates.
813 // TODO(user): Count what is missing.
814 return DeterministicTimeForFpOperations(num_update_price_operations_) +
815 basis_factorization_.DeterministicTime() +
816 update_row_.DeterministicTime() +
817 entering_variable_.DeterministicTime() +
818 reduced_costs_.DeterministicTime() +
819 primal_edge_norms_.DeterministicTime();
820}
821
822void RevisedSimplex::SetVariableNames() {
823 variable_name_.resize(num_cols_, "");
824 for (ColIndex col(0); col < first_slack_col_; ++col) {
825 const ColIndex var_index = col + 1;
826 variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
827 }
828 for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
829 const ColIndex var_index = col - first_slack_col_ + 1;
830 variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
831 }
832}
833
// Marks `col` non-basic with the given status, then has variable_values_
// derive the variable's value from that new status.
834void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
835 ColIndex col, VariableStatus status) {
836 variables_info_.UpdateToNonBasicStatus(col, status);
837 variable_values_.SetNonBasicVariableValueFromStatus(col);
838}
839
840bool RevisedSimplex::BasisIsConsistent() const {
841 const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
842 const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
843 for (RowIndex row(0); row < num_rows_; ++row) {
844 const ColIndex col = basis_[row];
845 if (!is_basic.IsSet(col)) return false;
846 if (variable_statuses[col] != VariableStatus::BASIC) return false;
847 }
848 ColIndex cols_in_basis(0);
849 ColIndex cols_not_in_basis(0);
850 for (ColIndex col(0); col < num_cols_; ++col) {
851 cols_in_basis += is_basic.IsSet(col);
852 cols_not_in_basis += !is_basic.IsSet(col);
853 if (is_basic.IsSet(col) !=
854 (variable_statuses[col] == VariableStatus::BASIC)) {
855 return false;
856 }
857 }
858 if (cols_in_basis != RowToColIndex(num_rows_)) return false;
859 if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
860 return true;
861}
862
863// Note(user): The basis factorization is not updated by this function but by
864// UpdateAndPivot().
// Swaps `entering_col` into the basis at position `basis_row`, giving the
// displaced (leaving) column the provided non-basic status. Invalidates the
// cached update row since the basis changed.
865void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
866 VariableStatus leaving_variable_status) {
867 SCOPED_TIME_STAT(&function_stats_);
868 DCHECK_COL_BOUNDS(entering_col);
869 DCHECK_ROW_BOUNDS(basis_row);
870
871 // Check that this is not called with an entering_col already in the basis
872 // and that the leaving col is indeed in the basis.
873 DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
874 DCHECK_NE(basis_[basis_row], entering_col);
875 DCHECK_NE(basis_[basis_row], kInvalidCol);
876
877 const ColIndex leaving_col = basis_[basis_row];
878 DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));
879
880 // Make leaving_col leave the basis and update relevant data.
881 // Note that the leaving variable value is not necessarily at its exact
882 // bound, which is like a bound shift.
883 variables_info_.UpdateToNonBasicStatus(leaving_col, leaving_variable_status);
// Only bound-type statuses are valid for a leaving variable.
884 DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
885 leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
886 leaving_variable_status == VariableStatus::FIXED_VALUE);
887
888 basis_[basis_row] = entering_col;
889 variables_info_.UpdateToBasicStatus(entering_col);
890 update_row_.Invalidate();
891}
892
893namespace {
894
895// Comparator used to sort column indices according to a given value vector.
896class ColumnComparator {
897 public:
898 explicit ColumnComparator(const DenseRow& value) : value_(value) {}
899 bool operator()(ColIndex col_a, ColIndex col_b) const {
900 return value_[col_a] < value_[col_b];
901 }
902
903 private:
904 const DenseRow& value_;
905};
906
907} // namespace
908
909// To understand better what is going on in this function, let us say that this
910// algorithm will produce the optimal solution to a problem containing only
911// singleton columns (provided that the variables start at the minimum possible
912// cost, see DefaultVariableStatus()). This is unit tested.
913//
914// The error_ must be equal to the constraint activity for the current variable
915// values before this function is called. If error_[row] is 0.0, that mean this
916// constraint is currently feasible.
917void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
918 SCOPED_TIME_STAT(&function_stats_);
919 // Computes the singleton columns and the cost variation of the corresponding
920 // variables (in the only possible direction, i.e. away from its current
921 // bound) for a unit change in the infeasibility of the corresponding row.
922 //
923 // Note that the slack columns will be treated as normal singleton columns.
924 std::vector<ColIndex> singleton_column;
925 DenseRow cost_variation(num_cols_, 0.0);
926 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
927 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
928 for (ColIndex col(0); col < num_cols_; ++col) {
929 if (compact_matrix_.column(col).num_entries() != 1) continue;
930 if (lower_bounds[col] == upper_bounds[col]) continue;
931 const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
932 if (variable_values_.Get(col) == lower_bounds[col]) {
933 cost_variation[col] = objective_[col] / std::abs(slope);
934 } else {
935 cost_variation[col] = -objective_[col] / std::abs(slope);
936 }
937 singleton_column.push_back(col);
938 }
939 if (singleton_column.empty()) return;
940
941 // Sort the singleton columns for the case where many of them correspond to
942 // the same row (equivalent to a piecewise-linear objective on this variable).
943 // Negative cost_variation first since moving the singleton variable away from
944 // its current bound means the least decrease in the objective function for
945 // the same "error" variation.
946 ColumnComparator comparator(cost_variation);
947 std::sort(singleton_column.begin(), singleton_column.end(), comparator);
948 DCHECK_LE(cost_variation[singleton_column.front()],
949 cost_variation[singleton_column.back()]);
950
951 // Use a singleton column to "absorb" the error when possible to avoid
952 // introducing unneeded artificial variables. Note that with scaling on, the
953 // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
954 // them) and that the SingletonColumnSignPreprocessor makes them all positive.
955 // However, this code works for any coefficient value.
956 const DenseRow& variable_values = variable_values_.GetDenseRow();
957 for (const ColIndex col : singleton_column) {
958 const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
959
960 // If no singleton columns have entered the basis for this row, choose the
961 // first one. It will be the one with the least decrease in the objective
962 // function when it leaves the basis.
963 if ((*basis)[row] == kInvalidCol) {
964 (*basis)[row] = col;
965 }
966
967 // If there is already no error in this row (i.e. it is primal-feasible),
968 // there is nothing to do.
969 if (error_[row] == 0.0) continue;
970
971 // In this case, all the infeasibility can be "absorbed" and this variable
972 // may not be at one of its bound anymore, so we have to use it in the
973 // basis.
974 const Fractional coeff =
975 compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
976 const Fractional new_value = variable_values[col] + error_[row] / coeff;
977 if (new_value >= lower_bounds[col] && new_value <= upper_bounds[col]) {
978 error_[row] = 0.0;
979
980 // Use this variable in the initial basis.
981 (*basis)[row] = col;
982 continue;
983 }
984
985 // The idea here is that if the singleton column cannot be used to "absorb"
986 // all error_[row], if it is boxed, it can still be used to make the
987 // infeasibility smaller (with a bound flip).
988 const Fractional box_width = variables_info_.GetBoundDifference(col);
989 DCHECK_NE(box_width, 0.0);
990 DCHECK_NE(error_[row], 0.0);
991 const Fractional error_sign = error_[row] / coeff;
992 if (variable_values[col] == lower_bounds[col] && error_sign > 0.0) {
993 DCHECK(IsFinite(box_width));
994 error_[row] -= coeff * box_width;
995 SetNonBasicVariableStatusAndDeriveValue(col,
997 continue;
998 }
999 if (variable_values[col] == upper_bounds[col] && error_sign < 0.0) {
1000 DCHECK(IsFinite(box_width));
1001 error_[row] += coeff * box_width;
1002 SetNonBasicVariableStatusAndDeriveValue(col,
1004 continue;
1005 }
1006 }
1007}
1008
1009bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
1010 const LinearProgram& lp, bool lp_is_in_equation_form,
1011 bool* only_change_is_new_rows, bool* only_change_is_new_cols,
1012 ColIndex* num_new_cols) {
1013 SCOPED_TIME_STAT(&function_stats_);
1014 DCHECK(only_change_is_new_rows != nullptr);
1015 DCHECK(only_change_is_new_cols != nullptr);
1016 DCHECK(num_new_cols != nullptr);
1017 DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
1018 DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
1019
1020 // This works whether the lp is in equation form (with slack) or not.
1021 const bool old_part_of_matrix_is_unchanged =
1023 num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
1024
1025 // This is the only adaptation we need for the test below.
1026 const ColIndex lp_first_slack =
1027 lp_is_in_equation_form ? lp.GetFirstSlackVariable() : lp.num_variables();
1028
1029 // Test if the matrix is unchanged, and if yes, just returns true. Note that
1030 // this doesn't check the columns corresponding to the slack variables,
1031 // because they were checked by lp.IsInEquationForm() when Solve() was called.
1032 if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
1033 lp_first_slack == first_slack_col_) {
1034 // Tricky: if the parameters "use_transposed_matrix" changed since last call
1035 // we want to reflect the current state. We use the empty transposed matrix
1036 // to detect that. Recomputing the transpose when the matrix is empty is not
1037 // really a big overhead.
1038 if (parameters_.use_transposed_matrix()) {
1039 if (transposed_matrix_.IsEmpty()) {
1040 transposed_matrix_.PopulateFromTranspose(compact_matrix_);
1041 }
1042 } else {
1043 transposed_matrix_.Reset(RowIndex(0));
1044 }
1045 return true;
1046 }
1047
1048 // Check if the new matrix can be derived from the old one just by adding
1049 // new rows (i.e new constraints).
1050 *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
1051 lp.num_constraints() > num_rows_ &&
1052 lp_first_slack == first_slack_col_;
1053
1054 // Check if the new matrix can be derived from the old one just by adding
1055 // new columns (i.e new variables).
1056 *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
1057 lp.num_constraints() == num_rows_ &&
1058 lp_first_slack > first_slack_col_;
1059 *num_new_cols = *only_change_is_new_cols ? lp_first_slack - first_slack_col_
1060 : ColIndex(0);
1061
1062 // Initialize first_slack_.
1063 first_slack_col_ = lp_first_slack;
1064
1065 // Initialize the new dimensions.
1066 num_rows_ = lp.num_constraints();
1067 num_cols_ = lp_first_slack + RowToColIndex(lp.num_constraints());
1068
1069 // Populate compact_matrix_ and transposed_matrix_ if needed.
1070 if (lp_is_in_equation_form) {
1071 // TODO(user): This can be sped up by removing the MatrixView, but then
1072 // this path will likely go away.
1073 compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
1074 } else {
1075 compact_matrix_.PopulateFromSparseMatrixAndAddSlacks(lp.GetSparseMatrix());
1076 }
1077 if (parameters_.use_transposed_matrix()) {
1078 transposed_matrix_.PopulateFromTranspose(compact_matrix_);
1079 } else {
1080 transposed_matrix_.Reset(RowIndex(0));
1081 }
1082 return false;
1083}
1084
1085// Preconditions: This should only be called if there are only new variable
1086// in the lp.
1087bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
1088 const LinearProgram& lp, bool lp_is_in_equation_form,
1089 ColIndex num_new_cols) {
1090 SCOPED_TIME_STAT(&function_stats_);
1091 DCHECK_LE(num_new_cols, first_slack_col_);
1092 const ColIndex first_new_col(first_slack_col_ - num_new_cols);
1093
1094 // Check the original variable bounds.
1095 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1096 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1097 for (ColIndex col(0); col < first_new_col; ++col) {
1098 if (lower_bounds[col] != lp.variable_lower_bounds()[col] ||
1099 upper_bounds[col] != lp.variable_upper_bounds()[col]) {
1100 return false;
1101 }
1102 }
1103
1104 // Check that each new variable has a bound of zero.
1105 for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
1106 if (lp.variable_lower_bounds()[col] != 0.0 &&
1107 lp.variable_upper_bounds()[col] != 0.0) {
1108 return false;
1109 }
1110 }
1111
1112 // Check that the slack bounds are unchanged.
1113 if (lp_is_in_equation_form) {
1114 for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
1115 if (lower_bounds[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
1116 upper_bounds[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
1117 return false;
1118 }
1119 }
1120 } else {
1121 DCHECK_EQ(num_rows_, lp.num_constraints());
1122 for (RowIndex row(0); row < num_rows_; ++row) {
1123 const ColIndex col = first_slack_col_ + RowToColIndex(row);
1124 if (lower_bounds[col - num_new_cols] !=
1125 -lp.constraint_upper_bounds()[row] ||
1126 upper_bounds[col - num_new_cols] !=
1127 -lp.constraint_lower_bounds()[row]) {
1128 return false;
1129 }
1130 }
1131 }
1132 return true;
1133}
1134
1135bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
1136 const LinearProgram& lp) {
1137 SCOPED_TIME_STAT(&function_stats_);
1138
1139 bool objective_is_unchanged = true;
1140 objective_.resize(num_cols_, 0.0);
1141
1142 // This function work whether the lp is in equation form (with slack) or
1143 // without, since the objective of the slacks are always zero.
1144 DCHECK_GE(num_cols_, lp.num_variables());
1145
1146 const auto obj_coeffs = lp.objective_coefficients().const_view();
1147 for (ColIndex col(obj_coeffs.size()); col < num_cols_; ++col) {
1148 if (objective_[col] != 0.0) {
1149 objective_is_unchanged = false;
1150 objective_[col] = 0.0;
1151 }
1152 }
1153
1154 if (lp.IsMaximizationProblem()) {
1155 // Note that we use the minimization version of the objective internally.
1156 for (ColIndex col(0); col < obj_coeffs.size(); ++col) {
1157 const Fractional coeff = -obj_coeffs[col];
1158 if (objective_[col] != coeff) {
1159 objective_is_unchanged = false;
1160 objective_[col] = coeff;
1161 }
1162 }
1163 objective_offset_ = -lp.objective_offset();
1164 objective_scaling_factor_ = -lp.objective_scaling_factor();
1165 } else {
1166 for (ColIndex col(0); col < obj_coeffs.size(); ++col) {
1167 const Fractional coeff = obj_coeffs[col];
1168 if (objective_[col] != coeff) {
1169 objective_is_unchanged = false;
1170 objective_[col] = coeff;
1171 }
1172 }
1173 objective_offset_ = lp.objective_offset();
1174 objective_scaling_factor_ = lp.objective_scaling_factor();
1175 }
1176
1177 return objective_is_unchanged;
1178}
1179
1180void RevisedSimplex::InitializeObjectiveLimit() {
1181 objective_limit_reached_ = false;
1182 DCHECK(std::isfinite(objective_offset_));
1183 DCHECK(std::isfinite(objective_scaling_factor_));
1184 DCHECK_NE(0.0, objective_scaling_factor_);
1185
1186 // This sets dual_objective_limit_ and then primal_objective_limit_.
1187 for (const bool set_dual : {true, false}) {
1188 // NOTE(user): If objective_scaling_factor_ is negative, the optimization
1189 // direction was reversed (during preprocessing or inside revised simplex),
1190 // i.e., the original problem is maximization. In such case the _meaning_ of
1191 // the lower and upper limits is swapped. To this end we must change the
1192 // signs of limits, which happens automatically when calculating shifted
1193 // limits. We must also use upper (resp. lower) limit in place of lower
1194 // (resp. upper) limit when calculating the final objective_limit_.
1195 //
1196 // Choose lower limit if using the dual simplex and scaling factor is
1197 // negative or if using the primal simplex and scaling is nonnegative, upper
1198 // limit otherwise.
1199 const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
1200 ? parameters_.objective_lower_limit()
1201 : parameters_.objective_upper_limit();
1202 const Fractional shifted_limit =
1203 limit / objective_scaling_factor_ - objective_offset_;
1204 if (set_dual) {
1205 dual_objective_limit_ = shifted_limit;
1206 } else {
1207 primal_objective_limit_ = shifted_limit;
1208 }
1209 }
1210}
1211
1212// This implementation starts with an initial matrix B equal to the identity
1213// matrix (modulo a column permutation). For that it uses either the slack
1214// variables or the singleton columns present in the problem. Afterwards, the
1215// fixed slacks in the basis are exchanged with normal columns of A if possible
1216// by the InitialBasis class.
1217Status RevisedSimplex::CreateInitialBasis() {
1218 SCOPED_TIME_STAT(&function_stats_);
1219
1220 // Initialize the variable values and statuses.
1221 // Note that for the dual algorithm, boxed variables will be made
1222 // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
1223 // really matter at which of their two finite bounds they start.
1224 variables_info_.InitializeToDefaultStatus();
1225 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
1226
1227 // Start by using an all-slack basis.
1228 RowToColMapping basis(num_rows_, kInvalidCol);
1229 for (RowIndex row(0); row < num_rows_; ++row) {
1230 basis[row] = SlackColIndex(row);
1231 }
1232
1233 // If possible, for the primal simplex we replace some slack variables with
1234 // some singleton columns present in the problem.
1235 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1236 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1237 if (!parameters_.use_dual_simplex() &&
1238 parameters_.initial_basis() != GlopParameters::MAROS &&
1239 parameters_.exploit_singleton_column_in_initial_basis()) {
1240 // For UseSingletonColumnInInitialBasis() to work better, we change
1241 // the value of the boxed singleton column with a non-zero cost to the best
1242 // of their two bounds.
1243 for (ColIndex col(0); col < num_cols_; ++col) {
1244 if (compact_matrix_.column(col).num_entries() != 1) continue;
1245 const VariableStatus status = variables_info_.GetStatusRow()[col];
1246 const Fractional objective = objective_[col];
1247 if (objective > 0 && IsFinite(lower_bounds[col]) &&
1249 SetNonBasicVariableStatusAndDeriveValue(col,
1251 } else if (objective < 0 && IsFinite(upper_bounds[col]) &&
1253 SetNonBasicVariableStatusAndDeriveValue(col,
1255 }
1256 }
1257
1258 // Compute the primal infeasibility of the initial variable values in
1259 // error_.
1260 ComputeVariableValuesError();
1261
1262 // TODO(user): A better but slightly more complex algorithm would be to:
1263 // - Ignore all singleton columns except the slacks during phase I.
1264 // - For this, change the slack variable bounds accordingly.
1265 // - At the end of phase I, restore the slack variable bounds and perform
1266 // the same algorithm to start with feasible and "optimal" values of the
1267 // singleton columns.
1268 basis.assign(num_rows_, kInvalidCol);
1269 UseSingletonColumnInInitialBasis(&basis);
1270
1271 // Eventually complete the basis with fixed slack columns.
1272 for (RowIndex row(0); row < num_rows_; ++row) {
1273 if (basis[row] == kInvalidCol) {
1274 basis[row] = SlackColIndex(row);
1275 }
1276 }
1277 }
1278
1279 // Use an advanced initial basis to remove the fixed variables from the basis.
1280 if (parameters_.initial_basis() == GlopParameters::NONE) {
1281 return InitializeFirstBasis(basis);
1282 }
1283 if (parameters_.initial_basis() == GlopParameters::MAROS) {
1284 InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1285 upper_bounds, variables_info_.GetTypeRow());
1286 if (parameters_.use_dual_simplex()) {
1287 // This dual version only uses zero-cost columns to complete the
1288 // basis.
1289 initial_basis.GetDualMarosBasis(num_cols_, &basis);
1290 } else {
1291 initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1292 }
1293 int number_changed = 0;
1294 for (RowIndex row(0); row < num_rows_; ++row) {
1295 if (basis[row] != SlackColIndex(row)) {
1296 number_changed++;
1297 }
1298 }
1299 VLOG(1) << "Number of Maros basis changes: " << number_changed;
1300 } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1301 parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1302 // First unassign the fixed variables from basis.
1303 int num_fixed_variables = 0;
1304 for (RowIndex row(0); row < basis.size(); ++row) {
1305 const ColIndex col = basis[row];
1306 if (lower_bounds[col] == upper_bounds[col]) {
1307 basis[row] = kInvalidCol;
1308 ++num_fixed_variables;
1309 }
1310 }
1311
1312 if (num_fixed_variables == 0) {
1313 SOLVER_LOG(logger_, "Crash is set to ", parameters_.initial_basis(),
1314 " but there is no equality rows to remove from initial all "
1315 "slack basis. Starting from there.");
1316 } else {
1317 // Then complete the basis with an advanced initial basis algorithm.
1318 SOLVER_LOG(logger_, "Trying to remove ", num_fixed_variables,
1319 " fixed variables from the initial basis.");
1320 InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1321 upper_bounds, variables_info_.GetTypeRow());
1322
1323 if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1324 if (parameters_.use_scaling()) {
1325 initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1326 } else {
1327 VLOG(1) << "Bixby initial basis algorithm requires the problem "
1328 << "to be scaled. Skipping Bixby's algorithm.";
1329 }
1330 } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1331 // Note the use of num_cols_ here because this algorithm
1332 // benefits from treating fixed slack columns like any other column.
1333 if (parameters_.use_dual_simplex()) {
1334 // This dual version only uses zero-cost columns to complete the
1335 // basis.
1336 initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1337 } else {
1338 initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1339 }
1340
1341 const Status status = InitializeFirstBasis(basis);
1342 if (status.ok()) {
1343 return status;
1344 } else {
1345 SOLVER_LOG(
1346 logger_,
1347 "Advanced basis algo failed, Reverting to all slack basis.");
1348
1349 for (RowIndex row(0); row < num_rows_; ++row) {
1350 basis[row] = SlackColIndex(row);
1351 }
1352 }
1353 }
1354 }
1355 } else {
1356 LOG(WARNING) << "Unsupported initial_basis parameters: "
1357 << parameters_.initial_basis();
1358 }
1359
1360 return InitializeFirstBasis(basis);
1361}
1362
// Installs the given basis, completing it with slack columns where rows are
// unassigned, factorizes it, and finishes the status/value initialization.
// Returns an error status if the factorization fails or if the condition
// number upper bound of the basis is above the configured threshold.
Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
  basis_ = basis;

  // For each row which does not have a basic column, assign it to the
  // corresponding slack column.
  basis_.resize(num_rows_, kInvalidCol);
  for (RowIndex row(0); row < num_rows_; ++row) {
    if (basis_[row] == kInvalidCol) {
      basis_[row] = SlackColIndex(row);
    }
  }

  GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
  PermuteBasis();

  // Test that the upper bound on the condition number of basis is not too high.
  // The number was not computed by any rigorous analysis, we just prefer to
  // revert to the all slack basis if the condition number of our heuristic
  // first basis seems bad. See for instance on cond11.mps, where we get an
  // infinity upper bound.
  const Fractional condition_number_ub =
      basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
  if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
    const std::string error_message =
        absl::StrCat("The matrix condition number upper bound is too high: ",
                     condition_number_ub);
    SOLVER_LOG(logger_, error_message);
    return Status(Status::ERROR_LU, error_message);
  }

  // Everything is okay, finish the initialization.
  for (RowIndex row(0); row < num_rows_; ++row) {
    variables_info_.UpdateToBasicStatus(basis_[row]);
  }
  DCHECK(BasisIsConsistent());

  variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
  variable_values_.RecomputeBasicVariableValues();

  if (logger_->LoggingIsEnabled()) {
    // TODO(user): Maybe return an error status if this is too high. Note
    // however that if we want to do that, we need to reset variables_info_ to a
    // consistent state.
    const Fractional tolerance = parameters_.primal_feasibility_tolerance();
    if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
      SOLVER_LOG(
          logger_,
          "The primal residual of the initial basis is above the tolerance, ",
          variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
    }
  }
  return Status::OK();
}
1416
// Prepares the solver state for the given lp. Detects what changed since the
// last solve (matrix, objective, bounds) and, when possible, performs a
// "quick" warm start that avoids refactorizing the basis; otherwise delegates
// the remaining warm-start attempts to FinishInitialization().
Status RevisedSimplex::Initialize(const LinearProgram& lp) {
  parameters_ = initial_parameters_;
  PropagateParameters();

  // We accept both kind of input.
  //
  // TODO(user): Ideally there should be no need to ever put the slack in the
  // LinearProgram. That take extra memory (one big SparseColumn per slack) and
  // just add visible overhead in incremental solve when one wants to add/remove
  // constraints. But for historical reason, we handle both for now.
  const bool lp_is_in_equation_form = lp.IsInEquationForm();

  // Calling InitializeMatrixAndTestIfUnchanged() first is important because
  // this is where num_rows_ and num_cols_ are computed.
  //
  // Note that these functions can't depend on use_dual_simplex() since we may
  // change it below.
  ColIndex num_new_cols(0);
  bool only_change_is_new_rows = false;
  bool only_change_is_new_cols = false;
  const bool matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
      lp, lp_is_in_equation_form, &only_change_is_new_rows,
      &only_change_is_new_cols, &num_new_cols);
  const bool only_new_bounds =
      only_change_is_new_cols && num_new_cols > 0 &&
      OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
          lp, lp_is_in_equation_form, num_new_cols);

  // TODO(user): move objective with ReducedCosts class.
  const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);

  const bool bounds_are_unchanged =
      lp_is_in_equation_form
          ? variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
                lp.variable_lower_bounds(), lp.variable_upper_bounds())
          : variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
                lp.variable_lower_bounds(), lp.variable_upper_bounds(),
                lp.constraint_lower_bounds(), lp.constraint_upper_bounds());

  // If parameters_.allow_simplex_algorithm_change() is true and we already have
  // a primal (resp. dual) feasible solution, then we use the primal (resp.
  // dual) algorithm since there is a good chance that it will be faster.
  if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
    if (objective_is_unchanged && !bounds_are_unchanged) {
      parameters_.set_use_dual_simplex(true);
      PropagateParameters();
    }
    if (bounds_are_unchanged && !objective_is_unchanged) {
      parameters_.set_use_dual_simplex(false);
      PropagateParameters();
    }
  }

  InitializeObjectiveLimit();

  // Computes the variable name as soon as possible for logging.
  // TODO(user): do we really need to store them? we could just compute them
  // on the fly since we do not need the speed.
  if (VLOG_IS_ON(2)) {
    SetVariableNames();
  }

  // Warm-start? This is supported only if the solution_state_ is non empty,
  // i.e., this revised simplex i) was already used to solve a problem, or
  // ii) the solution state was provided externally. Note that the
  // solution_state_ may have nothing to do with the current problem, e.g.,
  // objective, matrix, and/or bounds had changed. So we support several
  // scenarios of warm-start depending on how did the problem change and which
  // simplex algorithm is used (primal or dual).
  bool solve_from_scratch = true;

  // Try to perform a "quick" warm-start with no matrix factorization involved.
  if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
    if (!parameters_.use_dual_simplex()) {
      // With primal simplex, always clear dual norms and dual pricing.
      // Incrementality is supported only if only change to the matrix and
      // bounds is adding new columns (objective may change), and that all
      // new columns have a bound equal to zero.
      dual_edge_norms_.Clear();
      dual_pricing_vector_.clear();
      if (matrix_is_unchanged && bounds_are_unchanged) {
        // TODO(user): Do not do that if objective_is_unchanged. Currently
        // this seems to break something. Investigate.
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      } else if (only_change_is_new_cols && only_new_bounds) {
        variables_info_.InitializeFromBasisState(first_slack_col_, num_new_cols,
                                                 solution_state_);
        variable_values_.ResetAllNonBasicVariableValues(
            variable_starting_values_);

        // The new columns were inserted before the slacks, so the stored
        // basis indices at or after first_new_col must be shifted.
        const ColIndex first_new_col(first_slack_col_ - num_new_cols);
        for (ColIndex& col_ref : basis_) {
          if (col_ref >= first_new_col) {
            col_ref += num_new_cols;
          }
        }

        // Make sure the primal edge norm are recomputed from scratch.
        // TODO(user): only the norms of the new columns actually need to be
        // computed.
        primal_edge_norms_.Clear();
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      }
    } else {
      // With dual simplex, always clear primal norms. Incrementality is
      // supported only if the objective remains the same (the matrix may
      // contain new rows and the bounds may change).
      primal_edge_norms_.Clear();
      if (objective_is_unchanged) {
        if (matrix_is_unchanged) {
          if (!bounds_are_unchanged) {
            variables_info_.InitializeFromBasisState(
                first_slack_col_, ColIndex(0), solution_state_);
            variable_values_.ResetAllNonBasicVariableValues(
                variable_starting_values_);
            variable_values_.RecomputeBasicVariableValues();
          }
          solve_from_scratch = false;
        } else if (only_change_is_new_rows) {
          // For the dual-simplex, we also perform a warm start if a couple of
          // new rows where added.
          variables_info_.InitializeFromBasisState(
              first_slack_col_, ColIndex(0), solution_state_);
          dual_edge_norms_.ResizeOnNewRows(num_rows_);

          // TODO(user): The reduced costs do not really need to be recomputed.
          // We just need to initialize the ones of the new slack variables to
          // 0.
          reduced_costs_.ClearAndRemoveCostShifts();
          dual_pricing_vector_.clear();

          // Note that this needs to be done after the Clear() calls above.
          if (InitializeFirstBasis(basis_).ok()) {
            solve_from_scratch = false;
          }
        }
      }
    }
  }

  return FinishInitialization(solve_from_scratch);
}
1561
// Second stage of Initialize(): if a "quick" warm start was not possible, try
// to reuse the variable statuses in solution_state_ to build and factorize a
// first basis; if that fails too, create an initial basis from scratch.
Status RevisedSimplex::FinishInitialization(bool solve_from_scratch) {
  // If we couldn't perform a "quick" warm start above, we can at least try to
  // reuse the variable statuses.
  if (solve_from_scratch && !solution_state_.IsEmpty()) {
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();

    // If an external basis has been provided or if the matrix changed, we need
    // to perform more work, e.g., factorize the proposed basis and validate it.
    variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0),
                                             solution_state_);

    // Use the set of basic columns as a "hint" to construct the first basis.
    std::vector<ColIndex> candidates;
    for (const ColIndex col : variables_info_.GetIsBasicBitRow()) {
      candidates.push_back(col);
    }
    SOLVER_LOG(logger_, "The warm-start state contains ", candidates.size(),
               " candidates for the basis (num_rows = ", num_rows_.value(),
               ").");

    // Optimization: Try to factorize it right away if we have the correct
    // number of element. Ideally the other path below would no require a
    // "double" factorization effort, so this would not be needed.
    if (candidates.size() == num_rows_) {
      basis_.clear();
      for (const ColIndex col : candidates) {
        basis_.push_back(col);
      }

      // TODO(user): Depending on the error here, there is no point doing extra
      // work below. This is the case when we fail because of a bad initial
      // condition number for instance.
      if (InitializeFirstBasis(basis_).ok()) {
        solve_from_scratch = false;
      }
    }

    if (solve_from_scratch) {
      basis_ = basis_factorization_.ComputeInitialBasis(candidates);
      const int num_super_basic =
          variables_info_.ChangeUnusedBasicVariablesToFree(basis_);
      const int num_snapped = variables_info_.SnapFreeVariablesToBound(
          parameters_.crossover_bound_snapping_distance(),
          variable_starting_values_);
      if (logger_->LoggingIsEnabled()) {
        // NOTE(review): the first message piece looks like it is missing the
        // num_super_basic value between the two string literals — confirm
        // against upstream before relying on this log line.
        SOLVER_LOG(logger_, "The initial basis did not use ",
                   " BASIC columns from the initial state and used ",
                   (num_rows_ - (candidates.size() - num_super_basic)).value(),
                   " slack variables that were not marked BASIC.");
        if (num_snapped > 0) {
          SOLVER_LOG(logger_, num_snapped,
                     " of the FREE variables where moved to their bound.");
        }
      }

      if (InitializeFirstBasis(basis_).ok()) {
        solve_from_scratch = false;
      } else {
        SOLVER_LOG(logger_,
                   "RevisedSimplex is not using the warm start "
                   "basis because it is not factorizable.");
      }
    }
  }

  if (solve_from_scratch) {
    SOLVER_LOG(logger_, "Starting basis: create from scratch.");
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();
    GLOP_RETURN_IF_ERROR(CreateInitialBasis());
  } else {
    SOLVER_LOG(logger_, "Starting basis: incremental solve.");
  }
  DCHECK(BasisIsConsistent());

  transpose_was_changed_ = false;
  return Status::OK();
}
1647
1648void RevisedSimplex::DisplayBasicVariableStatistics() {
1649 SCOPED_TIME_STAT(&function_stats_);
1650
1651 int num_fixed_variables = 0;
1652 int num_free_variables = 0;
1653 int num_variables_at_bound = 0;
1654 int num_slack_variables = 0;
1655 int num_infeasible_variables = 0;
1656
1657 const DenseRow& variable_values = variable_values_.GetDenseRow();
1658 const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1659 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1660 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1661 const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1662 for (RowIndex row(0); row < num_rows_; ++row) {
1663 const ColIndex col = basis_[row];
1664 const Fractional value = variable_values[col];
1665 if (variable_types[col] == VariableType::UNCONSTRAINED) {
1666 ++num_free_variables;
1667 }
1668 if (value > upper_bounds[col] + tolerance ||
1669 value < lower_bounds[col] - tolerance) {
1670 ++num_infeasible_variables;
1671 }
1672 if (col >= first_slack_col_) {
1673 ++num_slack_variables;
1674 }
1675 if (lower_bounds[col] == upper_bounds[col]) {
1676 ++num_fixed_variables;
1677 } else if (variable_values[col] == lower_bounds[col] ||
1678 variable_values[col] == upper_bounds[col]) {
1679 ++num_variables_at_bound;
1680 }
1681 }
1682
1683 SOLVER_LOG(logger_, "The matrix with slacks has ",
1684 compact_matrix_.num_rows().value(), " rows, ",
1685 compact_matrix_.num_cols().value(), " columns, ",
1686 compact_matrix_.num_entries().value(), " entries.");
1687 SOLVER_LOG(logger_, "Number of basic infeasible variables: ",
1688 num_infeasible_variables);
1689 SOLVER_LOG(logger_, "Number of basic slack variables: ", num_slack_variables);
1690 SOLVER_LOG(logger_,
1691 "Number of basic variables at bound: ", num_variables_at_bound);
1692 SOLVER_LOG(logger_, "Number of basic fixed variables: ", num_fixed_variables);
1693 SOLVER_LOG(logger_, "Number of basic free variables: ", num_free_variables);
1694 SOLVER_LOG(logger_, "Number of super-basic variables: ",
1695 ComputeNumberOfSuperBasicVariables());
1696}
1697
1698void RevisedSimplex::SaveState() {
1699 DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
1700 solution_state_.statuses = variables_info_.GetStatusRow();
1701 solution_state_has_been_set_externally_ = false;
1702}
1703
1704RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1705 DenseBooleanColumn contains_data(num_rows_, false);
1706 for (ColIndex col(0); col < num_cols_; ++col) {
1707 for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1708 contains_data[e.row()] = true;
1709 }
1710 }
1711 RowIndex num_empty_rows(0);
1712 for (RowIndex row(0); row < num_rows_; ++row) {
1713 if (!contains_data[row]) {
1714 ++num_empty_rows;
1715 VLOG(2) << "Row " << row << " is empty.";
1716 }
1717 }
1718 return num_empty_rows;
1719}
1720
1721ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1722 ColIndex num_empty_cols(0);
1723 for (ColIndex col(0); col < num_cols_; ++col) {
1724 if (compact_matrix_.column(col).IsEmpty()) {
1725 ++num_empty_cols;
1726 VLOG(2) << "Column " << col << " is empty.";
1727 }
1728 }
1729 return num_empty_cols;
1730}
1731
1732int RevisedSimplex::ComputeNumberOfSuperBasicVariables() const {
1733 const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
1734 int num_super_basic = 0;
1735 for (ColIndex col(0); col < num_cols_; ++col) {
1736 if (variable_statuses[col] == VariableStatus::FREE &&
1737 variable_values_.Get(col) != 0.0) {
1738 ++num_super_basic;
1739 }
1740 }
1741 return num_super_basic;
1742}
1743
1744void RevisedSimplex::CorrectErrorsOnVariableValues() {
1745 SCOPED_TIME_STAT(&function_stats_);
1746 DCHECK(basis_factorization_.IsRefactorized());
1747
1748 // TODO(user): The primal residual error does not change if we take degenerate
1749 // steps or if we do not change the variable values. No need to recompute it
1750 // in this case.
1751 const Fractional primal_residual =
1752 variable_values_.ComputeMaximumPrimalResidual();
1753
1754 // If the primal_residual is within the tolerance, no need to recompute
1755 // the basic variable values with a better precision.
1756 if (primal_residual >= parameters_.harris_tolerance_ratio() *
1757 parameters_.primal_feasibility_tolerance()) {
1758 variable_values_.RecomputeBasicVariableValues();
1759 VLOG(1) << "Primal infeasibility (bounds error) = "
1760 << variable_values_.ComputeMaximumPrimalInfeasibility()
1761 << ", Primal residual |A.x - b| = "
1762 << variable_values_.ComputeMaximumPrimalResidual();
1763 }
1764}
1765
1766void RevisedSimplex::ComputeVariableValuesError() {
1767 SCOPED_TIME_STAT(&function_stats_);
1768 error_.AssignToZero(num_rows_);
1769 const DenseRow& variable_values = variable_values_.GetDenseRow();
1770 for (ColIndex col(0); col < num_cols_; ++col) {
1771 const Fractional value = variable_values[col];
1772 compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1773 }
1774}
1775
// Computes the simplex "direction" for the entering column col by
// right-solving the current basis factorization against that problem column
// (i.e. direction_ ~ B^-1.A_col). Also makes sure direction_.non_zeros is
// populated (rebuilding it when the solve returned a dense result) and caches
// the infinity norm of the direction in direction_infinity_norm_.
void RevisedSimplex::ComputeDirection(ColIndex col) {
  SCOPED_TIME_STAT(&function_stats_);
  DCHECK_COL_BOUNDS(col);
  basis_factorization_.RightSolveForProblemColumn(col, &direction_);
  Fractional norm = 0.0;
  if (direction_.non_zeros.empty()) {
    // The solve produced no sparse pattern. We still compute the direction
    // non-zeros because our code relies on it.
    const RowIndex num_rows = num_rows_;
    const DenseColumn::ConstView direction = direction_.values.const_view();
    for (RowIndex row(0); row < num_rows; ++row) {
      const Fractional value = direction[row];
      if (value != 0.0) {
        direction_.non_zeros.push_back(row);
        norm = std::max(norm, std::abs(value));
      }
    }
  } else {
    // Sparse result: only visit the known non-zero positions.
    for (const auto e : direction_) {
      norm = std::max(norm, std::abs(e.coefficient()));
    }
  }
  direction_infinity_norm_ = norm;
  // Record the direction density; compiled away when stats are disabled.
  IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
      num_rows_ == 0 ? 0.0
                     : static_cast<double>(direction_.non_zeros.size()) /
                           static_cast<double>(num_rows_.value())));
}
1803
1804Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
1805 SCOPED_TIME_STAT(&function_stats_);
1806 compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
1807 for (const auto e : direction_) {
1808 compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
1809 &error_);
1810 }
1811 return InfinityNorm(error_);
1812}
1813
1814template <bool is_entering_reduced_cost_positive>
1815Fractional RevisedSimplex::GetRatio(const DenseRow& lower_bounds,
1816 const DenseRow& upper_bounds,
1817 RowIndex row) const {
1818 const ColIndex col = basis_[row];
1819 const Fractional direction = direction_[row];
1820 const Fractional value = variable_values_.Get(col);
1821 DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1822 DCHECK_NE(direction, 0.0);
1823 if (is_entering_reduced_cost_positive) {
1824 if (direction > 0.0) {
1825 return (upper_bounds[col] - value) / direction;
1826 } else {
1827 return (lower_bounds[col] - value) / direction;
1828 }
1829 } else {
1830 if (direction > 0.0) {
1831 return (value - lower_bounds[col]) / direction;
1832 } else {
1833 return (value - upper_bounds[col]) / direction;
1834 }
1835 }
1836}
1837
// First pass of the Harris ratio test. Returns the "harris ratio" (the
// maximum step allowed once each candidate ratio has been relaxed by the
// harris tolerance, and capped by bound_flip_ratio), and fills
// leaving_candidates with every row whose ratio is not greater than the
// current bound.
template <bool is_entering_reduced_cost_positive>
Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
    Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
  SCOPED_TIME_STAT(&function_stats_);
  const Fractional harris_tolerance =
      parameters_.harris_tolerance_ratio() *
      parameters_.primal_feasibility_tolerance();
  const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
                                   parameters_.primal_feasibility_tolerance();

  // Initially, we can skip any variable with a ratio greater than
  // bound_flip_ratio since it seems to be always better to choose the
  // bound-flip over such leaving variable.
  Fractional harris_ratio = bound_flip_ratio;
  leaving_candidates->Clear();

  // If the basis is refactorized, then we should have everything with a good
  // precision, so we only consider "acceptable" pivots. Otherwise we consider
  // all the entries, and if the algorithm return a pivot that is too small, we
  // will refactorize and recompute the relevant quantities.
  const Fractional threshold = basis_factorization_.IsRefactorized()
                                   ? parameters_.minimum_acceptable_pivot()
                                   : parameters_.ratio_test_zero_threshold();

  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
  for (const auto e : direction_) {
    const Fractional magnitude = std::abs(e.coefficient());
    if (magnitude <= threshold) continue;
    const Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(
        lower_bounds, upper_bounds, e.row());
    if (ratio <= harris_ratio) {
      leaving_candidates->SetCoefficient(e.row(), ratio);

      // The second max() makes sure harris_ratio is lower bounded by a small
      // positive value. The more classical approach is to bound it by 0.0 but
      // since we will always perform a small positive step, we allow any
      // variable to go a bit more out of bound (even if it is past the harris
      // tolerance). This increase the number of candidates and allows us to
      // choose a more numerically stable pivot.
      //
      // Note that at least lower bounding it by 0.0 is really important on
      // numerically difficult problems because its helps in the choice of a
      // stable pivot.
      harris_ratio = std::min(harris_ratio,
                              std::max(minimum_delta / magnitude,
                                       ratio + harris_tolerance / magnitude));
    }
  }
  return harris_ratio;
}
1889
1890namespace {
1891
1892// Returns true if the candidate ratio is supposed to be more stable than the
1893// current ratio (or if the two are equal).
1894// The idea here is to take, by order of preference:
1895// - the minimum positive ratio in order to intoduce a primal infeasibility
1896// which is as small as possible.
1897// - or the least negative one in order to have the smallest bound shift
1898// possible on the leaving variable.
1899bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1900 if (current >= 0.0) {
1901 return candidate >= 0.0 && candidate <= current;
1902 } else {
1903 return candidate >= current;
1904 }
1905}
1906
1907} // namespace
1908
1909// Ratio-test or Quotient-test. Choose the row of the leaving variable.
1910// Known as CHUZR or CHUZRO in FORTRAN codes.
1911Status RevisedSimplex::ChooseLeavingVariableRow(
1912 ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1913 RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
1914 SCOPED_TIME_STAT(&function_stats_);
1915 GLOP_RETURN_ERROR_IF_NULL(refactorize);
1916 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1917 GLOP_RETURN_ERROR_IF_NULL(step_length);
1918 DCHECK_COL_BOUNDS(entering_col);
1919 DCHECK_NE(0.0, reduced_cost);
1920
1921 // A few cases will cause the test to be recomputed from the beginning.
1922 int stats_num_leaving_choices = 0;
1923 equivalent_leaving_choices_.clear();
1924 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1925 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1926 while (true) {
1927 stats_num_leaving_choices = 0;
1928
1929 // We initialize current_ratio with the maximum step the entering variable
1930 // can take (bound-flip). Note that we do not use tolerance here.
1931 const Fractional entering_value = variable_values_.Get(entering_col);
1932 Fractional current_ratio =
1933 (reduced_cost > 0.0) ? entering_value - lower_bounds[entering_col]
1934 : upper_bounds[entering_col] - entering_value;
1935 DCHECK_GT(current_ratio, 0.0);
1936
1937 // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
1938 // actually computes the minimum leaving ratio of all the variables. This is
1939 // the same as the 'classic' ratio test.
1940 const Fractional harris_ratio =
1941 (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
1942 current_ratio, &leaving_candidates_)
1943 : ComputeHarrisRatioAndLeavingCandidates<false>(
1944 current_ratio, &leaving_candidates_);
1945
1946 // If the bound-flip is a viable solution (i.e. it doesn't move the basic
1947 // variable too much out of bounds), we take it as it is always stable and
1948 // fast.
1949 if (current_ratio <= harris_ratio) {
1950 *leaving_row = kInvalidRow;
1951 *step_length = current_ratio;
1952 break;
1953 }
1954
1955 // Second pass of the Harris ratio test. Amongst the variables with 'ratio
1956 // <= harris_ratio', we choose the leaving row with the largest coefficient.
1957 //
1958 // This has a big impact, because picking a leaving variable with a small
1959 // direction_[row] is the main source of Abnormal LU errors.
1960 Fractional pivot_magnitude = 0.0;
1961 stats_num_leaving_choices = 0;
1962 *leaving_row = kInvalidRow;
1963 equivalent_leaving_choices_.clear();
1964 for (const SparseColumn::Entry e : leaving_candidates_) {
1965 const Fractional ratio = e.coefficient();
1966 if (ratio > harris_ratio) continue;
1967 ++stats_num_leaving_choices;
1968 const RowIndex row = e.row();
1969
1970 // If the magnitudes are the same, we choose the leaving variable with
1971 // what is probably the more stable ratio, see
1972 // IsRatioMoreOrEquallyStable().
1973 const Fractional candidate_magnitude = std::abs(direction_[row]);
1974 if (candidate_magnitude < pivot_magnitude) continue;
1975 if (candidate_magnitude == pivot_magnitude) {
1976 if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
1977 if (ratio == current_ratio) {
1978 DCHECK_NE(kInvalidRow, *leaving_row);
1979 equivalent_leaving_choices_.push_back(row);
1980 continue;
1981 }
1982 }
1983 equivalent_leaving_choices_.clear();
1984 current_ratio = ratio;
1985 pivot_magnitude = candidate_magnitude;
1986 *leaving_row = row;
1987 }
1988
1989 // Break the ties randomly.
1990 if (!equivalent_leaving_choices_.empty()) {
1991 equivalent_leaving_choices_.push_back(*leaving_row);
1992 *leaving_row =
1993 equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1994 0, equivalent_leaving_choices_.size() - 1)(random_)];
1995 }
1996
1997 // Since we took care of the bound-flip at the beginning, at this point
1998 // we have a valid leaving row.
1999 DCHECK_NE(kInvalidRow, *leaving_row);
2000
2001 // A variable already outside one of its bounds +/- tolerance is considered
2002 // at its bound and its ratio is zero. Not doing this may lead to a step
2003 // that moves the objective in the wrong direction. We may want to allow
2004 // such steps, but then we will need to check that it doesn't break the
2005 // bounds of the other variables.
2006 if (current_ratio <= 0.0) {
2007 // Instead of doing a zero step, we do a small positive step. This
2008 // helps on degenerate problems.
2009 const Fractional minimum_delta =
2010 parameters_.degenerate_ministep_factor() *
2011 parameters_.primal_feasibility_tolerance();
2012 *step_length = minimum_delta / pivot_magnitude;
2013 } else {
2014 *step_length = current_ratio;
2015 }
2016
2017 // Note(user): Testing the pivot at each iteration is useful for debugging
2018 // an LU factorization problem. Remove the false if you need to investigate
2019 // this, it makes sure that this will be compiled away.
2020 if (/* DISABLES CODE */ (false)) {
2021 TestPivot(entering_col, *leaving_row);
2022 }
2023
2024 // We try various "heuristics" to avoid a small pivot.
2025 //
2026 // The smaller 'direction_[*leaving_row]', the less precise
2027 // it is. So we want to avoid pivoting by such a row. Small pivots lead to
2028 // ill-conditioned bases or even to matrices that are not a basis at all if
2029 // the actual (infinite-precision) coefficient is zero.
2030 //
2031 // TODO(user): We may have to choose another entering column if
2032 // we cannot prevent pivoting by a small pivot.
2033 // (Chvatal, p.115, about epsilon2.)
2034 if (pivot_magnitude <
2035 parameters_.small_pivot_threshold() * direction_infinity_norm_) {
2036 // The first countermeasure is to recompute everything to the best
2037 // precision we can in the hope of avoiding such a choice. Note that this
2038 // helps a lot on the Netlib problems.
2039 if (!basis_factorization_.IsRefactorized()) {
2040 VLOG(1) << "Refactorizing to avoid pivoting by "
2041 << direction_[*leaving_row]
2042 << " direction_infinity_norm_ = " << direction_infinity_norm_
2043 << " reduced cost = " << reduced_cost;
2044 *refactorize = true;
2045 return Status::OK();
2046 }
2047
2048 // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
2049 // we kwnow that this pivot will still have an acceptable magnitude.
2050 //
2051 // TODO(user): An issue left to fix is that if there is no such pivot at
2052 // all, then we will report unbounded even if this is not really the case.
2053 // As of 2018/07/18, this happens on l30.mps.
2054 VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
2055 << " direction_infinity_norm_ = " << direction_infinity_norm_
2056 << " reduced cost = " << reduced_cost;
2057 DCHECK_GE(std::abs(direction_[*leaving_row]),
2058 parameters_.minimum_acceptable_pivot());
2059 IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
2060 }
2061 break;
2062 }
2063
2064 // Update the target bound.
2065 if (*leaving_row != kInvalidRow) {
2066 const bool is_reduced_cost_positive = (reduced_cost > 0.0);
2067 const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
2068 *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
2069 ? upper_bounds[basis_[*leaving_row]]
2070 : lower_bounds[basis_[*leaving_row]];
2071 }
2072
2073 // Stats.
2075 ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
2076 if (!equivalent_leaving_choices_.empty()) {
2077 ratio_test_stats_.num_perfect_ties.Add(
2078 equivalent_leaving_choices_.size());
2079 }
2080 if (*leaving_row != kInvalidRow) {
2081 ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
2082 }
2083 });
2084 return Status::OK();
2085}
2086
2087namespace {
2088
2089// Store a row with its ratio, coefficient magnitude and target bound. This is
2090// used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
2091// details.
2092struct BreakPoint {
2093 BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
2094 Fractional _target_bound)
2095 : row(_row),
2096 ratio(_ratio),
2097 coeff_magnitude(_coeff_magnitude),
2098 target_bound(_target_bound) {}
2099
2100 // We want to process the breakpoints by increasing ratio and decreasing
2101 // coefficient magnitude (if the ratios are the same). Returns false if "this"
2102 // is before "other" in a priority queue.
2103 bool operator<(const BreakPoint& other) const {
2104 if (ratio == other.ratio) {
2105 if (coeff_magnitude == other.coeff_magnitude) {
2106 return row > other.row;
2107 }
2108 return coeff_magnitude < other.coeff_magnitude;
2109 }
2110 return ratio > other.ratio;
2111 }
2112
2113 RowIndex row;
2114 Fractional ratio;
2115 Fractional coeff_magnitude;
2116 Fractional target_bound;
2117};
2118
2119} // namespace
2120
// Primal phase-I ratio test. Unlike ChooseLeavingVariableRow(), basic
// variables are allowed to cross their bounds here: each crossing is a
// "breakpoint", and we pick the last breakpoint that still decreases the sum
// of primal infeasibilities while having the largest coefficient magnitude.
// On output, *leaving_row is kInvalidRow for a bound-flip of the entering
// variable; otherwise *target_bound is the bound the leaving variable will be
// moved to. May set *refactorize instead when the best pivot is too small.
void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
    ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
    RowIndex* leaving_row, Fractional* step_length,
    Fractional* target_bound) const {
  SCOPED_TIME_STAT(&function_stats_);
  RETURN_IF_NULL(refactorize);
  RETURN_IF_NULL(leaving_row);
  RETURN_IF_NULL(step_length);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_NE(0.0, reduced_cost);
  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();

  // We initialize current_ratio with the maximum step the entering variable
  // can take (bound-flip). Note that we do not use tolerance here.
  const Fractional entering_value = variable_values_.Get(entering_col);
  Fractional current_ratio = (reduced_cost > 0.0)
                                 ? entering_value - lower_bounds[entering_col]
                                 : upper_bounds[entering_col] - entering_value;
  DCHECK_GT(current_ratio, 0.0);

  std::vector<BreakPoint> breakpoints;
  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
  for (const auto e : direction_) {
    const Fractional direction =
        reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
    const Fractional magnitude = std::abs(direction);
    if (magnitude < tolerance) continue;

    // Computes by how much we can add 'direction' to the basic variable value
    // with index 'row' until it changes of primal feasibility status. That is
    // from infeasible to feasible or from feasible to infeasible. Note that the
    // transition infeasible->feasible->infeasible is possible. We use
    // tolerances here, but when the step will be performed, it will move the
    // variable to the target bound (possibly taking a small negative step).
    //
    // Note(user): The negative step will only happen when the leaving variable
    // was slightly infeasible (less than tolerance). Moreover, the overall
    // infeasibility will not necessarily increase since it doesn't take into
    // account all the variables with an infeasibility smaller than the
    // tolerance, and here we will at least improve the one of the leaving
    // variable.
    const ColIndex col = basis_[e.row()];
    DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));

    const Fractional value = variable_values_.Get(col);
    const Fractional lower_bound = lower_bounds[col];
    const Fractional upper_bound = upper_bounds[col];
    const Fractional to_lower = (lower_bound - tolerance - value) / direction;
    const Fractional to_upper = (upper_bound + tolerance - value) / direction;

    // Enqueue the possible transitions. Note that the second tests exclude the
    // case where to_lower or to_upper are infinite.
    if (to_lower >= 0.0 && to_lower < current_ratio) {
      breakpoints.push_back(
          BreakPoint(e.row(), to_lower, magnitude, lower_bound));
    }
    if (to_upper >= 0.0 && to_upper < current_ratio) {
      breakpoints.push_back(
          BreakPoint(e.row(), to_upper, magnitude, upper_bound));
    }
  }

  // Order the breakpoints by increasing ratio and decreasing coefficient
  // magnitude (if the ratios are the same).
  std::make_heap(breakpoints.begin(), breakpoints.end());

  // Select the last breakpoint that still improves the infeasibility and has
  // the largest coefficient magnitude.
  Fractional improvement = std::abs(reduced_cost);
  Fractional best_magnitude = 0.0;
  *leaving_row = kInvalidRow;
  while (!breakpoints.empty()) {
    const BreakPoint top = breakpoints.front();
    // TODO(user): consider using >= here. That will lead to bigger ratio and
    // hence a better impact on the infeasibility. The drawback is that more
    // effort may be needed to update the reduced costs.
    //
    // TODO(user): Use a random tie breaking strategy for BreakPoint with
    // same ratio and same coefficient magnitude? Koberstein explains in his PhD
    // that it helped on the dual-simplex.
    if (top.coeff_magnitude > best_magnitude) {
      *leaving_row = top.row;
      current_ratio = top.ratio;
      best_magnitude = top.coeff_magnitude;
      *target_bound = top.target_bound;
    }

    // As long as the sum of primal infeasibilities is decreasing, we look for
    // pivots that are numerically more stable.
    improvement -= top.coeff_magnitude;
    if (improvement <= 0.0) break;
    std::pop_heap(breakpoints.begin(), breakpoints.end());
    breakpoints.pop_back();
  }

  // Try to avoid a small pivot by refactorizing.
  if (*leaving_row != kInvalidRow) {
    const Fractional threshold =
        parameters_.small_pivot_threshold() * direction_infinity_norm_;
    if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
      *refactorize = true;
      return;
    }
  }
  *step_length = current_ratio;
}
2228
2229// This implements the pricing step for the dual simplex.
2230Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
2231 Fractional* cost_variation,
2232 Fractional* target_bound) {
2233 SCOPED_TIME_STAT(&function_stats_);
2234 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2235 GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2236 GLOP_RETURN_ERROR_IF_NULL(target_bound);
2237
2238 // This is not supposed to happen, but better be safe.
2239 if (dual_prices_.Size() == 0) {
2240 variable_values_.RecomputeDualPrices(
2241 parameters_.dual_price_prioritize_norm());
2242 }
2243
2244 // Return right away if there is no leaving variable.
2245 // Fill cost_variation and target_bound otherwise.
2246 *leaving_row = dual_prices_.GetMaximum();
2247 if (*leaving_row == kInvalidRow) return Status::OK();
2248
2249 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2250 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2251 const ColIndex leaving_col = basis_[*leaving_row];
2252 const Fractional value = variable_values_.Get(leaving_col);
2253 if (value < lower_bounds[leaving_col]) {
2254 *cost_variation = lower_bounds[leaving_col] - value;
2255 *target_bound = lower_bounds[leaving_col];
2256 DCHECK_GT(*cost_variation, 0.0);
2257 } else {
2258 *cost_variation = upper_bounds[leaving_col] - value;
2259 *target_bound = upper_bounds[leaving_col];
2260 DCHECK_LT(*cost_variation, 0.0);
2261 }
2262 return Status::OK();
2263}
2264
2265namespace {
2266
2267// Returns true if a basic variable with given cost and type is to be considered
2268// as a leaving candidate for the dual phase I.
2269bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
2270 Fractional threshold) {
2271 if (cost == 0.0) return false;
2274 (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
2275 (type == VariableType::LOWER_BOUNDED && cost > threshold);
2276}
2277
2278} // namespace
2279
2280// Important: The norm should be updated before this is called.
2281template <bool use_dense_update>
2282void RevisedSimplex::OnDualPriceChange(DenseColumn::ConstView squared_norm,
2283 RowIndex row, VariableType type,
2284 Fractional threshold) {
2285 const Fractional price = dual_pricing_vector_[row];
2286 const bool is_candidate =
2287 IsDualPhaseILeavingCandidate(price, type, threshold);
2288 if (is_candidate) {
2289 if (use_dense_update) {
2290 dual_prices_.DenseAddOrUpdate(row, Square(price) / squared_norm[row]);
2291 } else {
2292 dual_prices_.AddOrUpdate(row, Square(price) / squared_norm[row]);
2293 }
2294 } else {
2295 dual_prices_.Remove(row);
2296 }
2297}
2298
// Incrementally updates the dual phase-I pricing information after the basis
// change (leaving_row, entering_col): applies the eta update corresponding to
// direction_ to dual_pricing_vector_, clears the infeasibility contribution
// of the entering/leaving columns, and refreshes dual_prices_ for every
// touched row.
void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
                                           ColIndex entering_col) {
  SCOPED_TIME_STAT(&function_stats_);

  // If the prices are going to be recomputed, there is nothing to do. See the
  // logic at the beginning of DualPhaseIChooseLeavingVariableRow() which must
  // be in sync with this one.
  //
  // TODO(user): Move the logic in a single class, so it is easier to enforce
  // invariant.
  if (reduced_costs_.AreReducedCostsRecomputed() ||
      dual_edge_norms_.NeedsBasisRefactorization() ||
      dual_pricing_vector_.empty()) {
    return;
  }

  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
  const Fractional threshold = parameters_.ratio_test_zero_threshold();

  // Note that because the norm are also updated only on the position of the
  // direction, scaled_dual_pricing_vector_ will be up to date.
  const DenseColumn::ConstView squared_norms =
      dual_edge_norms_.GetEdgeSquaredNorms();

  // Convert the dual_pricing_vector_ from the old basis into the new one (which
  // is the same as multiplying it by an Eta matrix corresponding to the
  // direction).
  const Fractional step =
      dual_pricing_vector_[leaving_row] / direction_[leaving_row];
  for (const auto e : direction_) {
    dual_pricing_vector_[e.row()] -= e.coefficient() * step;
    OnDualPriceChange(squared_norms, e.row(), variable_type[basis_[e.row()]],
                      threshold);
  }
  // The loop above subtracted step from the leaving row's entry; overwrite it
  // with its value in the new basis.
  dual_pricing_vector_[leaving_row] = step;

  // The entering_col which was dual-infeasible is now dual-feasible, so we
  // have to remove it from the infeasibility sum.
  dual_pricing_vector_[leaving_row] -=
      dual_infeasibility_improvement_direction_[entering_col];
  if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
    --num_dual_infeasible_positions_;
  }
  dual_infeasibility_improvement_direction_[entering_col] = 0.0;

  // The leaving variable will also be dual-feasible.
  dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;

  // Update the leaving row entering candidate status.
  OnDualPriceChange(squared_norms, leaving_row, variable_type[entering_col],
                    threshold);
}
2351
// For each column in cols, recomputes its dual-infeasibility "improvement
// direction" sign (+1, -1 or 0 for dual-feasible) from its current reduced
// cost. Whenever a sign changed, num_dual_infeasible_positions_ is adjusted
// and (sign_delta * column) is accumulated into a scratchpad, which is then
// right-solved with the basis factorization and added to
// dual_pricing_vector_, refreshing dual_prices_ for every touched row.
template <typename Cols>
void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
    const Cols& cols) {
  SCOPED_TIME_STAT(&function_stats_);
  bool something_to_do = false;
  const DenseBitRow::ConstView can_decrease =
      variables_info_.GetCanDecreaseBitRow().const_view();
  const DenseBitRow::ConstView can_increase =
      variables_info_.GetCanIncreaseBitRow().const_view();
  const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
  const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
  auto improvement_direction = dual_infeasibility_improvement_direction_.view();
  for (const ColIndex col : cols) {
    const Fractional reduced_cost = reduced_costs[col];
    // +1 when the reduced cost is too negative but the variable can increase,
    // -1 in the symmetric case, 0 when the position is dual-feasible.
    const Fractional sign =
        (can_increase[col] && reduced_cost < -tolerance) ? 1.0
        : (can_decrease[col] && reduced_cost > tolerance) ? -1.0
                                                          : 0.0;
    if (sign != improvement_direction[col]) {
      if (sign == 0.0) {
        --num_dual_infeasible_positions_;
      } else if (improvement_direction[col] == 0.0) {
        ++num_dual_infeasible_positions_;
      }
      // Lazily prepare the scratchpad on the first sign change.
      if (!something_to_do) {
        initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
        initially_all_zero_scratchpad_.ClearSparseMask();
        initially_all_zero_scratchpad_.non_zeros.clear();
        something_to_do = true;
      }

      // We add a factor 10 because of the scattered access.
      num_update_price_operations_ +=
          10 * compact_matrix_.column(col).num_entries().value();
      compact_matrix_.ColumnAddMultipleToSparseScatteredColumn(
          col, sign - improvement_direction[col],
          &initially_all_zero_scratchpad_);
      improvement_direction[col] = sign;
    }
  }
  if (something_to_do) {
    initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
    initially_all_zero_scratchpad_.ClearSparseMask();
    const DenseColumn::ConstView squared_norms =
        dual_edge_norms_.GetEdgeSquaredNorms();

    const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
    const Fractional threshold = parameters_.ratio_test_zero_threshold();
    basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
    if (initially_all_zero_scratchpad_.non_zeros.empty()) {
      // Dense result: scan all rows and reset the scratchpad in one pass.
      dual_prices_.StartDenseUpdates();
      for (RowIndex row(0); row < num_rows_; ++row) {
        if (initially_all_zero_scratchpad_[row] == 0.0) continue;
        dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
        OnDualPriceChange</*use_dense_update=*/true>(
            squared_norms, row, variable_type[basis_[row]], threshold);
      }
      initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
    } else {
      // Sparse result: only touch (and re-zero) the non-zero positions.
      for (const auto e : initially_all_zero_scratchpad_) {
        dual_pricing_vector_[e.row()] += e.coefficient();
        OnDualPriceChange(squared_norms, e.row(),
                          variable_type[basis_[e.row()]], threshold);
        initially_all_zero_scratchpad_[e.row()] = 0.0;
      }
    }
    initially_all_zero_scratchpad_.non_zeros.clear();
  }
}
2421
2422Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
2423 RowIndex* leaving_row, Fractional* cost_variation,
2424 Fractional* target_bound) {
2425 SCOPED_TIME_STAT(&function_stats_);
2426 GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2427 GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2428 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2429 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2430
2431 // dual_infeasibility_improvement_direction_ is zero for dual-feasible
2432 // positions and contains the sign in which the reduced cost of this column
2433 // needs to move to improve the feasibility otherwise (+1 or -1).
2434 //
2435 // Its current value was the one used to compute dual_pricing_vector_ and
2436 // was updated accordingly by DualPhaseIUpdatePrice().
2437 //
2438 // If more variables changed of dual-feasibility status during the last
2439 // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
2440 // take them into account.
2441 if (reduced_costs_.AreReducedCostsRecomputed() ||
2442 dual_edge_norms_.NeedsBasisRefactorization() ||
2443 dual_pricing_vector_.empty()) {
2444 // Recompute everything from scratch.
2445 num_dual_infeasible_positions_ = 0;
2446 dual_pricing_vector_.AssignToZero(num_rows_);
2447 dual_prices_.ClearAndResize(num_rows_);
2448 dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
2449 DualPhaseIUpdatePriceOnReducedCostChange(
2450 variables_info_.GetIsRelevantBitRow());
2451 } else {
2452 // Update row is still equal to the row used during the last iteration
2453 // to update the reduced costs.
2454 DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
2455 }
2456
2457 // If there is no dual-infeasible position, we are done.
2458 *leaving_row = kInvalidRow;
2459 if (num_dual_infeasible_positions_ == 0) return Status::OK();
2460
2461 *leaving_row = dual_prices_.GetMaximum();
2462
2463 // Returns right away if there is no leaving variable or fill the other
2464 // return values otherwise.
2465 if (*leaving_row == kInvalidRow) return Status::OK();
2466 *cost_variation = dual_pricing_vector_[*leaving_row];
2467 const ColIndex leaving_col = basis_[*leaving_row];
2468 if (*cost_variation < 0.0) {
2469 *target_bound = upper_bounds[leaving_col];
2470 } else {
2471 *target_bound = lower_bounds[leaving_col];
2472 }
2473 DCHECK(IsFinite(*target_bound));
2474 return Status::OK();
2475}
2476
2477template <typename BoxedVariableCols>
2478void RevisedSimplex::MakeBoxedVariableDualFeasible(
2479 const BoxedVariableCols& cols, bool update_basic_values) {
2480 SCOPED_TIME_STAT(&function_stats_);
2481 std::vector<ColIndex> changed_cols;
2482
2483 // It is important to flip bounds within a tolerance because of precision
2484 // errors. Otherwise, this leads to cycling on many of the Netlib problems
2485 // since this is called at each iteration (because of the bound-flipping ratio
2486 // test).
2487 //
2488 // TODO(user): During an iteration, we might want to switch with a lower
2489 // tolerance bounds flip that were deemed good so that we can more easily make
2490 // progess?
2491 const Fractional threshold = reduced_costs_.GetDualFeasibilityTolerance();
2492
2493 const DenseRow& variable_values = variable_values_.GetDenseRow();
2494 const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
2495 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2496 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2497 const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2498 for (const ColIndex col : cols) {
2499 const Fractional reduced_cost = reduced_costs[col];
2500 const VariableStatus status = variable_status[col];
2501 DCHECK(variables_info_.GetTypeRow()[col] ==
2503 // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2504 DCHECK(variable_values[col] == lower_bounds[col] ||
2505 variable_values[col] == upper_bounds[col] ||
2506 status == VariableStatus::BASIC);
2507 if (reduced_cost > threshold && status == VariableStatus::AT_UPPER_BOUND) {
2508 variables_info_.UpdateToNonBasicStatus(col,
2510 changed_cols.push_back(col);
2511 } else if (reduced_cost < -threshold &&
2513 variables_info_.UpdateToNonBasicStatus(col,
2515 changed_cols.push_back(col);
2516 }
2517 }
2518
2519 if (!changed_cols.empty()) {
2520 iteration_stats_.num_dual_flips.Add(changed_cols.size());
2521 variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2522 update_basic_values);
2523 }
2524}
2525
2526Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2527 RowIndex leaving_row, Fractional target_bound) {
2528 SCOPED_TIME_STAT(&function_stats_);
2529
2530 // We just want the leaving variable to go to its target_bound.
2531 const ColIndex leaving_col = basis_[leaving_row];
2532 const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2533 Fractional unscaled_step = leaving_variable_value - target_bound;
2534
2535 // In Chvatal p 157 update_[entering_col] is used instead of
2536 // direction_[leaving_row], but the two quantities are actually the
2537 // same. This is because update_[col] is the value at leaving_row of
2538 // the right inverse of col and direction_ is the right inverse of the
2539 // entering_col. Note that direction_[leaving_row] is probably more
2540 // precise.
2541 // TODO(user): use this to check precision and trigger recomputation.
2542 return unscaled_step / direction_[leaving_row];
2543}
2544
2545bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2546 VLOG(1) << "Test pivot.";
2547 SCOPED_TIME_STAT(&function_stats_);
2548 const ColIndex leaving_col = basis_[leaving_row];
2549 basis_[leaving_row] = entering_col;
2550
2551 // TODO(user): If 'is_ok' is true, we could use the computed lu in
2552 // basis_factorization_ rather than recompute it during UpdateAndPivot().
2553 CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2554 const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2555 basis_[leaving_row] = leaving_col;
2556 return is_ok;
2557}
2558
2559// Note that this function is an optimization and that if it was doing nothing
2560// the algorithm will still be correct and work. Using it does change the pivot
2561// taken during the simplex method though.
2562void RevisedSimplex::PermuteBasis() {
2563 SCOPED_TIME_STAT(&function_stats_);
2564
2565 // Fetch the current basis column permutation and return if it is empty which
2566 // means the permutation is the identity.
2567 const ColumnPermutation& col_perm =
2568 basis_factorization_.GetColumnPermutation();
2569 if (col_perm.empty()) return;
2570
2571 // Permute basis_.
2572 ApplyColumnPermutationToRowIndexedVector(col_perm.const_view(), &basis_,
2573 &tmp_basis_);
2574
2575 // Permute dual_pricing_vector_ if needed.
2576 if (!dual_pricing_vector_.empty()) {
2577 // TODO(user): We need to permute dual_prices_ too now, we recompute
2578 // everything one each basis factorization, so this don't matter.
2579 ApplyColumnPermutationToRowIndexedVector(col_perm.const_view(),
2580 &dual_pricing_vector_,
2581 &tmp_dual_pricing_vector_);
2582 }
2583
2584 // Notify the other classes.
2585 reduced_costs_.UpdateDataOnBasisPermutation();
2586 dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);
2587
2588 // Finally, remove the column permutation from all subsequent solves since
2589 // it has been taken into account in basis_.
2590 basis_factorization_.SetColumnPermutationToIdentity();
2591}
2592
2593Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2594 RowIndex leaving_row,
2595 Fractional target_bound) {
2596 SCOPED_TIME_STAT(&function_stats_);
2597
2598 // Tricky and a bit hacky.
2599 //
2600 // The basis update code assumes that we already computed the left inverse of
2601 // the leaving row, otherwise it will just refactorize the basis. This left
2602 // inverse is needed by update_row_.ComputeUpdateRow(), so in most case it
2603 // will already be computed. However, in some situation we don't need the
2604 // full update row, so just the left inverse can be computed.
2605 //
2606 // TODO(user): Ideally this shouldn't be needed if we are going to refactorize
2607 // the basis anyway. So we should know that before hand which is currently
2608 // hard to do.
2609 Fractional pivot_from_update_row;
2610 if (update_row_.IsComputedFor(leaving_row)) {
2611 pivot_from_update_row = update_row_.GetCoefficient(entering_col);
2612 } else {
2613 // We only need the left inverse and the update row position at the
2614 // entering_col to check precision.
2615 update_row_.ComputeUnitRowLeftInverse(leaving_row);
2616 pivot_from_update_row = compact_matrix_.ColumnScalarProduct(
2617 entering_col, update_row_.GetUnitRowLeftInverse().values);
2618 }
2619
2620 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2621 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2622 const ColIndex leaving_col = basis_[leaving_row];
2623 const VariableStatus leaving_variable_status =
2624 lower_bounds[leaving_col] == upper_bounds[leaving_col]
2626 : target_bound == lower_bounds[leaving_col]
2629 if (variable_values_.Get(leaving_col) != target_bound) {
2630 ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2631 target_bound);
2632 }
2633 UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2634
2635 // Test precision by comparing two ways to get the "pivot".
2636 const Fractional pivot_from_direction = direction_[leaving_row];
2637 const Fractional diff =
2638 std::abs(pivot_from_update_row - pivot_from_direction);
2639 if (diff > parameters_.refactorization_threshold() *
2640 (1.0 + std::min(std::abs(pivot_from_update_row),
2641 std::abs(pivot_from_direction)))) {
2642 VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2643 << " diff = " << diff;
2644 // TODO(user): We need to be careful when we modify parameter like this
2645 // since it will not be reset until the next SetParameters() call.
2646 if (basis_factorization_.NumUpdates() < 10) {
2647 Fractional threshold = parameters_.lu_factorization_pivot_threshold();
2648 threshold = std::min(threshold * 1.5, 0.9);
2649 VLOG(1) << "Increasing LU pivot threshold " << threshold;
2650 parameters_.set_lu_factorization_pivot_threshold(threshold);
2651 basis_factorization_.SetParameters(parameters_);
2652 }
2653
2654 last_refactorization_reason_ = RefactorizationReason::IMPRECISE_PIVOT;
2655 GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2656 } else {
2658 basis_factorization_.Update(entering_col, leaving_row, direction_));
2659 }
2660 if (basis_factorization_.IsRefactorized()) {
2661 PermuteBasis();
2662 }
2663 return Status::OK();
2664}
2665
2666Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2667 SCOPED_TIME_STAT(&function_stats_);
2668 if (*refactorize && !basis_factorization_.IsRefactorized()) {
2669 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2670 update_row_.Invalidate();
2671 PermuteBasis();
2672 }
2673 *refactorize = false;
2674 return Status::OK();
2675}
2676
2678 if (col >= integrality_scale_.size()) {
2679 integrality_scale_.resize(col + 1, 0.0);
2680 }
2681 integrality_scale_[col] = scale;
2682}
2683
2684Status RevisedSimplex::Polish(TimeLimit* time_limit) {
2686 Cleanup update_deterministic_time_on_return(
2687 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2688
2689 // Get all non-basic variables with a reduced costs close to zero.
2690 // Note that because we only choose entering candidate with a cost of zero,
2691 // this set will not change (modulo epsilons).
2692 const DenseRow::ConstView rc = reduced_costs_.GetReducedCosts();
2693 std::vector<ColIndex> candidates;
2694 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
2695 if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
2696 if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
2697 }
2698
2699 bool refactorize = false;
2700 int num_pivots = 0;
2701 Fractional total_gain = 0.0;
2702 for (int i = 0; i < 10; ++i) {
2703 AdvanceDeterministicTime(time_limit);
2704 if (time_limit->LimitReached()) break;
2705 if (num_pivots >= 5) break;
2706 if (candidates.empty()) break;
2707
2708 // Pick a random one and remove it from the list.
2709 const int index =
2710 std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
2711 const ColIndex entering_col = candidates[index];
2712 std::swap(candidates[index], candidates.back());
2713 candidates.pop_back();
2714
2715 // We need the entering variable to move in the correct direction.
2716 Fractional fake_rc = 1.0;
2717 if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
2718 CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
2719 fake_rc = -1.0;
2720 }
2721
2722 // Refactorize if needed.
2723 if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2724 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2725
2726 // Compute the direction and by how much we can move along it.
2727 ComputeDirection(entering_col);
2728 Fractional step_length;
2729 RowIndex leaving_row;
2730 Fractional target_bound;
2731 bool local_refactorize = false;
2733 ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
2734 &leaving_row, &step_length, &target_bound));
2735
2736 if (local_refactorize) continue;
2737 if (step_length == kInfinity || step_length == -kInfinity) continue;
2738 if (std::abs(step_length) <= 1e-6) continue;
2739 if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
2740 continue;
2741 }
2742 const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
2743
2744 // Evaluate if pivot reduce the fractionality of the basis.
2745 //
2746 // TODO(user): Count with more weight variable with a small domain, i.e.
2747 // binary variable, compared to a variable in [0, 1k] ?
2748 const auto get_diff = [this](ColIndex col, Fractional old_value,
2749 Fractional new_value) {
2750 if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
2751 return Fractional(0.0);
2752 }
2753 const Fractional s = integrality_scale_[col];
2754 return (std::abs(new_value * s - std::round(new_value * s)) -
2755 std::abs(old_value * s - std::round(old_value * s)));
2756 };
2757 Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
2758 variable_values_.Get(entering_col) + step);
2759 for (const auto e : direction_) {
2760 const ColIndex col = basis_[e.row()];
2761 const Fractional old_value = variable_values_.Get(col);
2762 const Fractional new_value = old_value - e.coefficient() * step;
2763 diff += get_diff(col, old_value, new_value);
2764 }
2765
2766 // Ignore low decrease in integrality.
2767 if (diff > -1e-2) continue;
2768 total_gain -= diff;
2769
2770 // We perform the change.
2771 num_pivots++;
2772 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2773
2774 // This is a bound flip of the entering column.
2775 if (leaving_row == kInvalidRow) {
2776 if (step > 0.0) {
2777 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2779 } else if (step < 0.0) {
2780 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2782 }
2783 continue;
2784 }
2785
2786 // Perform the pivot.
2787 const ColIndex leaving_col = basis_[leaving_row];
2788 update_row_.ComputeUpdateRow(leaving_row);
2789
2790 // Note that this will only do work if the norms are computed.
2791 //
2792 // TODO(user): We should probably move all the "update" in a function so
2793 // that all "iterations" function can just reuse the same code. Everything
2794 // that is currently not "cleared" should be updated. If one does not want
2795 // that, then it is easy to call Clear() on the quantities that do not needs
2796 // to be kept in sync with the current basis.
2797 primal_edge_norms_.UpdateBeforeBasisPivot(
2798 entering_col, leaving_col, leaving_row, direction_, &update_row_);
2799 dual_edge_norms_.UpdateBeforeBasisPivot(
2800 entering_col, leaving_row, direction_,
2801 update_row_.GetUnitRowLeftInverse());
2802
2803 // TODO(user): Rather than maintaining this, it is probably better to
2804 // recompute it in one go after Polish() is done. We don't use the reduced
2805 // costs here as we just assume that the set of candidates does not change.
2806 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2807 &update_row_);
2808
2809 const Fractional dir = -direction_[leaving_row] * step;
2810 const bool is_degenerate =
2811 (dir == 0.0) ||
2812 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2813 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2814 if (!is_degenerate) {
2815 variable_values_.Set(leaving_col, target_bound);
2816 }
2818 UpdateAndPivot(entering_col, leaving_row, target_bound));
2819 }
2820
2821 VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
2822 return Status::OK();
2823}
2824
2825// Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2826// x an n-vector.
2827//
2828// x is split in two parts x_B and x_N (B standing for basis).
2829// In the same way, A is split in A_B (also known as B) and A_N, and
2830// c is split into c_B and c_N.
2831//
2832// The goal is to minimize c_B.x_B + c_N.x_N
2833// subject to B.x_B + A_N.x_N = 0
2834// and x_lower <= x <= x_upper.
2835//
2836// To minimize c.x, at each iteration a variable from x_N is selected to
2837// enter the basis, and a variable from x_B is selected to leave the basis.
2838// To avoid explicit inversion of B, the algorithm solves two sub-systems:
2839// y.B = c_B and B.d = a (a being the entering column).
2840Status RevisedSimplex::PrimalMinimize(TimeLimit* time_limit) {
2842 Cleanup update_deterministic_time_on_return(
2843 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2844 num_consecutive_degenerate_iterations_ = 0;
2845 bool refactorize = false;
2846 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
2847
2848 // At this point, we are not sure the prices are always up to date, so
2849 // lets always reset them for the first iteration below.
2850 primal_prices_.ForceRecomputation();
2851
2852 if (phase_ == Phase::FEASIBILITY) {
2853 // Initialize the primal phase-I objective.
2854 // Note that this temporarily erases the problem objective.
2855 objective_.AssignToZero(num_cols_);
2856 variable_values_.UpdatePrimalPhaseICosts(
2857 util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2858 reduced_costs_.ResetForNewObjective();
2859 }
2860
2861 while (true) {
2862 AdvanceDeterministicTime(time_limit);
2863 if (time_limit->LimitReached()) break;
2864
2865 // TODO(user): we may loop a bit more than the actual number of iteration.
2866 // fix.
2867 ScopedTimeDistributionUpdater timer(&iteration_stats_.total);
2868
2869 // Trigger a refactorization if one of the class we use request it.
2870 if (!refactorize && reduced_costs_.NeedsBasisRefactorization()) {
2871 last_refactorization_reason_ = RefactorizationReason::RC;
2872 refactorize = true;
2873 }
2874 if (!refactorize && primal_edge_norms_.NeedsBasisRefactorization()) {
2875 last_refactorization_reason_ = RefactorizationReason::NORM;
2876 refactorize = true;
2877 }
2878 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2879
2880 if (basis_factorization_.IsRefactorized()) {
2881 CorrectErrorsOnVariableValues();
2882 DisplayIterationInfo(/*primal=*/true, last_refactorization_reason_);
2883 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
2884
2885 if (phase_ == Phase::FEASIBILITY) {
2886 // Since the variable values may have been recomputed, we need to
2887 // recompute the primal infeasible variables and update their costs.
2888 if (variable_values_.UpdatePrimalPhaseICosts(
2889 util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2890 &objective_)) {
2891 reduced_costs_.ResetForNewObjective();
2892 }
2893 }
2894
2895 // Computing the objective at each iteration takes time, so we just
2896 // check the limit when the basis is refactorized.
2897 if (phase_ == Phase::OPTIMIZATION &&
2898 ComputeObjectiveValue() < primal_objective_limit_) {
2899 VLOG(1) << "Stopping the primal simplex because"
2900 << " the objective limit " << primal_objective_limit_
2901 << " has been reached.";
2902 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2903 objective_limit_reached_ = true;
2904 return Status::OK();
2905 }
2906 } else if (phase_ == Phase::FEASIBILITY) {
2907 // Note that direction_.non_zeros contains the positions of the basic
2908 // variables whose values were updated during the last iteration.
2909 if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2910 &objective_)) {
2911 reduced_costs_.ResetForNewObjective();
2912 }
2913 }
2914
2915 const ColIndex entering_col = primal_prices_.GetBestEnteringColumn();
2916 if (entering_col == kInvalidCol) {
2917 if (reduced_costs_.AreReducedCostsPrecise() &&
2918 basis_factorization_.IsRefactorized()) {
2919 if (phase_ == Phase::FEASIBILITY) {
2920 const Fractional primal_infeasibility =
2921 variable_values_.ComputeMaximumPrimalInfeasibility();
2922 if (primal_infeasibility <
2923 parameters_.primal_feasibility_tolerance()) {
2924 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2925 } else {
2926 VLOG(1) << "Infeasible problem! infeasibility = "
2927 << primal_infeasibility;
2928 problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2929 }
2930 } else {
2931 problem_status_ = ProblemStatus::OPTIMAL;
2932 }
2933 break;
2934 }
2935
2936 VLOG(1) << "Optimal reached, double checking...";
2937 reduced_costs_.MakeReducedCostsPrecise();
2938 refactorize = true;
2939 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
2940 continue;
2941 }
2942
2943 DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2944
2945 // Solve the system B.d = a with a the entering column.
2946 ComputeDirection(entering_col);
2947
2948 // This might trigger a recomputation on the next iteration. If it returns
2949 // false, we will also try to see if there is not another more promising
2950 // entering column.
2951 if (!primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
2952 direction_)) {
2953 primal_prices_.RecomputePriceAt(entering_col);
2954 continue;
2955 }
2956 const Fractional reduced_cost =
2957 reduced_costs_.TestEnteringReducedCostPrecision(entering_col,
2958 direction_);
2959
2960 // The test might have changed the reduced cost of the entering_col.
2961 // If it is no longer a valid entering candidate, we loop.
2962 primal_prices_.RecomputePriceAt(entering_col);
2963 if (!reduced_costs_.IsValidPrimalEnteringCandidate(entering_col)) {
2964 reduced_costs_.MakeReducedCostsPrecise();
2965 VLOG(1) << "Skipping col #" << entering_col
2966 << " whose reduced cost is no longer valid under precise reduced "
2967 "cost: "
2968 << reduced_cost;
2969 continue;
2970 }
2971
2972 // This test takes place after the check for optimality/feasibility because
2973 // when running with 0 iterations, we still want to report
2974 // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2975 // case at the beginning of the algorithm.
2976 if (num_iterations_ == parameters_.max_number_of_iterations()) break;
2977
2978 Fractional step_length;
2979 RowIndex leaving_row;
2980 Fractional target_bound;
2981 if (phase_ == Phase::FEASIBILITY) {
2982 PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2983 &refactorize, &leaving_row,
2984 &step_length, &target_bound);
2985 } else {
2987 ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2988 &leaving_row, &step_length, &target_bound));
2989 }
2990 if (refactorize) {
2991 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
2992 continue;
2993 }
2994
2995 if (step_length == kInfinity || step_length == -kInfinity) {
2996 // On a validated input, we shouldn't have a length of -infinity even
2997 // though it can be slightly negative in some settings.
2998 DCHECK_NE(step_length, -kInfinity);
2999 if (!basis_factorization_.IsRefactorized() ||
3000 !reduced_costs_.AreReducedCostsPrecise()) {
3001 VLOG(1) << "Infinite step length, double checking...";
3002 reduced_costs_.MakeReducedCostsPrecise();
3003 refactorize = true;
3004 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
3005 continue;
3006 }
3007 if (phase_ == Phase::FEASIBILITY) {
3008 // This shouldn't happen by construction.
3009 VLOG(1) << "Unbounded feasibility problem !?";
3010 problem_status_ = ProblemStatus::ABNORMAL;
3011 } else {
3012 problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
3013 solution_primal_ray_.AssignToZero(num_cols_);
3014 for (RowIndex row(0); row < num_rows_; ++row) {
3015 const ColIndex col = basis_[row];
3016 solution_primal_ray_[col] = -direction_[row];
3017 }
3018 solution_primal_ray_[entering_col] = 1.0;
3019 if (reduced_cost > 0.0) {
3020 ChangeSign(&solution_primal_ray_);
3021 }
3022 }
3023 break;
3024 }
3025
3026 Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
3027 if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
3028 // For phase-I we currently always set the leaving variable to its exact
3029 // bound even if by doing so we may take a small step in the wrong
3030 // direction and may increase the overall infeasibility.
3031 //
3032 // TODO(user): Investigate alternatives even if this seems to work well in
3033 // practice. Note that the final returned solution will have the property
3034 // that all non-basic variables are at their exact bound, so it is nice
3035 // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
3036 // this property cannot be found.
3037 step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
3038 }
3039
3040 // Store the leaving_col before basis_ change.
3041 const ColIndex leaving_col =
3042 (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
3043
3044 // An iteration is called 'degenerate' if the leaving variable is already
3045 // primal-infeasible and we make it even more infeasible or if we do a zero
3046 // step.
3047 bool is_degenerate = false;
3048 if (leaving_row != kInvalidRow) {
3049 Fractional dir = -direction_[leaving_row] * step;
3050 is_degenerate =
3051 (dir == 0.0) ||
3052 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
3053 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
3054
3055 // If the iteration is not degenerate, the leaving variable should go to
3056 // its exact target bound (it is how the step is computed).
3057 if (!is_degenerate) {
3058 DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
3059 target_bound));
3060 }
3061 }
3062
3063 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
3064 if (leaving_row != kInvalidRow) {
3065 // Important: the norm must be updated before the reduced_cost.
3066 primal_edge_norms_.UpdateBeforeBasisPivot(
3067 entering_col, basis_[leaving_row], leaving_row, direction_,
3068 &update_row_);
3069 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
3070 direction_, &update_row_);
3071 primal_prices_.UpdateBeforeBasisPivot(entering_col, &update_row_);
3072 if (!is_degenerate) {
3073 // On a non-degenerate iteration, the leaving variable should be at its
3074 // exact bound. This corrects an eventual small numerical error since
3075 // 'value + direction * step' where step is
3076 // '(target_bound - value) / direction'
3077 // may be slighlty different from target_bound.
3078 variable_values_.Set(leaving_col, target_bound);
3079 }
3081 UpdateAndPivot(entering_col, leaving_row, target_bound));
3083 if (is_degenerate) {
3084 timer.AlsoUpdate(&iteration_stats_.degenerate);
3085 } else {
3086 timer.AlsoUpdate(&iteration_stats_.normal);
3087 }
3088 });
3089 } else {
3090 // Bound flip. This makes sure that the flipping variable is at its bound
3091 // and has the correct status.
3093 variables_info_.GetTypeRow()[entering_col]);
3094 if (step > 0.0) {
3095 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3097 } else if (step < 0.0) {
3098 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3100 }
3101 primal_prices_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
3102 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
3103 }
3104
3105 if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
3106 // Set the leaving variable to its exact bound.
3107 variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3108
3109 // Change the objective value of the leaving variable to zero.
3110 reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
3111 &objective_[leaving_col]);
3112 primal_prices_.RecomputePriceAt(leaving_col);
3113 }
3114
3115 // Stats about consecutive degenerate iterations.
3116 if (step_length == 0.0) {
3117 num_consecutive_degenerate_iterations_++;
3118 } else {
3119 if (num_consecutive_degenerate_iterations_ > 0) {
3120 iteration_stats_.degenerate_run_size.Add(
3121 num_consecutive_degenerate_iterations_);
3122 num_consecutive_degenerate_iterations_ = 0;
3123 }
3124 }
3125 ++num_iterations_;
3126 }
3127 if (num_consecutive_degenerate_iterations_ > 0) {
3128 iteration_stats_.degenerate_run_size.Add(
3129 num_consecutive_degenerate_iterations_);
3130 }
3131 return Status::OK();
3132}
3133
3134// TODO(user): Two other approaches for the phase I described in Koberstein's
3135// PhD thesis seem worth trying at some point:
3136// - The subproblem approach, which enables one to use a normal phase II dual,
3137// but requires an efficient bound-flipping ratio test since the new problem
3138// has all its variables boxed. This one is implemented now, but require
3139// a bit more tuning.
3140// - Pan's method, which is really fast but have no theoretical guarantee of
3141// terminating and thus needs to use one of the other methods as a fallback if
3142// it fails to make progress.
3143//
3144// Note that the returned status applies to the primal problem!
3145Status RevisedSimplex::DualMinimize(bool feasibility_phase,
3146 TimeLimit* time_limit) {
3147 Cleanup update_deterministic_time_on_return(
3148 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
3149 num_consecutive_degenerate_iterations_ = 0;
3150 bool refactorize = false;
3151 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
3152
3153 bound_flip_candidates_.clear();
3154
3155 // Leaving variable.
3156 RowIndex leaving_row;
3157 Fractional cost_variation;
3158 Fractional target_bound;
3159
3160 // Entering variable.
3161 ColIndex entering_col;
3162
3163 while (true) {
3164 AdvanceDeterministicTime(time_limit);
3165 if (time_limit->LimitReached()) break;
3166
3167 // TODO(user): we may loop a bit more than the actual number of iteration.
3168 // fix.
3169 ScopedTimeDistributionUpdater timer(&iteration_stats_.total);
3170
3171 // Trigger a refactorization if one of the class we use request it.
3172 //
3173 // TODO(user): Estimate when variable values are imprecise and refactor too.
3174 const bool old_refactorize_value = refactorize;
3175 if (!refactorize && reduced_costs_.NeedsBasisRefactorization()) {
3176 last_refactorization_reason_ = RefactorizationReason::RC;
3177 refactorize = true;
3178 }
3179 if (!refactorize && dual_edge_norms_.NeedsBasisRefactorization()) {
3180 last_refactorization_reason_ = RefactorizationReason::NORM;
3181 refactorize = true;
3182 }
3183 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
3184
3185 // If the basis is refactorized, we recompute all the values in order to
3186 // have a good precision.
3187 if (basis_factorization_.IsRefactorized()) {
3188 // We do not want to recompute the reduced costs too often, this is
3189 // because that may break the overall direction taken by the last steps
3190 // and may lead to less improvement on degenerate problems.
3191 //
3192 // For now, we just recompute them if refactorize was set during the
3193 // loop and not because of normal refactorization.
3194 //
3195 // During phase-I, we do want the reduced costs to be as precise as
3196 // possible. TODO(user): Investigate why and fix the TODO in
3197 // PermuteBasis().
3198 //
3199 // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
3200 // do recompute them, it is better to do that first.
3201 if (feasibility_phase || old_refactorize_value) {
3202 reduced_costs_.MakeReducedCostsPrecise();
3203 }
3204
3205 // TODO(user): Make RecomputeBasicVariableValues() do nothing
3206 // if it was already recomputed on a refactorized basis. This is the
3207 // same behavior as MakeReducedCostsPrecise().
3208 //
3209 // TODO(user): Do not recompute the variable values each time we
3210 // refactorize the matrix, like for the reduced costs? That may lead to
3211 // a worse behavior than keeping the "imprecise" version and only
3212 // recomputing it when its precision is above a threshold.
3213 if (!feasibility_phase) {
3214 MakeBoxedVariableDualFeasible(
3215 variables_info_.GetNonBasicBoxedVariables(),
3216 /*update_basic_values=*/false);
3217 variable_values_.RecomputeBasicVariableValues();
3218 variable_values_.RecomputeDualPrices(
3219 parameters_.dual_price_prioritize_norm());
3220
3221 // Computing the objective at each iteration takes time, so we just
3222 // check the limit when the basis is refactorized.
3223 //
3224 // Hack: We need phase_ here and not the local feasibility_phase
3225 // variable because this must not be checked for the dual phase I algo
3226 // that use the same code as the dual phase II (i.e. the local
3227 // feasibility_phase will be false).
3228 if (phase_ == Phase::OPTIMIZATION &&
3229 dual_objective_limit_ != kInfinity &&
3230 ComputeObjectiveValue() > dual_objective_limit_) {
3231 SOLVER_LOG(logger_,
3232 "Stopping the dual simplex because"
3233 " the objective limit ",
3234 dual_objective_limit_, " has been reached.");
3235 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3236 objective_limit_reached_ = true;
3237 return Status::OK();
3238 }
3239 }
3240
3241 DisplayIterationInfo(/*primal=*/false, last_refactorization_reason_);
3242 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
3243 } else {
3244 // Updates from the previous iteration that can be skipped if we
3245 // recompute everything (see other case above).
3246 if (!feasibility_phase) {
3247 // Make sure the boxed variables are dual-feasible before choosing the
3248 // leaving variable row.
3249 MakeBoxedVariableDualFeasible(bound_flip_candidates_,
3250 /*update_basic_values=*/true);
3251 bound_flip_candidates_.clear();
3252
3253 // The direction_.non_zeros contains the positions for which the basic
3254 // variable value was changed during the previous iterations.
3255 variable_values_.UpdateDualPrices(direction_.non_zeros);
3256 }
3257 }
3258
3259 if (feasibility_phase) {
3260 GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
3261 &leaving_row, &cost_variation, &target_bound));
3262 } else {
3263 GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
3264 &leaving_row, &cost_variation, &target_bound));
3265 }
3266 if (leaving_row == kInvalidRow) {
3267 // TODO(user): integrate this with the main "re-optimization" loop.
3268 // Also distinguish cost perturbation and shifts?
3269 if (!basis_factorization_.IsRefactorized() ||
3270 reduced_costs_.HasCostShift()) {
3271 VLOG(1) << "Optimal reached, double checking.";
3272 reduced_costs_.ClearAndRemoveCostShifts();
3273 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3274 refactorize = true;
3275 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
3276 continue;
3277 }
3278 if (feasibility_phase) {
3279 // Note that since the basis is refactorized, the variable values
3280 // will be recomputed at the beginning of the second phase. The boxed
3281 // variable values will also be corrected by
3282 // MakeBoxedVariableDualFeasible().
3283 if (num_dual_infeasible_positions_ == 0) {
3284 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3285 } else {
3286 VLOG(1) << "DUAL infeasible in dual phase I.";
3287 problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
3288 }
3289 } else {
3290 problem_status_ = ProblemStatus::OPTIMAL;
3291 }
3292 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3293 return Status::OK();
3294 }
3295
3296 update_row_.ComputeUnitRowLeftInverse(leaving_row);
3297 if (!dual_edge_norms_.TestPrecision(leaving_row,
3298 update_row_.GetUnitRowLeftInverse())) {
3299 // We rechoose a potentially different leaving row. Note that if we choose
3300 // the same, we shouldn't go back here since the norm will now pass the
3301 // test.
3302 if (feasibility_phase) {
3303 const Fractional price = dual_pricing_vector_[leaving_row];
3304 const DenseColumn::ConstView squared_norms =
3305 dual_edge_norms_.GetEdgeSquaredNorms();
3306 dual_prices_.AddOrUpdate(leaving_row,
3307 Square(price) / squared_norms[leaving_row]);
3308 } else {
3309 variable_values_.UpdateDualPrices({leaving_row});
3310 }
3311 continue;
3312 }
3313 update_row_.ComputeUpdateRow(leaving_row);
3314
3315 if (feasibility_phase) {
3316 GLOP_RETURN_IF_ERROR(entering_variable_.DualPhaseIChooseEnteringColumn(
3317 reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3318 &entering_col));
3319 } else {
3320 GLOP_RETURN_IF_ERROR(entering_variable_.DualChooseEnteringColumn(
3321 reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3322 &bound_flip_candidates_, &entering_col));
3323 }
3324
3325 // No entering_col: dual unbounded (i.e. primal infeasible).
3326 if (entering_col == kInvalidCol) {
3327 if (!reduced_costs_.AreReducedCostsPrecise()) {
3328 VLOG(1) << "No entering column. Double checking...";
3329 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3330 refactorize = true;
3331 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
3332 continue;
3333 }
3334 DCHECK(basis_factorization_.IsRefactorized());
3335 if (feasibility_phase) {
3336 // This shouldn't happen by construction.
3337 VLOG(1) << "Unbounded dual feasibility problem !?";
3338 problem_status_ = ProblemStatus::ABNORMAL;
3339 } else {
3340 problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
3341 solution_dual_ray_ =
3342 Transpose(update_row_.GetUnitRowLeftInverse().values);
3343 update_row_.ComputeFullUpdateRow(leaving_row,
3344 &solution_dual_ray_row_combination_);
3345 if (cost_variation < 0) {
3346 ChangeSign(&solution_dual_ray_);
3347 ChangeSign(&solution_dual_ray_row_combination_);
3348 }
3349 }
3350 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3351 return Status::OK();
3352 }
3353
3354 // If the coefficient is too small, we recompute the reduced costs if not
3355 // already done. This is an extra heuristic to avoid computing the direction
3356 // If the pivot is small. But the real recomputation step is just below.
3357 const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
3358 if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
3359 !reduced_costs_.AreReducedCostsPrecise()) {
3360 VLOG(1) << "Trying not to pivot by " << entering_coeff;
3361 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3362 refactorize = true;
3363 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
3364 continue;
3365 }
3366
3367 ComputeDirection(entering_col);
3368
3369 // If the pivot is small compared to others in the direction_ vector we try
3370 // to recompute everything. If we cannot, then note that
3371 // DualChooseEnteringColumn() should guaranteed that the pivot is not too
3372 // small when everything has already been recomputed.
3373 if (std::abs(direction_[leaving_row]) <
3374 parameters_.small_pivot_threshold() * direction_infinity_norm_) {
3375 if (!reduced_costs_.AreReducedCostsPrecise()) {
3376 VLOG(1) << "Trying not pivot by " << entering_coeff << " ("
3377 << direction_[leaving_row]
3378 << ") because the direction has a norm of "
3379 << direction_infinity_norm_;
3380 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3381 refactorize = true;
3382 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
3383 continue;
3384 }
3385 }
3386
3387 // This is to avoid crashes below. It seems to happen rarely on a few miplib
3388 // problem like rmine11.pb.gz in multithread.
3389 //
3390 // TODO(user): Try to recover instead.
3391 if (std::abs(direction_[leaving_row]) <= 1e-20) {
3392 const std::string error_message = absl::StrCat(
3393 "trying to pivot with number too small: ", direction_[leaving_row]);
3394 SOLVER_LOG(logger_, error_message);
3395 return Status(Status::ERROR_LU, error_message);
3396 }
3397
3398 // This test takes place after the check for optimality/feasibility because
3399 // when running with 0 iterations, we still want to report
3400 // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
3401 // case at the beginning of the algorithm.
3402 if (num_iterations_ == parameters_.max_number_of_iterations()) {
3403 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3404 return Status::OK();
3405 }
3406
3407 // Before we update the reduced costs, if its sign is already dual
3408 // infeasible and the update direction will make it worse we make sure the
3409 // reduced cost is 0.0 so UpdateReducedCosts() will not take a step that
3410 // goes in the wrong direction (a few experiments seems to indicate that
3411 // this is not a good idea). See comment at the top of UpdateReducedCosts().
3412 //
3413 // Note that ShiftCostIfNeeded() actually shifts the cost a bit more in
3414 // order to do a non-zero step. This helps on degenerate problems. Like the
3415 // pertubation, we will remove all these shifts at the end.
3416 const bool increasing_rc_is_needed =
3417 (cost_variation > 0.0) == (entering_coeff > 0.0);
3418 reduced_costs_.ShiftCostIfNeeded(increasing_rc_is_needed, entering_col);
3419
3421 if (reduced_costs_.StepIsDualDegenerate(increasing_rc_is_needed,
3422 entering_col)) {
3423 timer.AlsoUpdate(&iteration_stats_.degenerate);
3424 } else {
3425 timer.AlsoUpdate(&iteration_stats_.normal);
3426 }
3427 });
3428
3429 // Update basis. Note that direction_ is already computed.
3430 //
3431 // TODO(user): this is pretty much the same in the primal or dual code.
3432 // We just need to know to what bound the leaving variable will be set to.
3433 // Factorize more common code?
3434 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
3435 &update_row_);
3436 dual_edge_norms_.UpdateBeforeBasisPivot(
3437 entering_col, leaving_row, direction_,
3438 update_row_.GetUnitRowLeftInverse());
3439
3440 // During phase I, we do not need the basic variable values at all.
3441 // Important: The norm should be updated before that.
3442 Fractional primal_step = 0.0;
3443 if (feasibility_phase) {
3444 DualPhaseIUpdatePrice(leaving_row, entering_col);
3445 } else {
3446 primal_step =
3447 ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
3448 variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
3449 }
3450
3451 // It is important to do the actual pivot after the update above!
3452 const ColIndex leaving_col = basis_[leaving_row];
3454 UpdateAndPivot(entering_col, leaving_row, target_bound));
3455
3456 // This makes sure the leaving variable is at its exact bound. Tests
3457 // indicate that this makes everything more stable. Note also that during
3458 // the feasibility phase, the variable values are not used, but that the
3459 // correct non-basic variable value are needed at the end.
3460 variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3461
3462 ++num_iterations_;
3463 }
3464 return Status::OK();
3465}
3466
3467Status RevisedSimplex::PrimalPush(TimeLimit* time_limit) {
3469 Cleanup update_deterministic_time_on_return(
3470 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
3471 bool refactorize = false;
3472
3473 // We clear all the quantities that we don't update so they will be recomputed
3474 // later if needed.
3475 primal_edge_norms_.Clear();
3476 dual_edge_norms_.Clear();
3477 update_row_.Invalidate();
3478 reduced_costs_.ClearAndRemoveCostShifts();
3479
3480 std::vector<ColIndex> super_basic_cols;
3481 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3482 if (variables_info_.GetStatusRow()[col] == VariableStatus::FREE &&
3483 variable_values_.Get(col) != 0) {
3484 super_basic_cols.push_back(col);
3485 }
3486 }
3487
3488 while (!super_basic_cols.empty()) {
3489 AdvanceDeterministicTime(time_limit);
3490 if (time_limit->LimitReached()) break;
3491
3492 ScopedTimeDistributionUpdater timer(&iteration_stats_.total);
3493 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
3494 if (basis_factorization_.IsRefactorized()) {
3495 CorrectErrorsOnVariableValues();
3496 DisplayIterationInfo(/*primal=*/true);
3497 }
3498
3499 // TODO(user): Select at random like in Polish().
3500 ColIndex entering_col = super_basic_cols.back();
3501
3502 DCHECK(variables_info_.GetCanDecreaseBitRow()[entering_col]);
3503 DCHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
3504
3505 // Decide which direction to send the entering column.
3506 // UNCONSTRAINED variables go towards zero. Other variables go towards their
3507 // closest bound. We assume that we're at an optimal solution, so all FREE
3508 // variables have approximately zero reduced cost, which means that the
3509 // objective value won't change from moving this column into the basis.
3510 // TODO(user): As an improvement for variables with two bounds, try both
3511 // and pick one that doesn't require a basis change (if possible), otherwise
3512 // pick the closer bound.
3513 Fractional fake_rc;
3514 const Fractional entering_value = variable_values_.Get(entering_col);
3515 if (variables_info_.GetTypeRow()[entering_col] ==
3517 if (entering_value > 0) {
3518 fake_rc = 1.0;
3519 } else {
3520 fake_rc = -1.0;
3521 }
3522 } else {
3523 const Fractional diff_ub =
3524 variables_info_.GetVariableUpperBounds()[entering_col] -
3525 entering_value;
3526 const Fractional diff_lb =
3527 entering_value -
3528 variables_info_.GetVariableLowerBounds()[entering_col];
3529 if (diff_lb <= diff_ub) {
3530 fake_rc = 1.0;
3531 } else {
3532 fake_rc = -1.0;
3533 }
3534 }
3535
3536 // Solve the system B.d = a with a the entering column.
3537 ComputeDirection(entering_col);
3538
3539 Fractional step_length;
3540 RowIndex leaving_row;
3541 Fractional target_bound;
3542
3543 GLOP_RETURN_IF_ERROR(ChooseLeavingVariableRow(entering_col, fake_rc,
3544 &refactorize, &leaving_row,
3545 &step_length, &target_bound));
3546
3547 if (refactorize) continue;
3548
3549 // At this point, we know the iteration will finish or stop with an error.
3550 super_basic_cols.pop_back();
3551
3552 if (step_length == kInfinity || step_length == -kInfinity) {
3553 if (variables_info_.GetTypeRow()[entering_col] ==
3555 step_length = std::fabs(entering_value);
3556 } else {
3557 VLOG(1) << "Infinite step for bounded variable ?!";
3558 problem_status_ = ProblemStatus::ABNORMAL;
3559 break;
3560 }
3561 }
3562
3563 const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
3564
3565 // Store the leaving_col before basis_ change.
3566 const ColIndex leaving_col =
3567 (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
3568
3569 // An iteration is called 'degenerate' if the leaving variable is already
3570 // primal-infeasible and we make it even more infeasible or if we do a zero
3571 // step.
3572 // TODO(user): Test setting the step size to zero for degenerate steps.
3573 // We don't need to force a positive step because each super-basic variable
3574 // is pivoted in exactly once.
3575 bool is_degenerate = false;
3576 if (leaving_row != kInvalidRow) {
3577 Fractional dir = -direction_[leaving_row] * step;
3578 is_degenerate =
3579 (dir == 0.0) ||
3580 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
3581 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
3582
3583 // If the iteration is not degenerate, the leaving variable should go to
3584 // its exact target bound (it is how the step is computed).
3585 if (!is_degenerate) {
3586 DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
3587 target_bound));
3588 }
3589 }
3590
3591 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
3592 if (leaving_row != kInvalidRow) {
3593 if (!is_degenerate) {
3594 // On a non-degenerate iteration, the leaving variable should be at its
3595 // exact bound. This corrects an eventual small numerical error since
3596 // 'value + direction * step' where step is
3597 // '(target_bound - value) / direction'
3598 // may be slighlty different from target_bound.
3599 variable_values_.Set(leaving_col, target_bound);
3600 }
3602 UpdateAndPivot(entering_col, leaving_row, target_bound));
3604 if (is_degenerate) {
3605 timer.AlsoUpdate(&iteration_stats_.degenerate);
3606 } else {
3607 timer.AlsoUpdate(&iteration_stats_.normal);
3608 }
3609 });
3610 } else {
3611 // Snap the super-basic variable to its bound. Note that
3612 // variable_values_.UpdateOnPivoting() should already be close to that but
3613 // here we make sure it is exact and remove any small numerical errors.
3614 if (variables_info_.GetTypeRow()[entering_col] ==
3616 variable_values_.Set(entering_col, 0.0);
3617 } else if (step > 0.0) {
3618 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3620 } else if (step < 0.0) {
3621 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3623 }
3624 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
3625 }
3626
3627 ++num_iterations_;
3628 }
3629
3630 if (!super_basic_cols.empty()) {
3631 SOLVER_LOG(logger_, "Push terminated early with ", super_basic_cols.size(),
3632 " super-basic variables remaining.");
3633 }
3634
3635 // TODO(user): What status should be returned if the time limit is hit?
3636 // If the optimization phase finished, then OPTIMAL is technically correct
3637 // but also misleading.
3638
3639 return Status::OK();
3640}
3641
// Returns the column index of the slack variable associated with the given
// row. Slack columns are stored contiguously after the structural columns,
// starting at first_slack_col_.
ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
  DCHECK_ROW_BOUNDS(row);
  return first_slack_col_ + RowToColIndex(row);
}
3646
3648 std::string result;
3649 result.append(iteration_stats_.StatString());
3650 result.append(ratio_test_stats_.StatString());
3651 result.append(entering_variable_.StatString());
3652 result.append(dual_prices_.StatString());
3653 result.append(reduced_costs_.StatString());
3654 result.append(variable_values_.StatString());
3655 result.append(primal_edge_norms_.StatString());
3656 result.append(dual_edge_norms_.StatString());
3657 result.append(update_row_.StatString());
3658 result.append(basis_factorization_.StatString());
3659 result.append(function_stats_.StatString());
3660 return result;
3661}
3662
3663void RevisedSimplex::DisplayAllStats() {
3664 if (absl::GetFlag(FLAGS_simplex_display_stats)) {
3665 absl::FPrintF(stderr, "%s", StatString());
3666 absl::FPrintF(stderr, "%s", GetPrettySolverStats());
3667 }
3668}
3669
3670Fractional RevisedSimplex::ComputeObjectiveValue() const {
3671 SCOPED_TIME_STAT(&function_stats_);
3672 return PreciseScalarProduct(objective_,
3673 Transpose(variable_values_.GetDenseRow()));
3674}
3675
3676Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
3677 SCOPED_TIME_STAT(&function_stats_);
3678 const Fractional sum = PreciseScalarProduct(
3679 objective_, Transpose(variable_values_.GetDenseRow()));
3680 return objective_scaling_factor_ * (sum + objective_offset_);
3681}
3682
3683void RevisedSimplex::SetRandom(absl::BitGenRef random) {
3684 random_ = random;
3685 dual_prices_.SetRandom(random_);
3686 entering_variable_.SetRandom(random_);
3687 reduced_costs_.SetRandom(random_);
3688 primal_prices_.SetRandom(random_);
3689}
3690
3692 SCOPED_TIME_STAT(&function_stats_);
3693 deterministic_random_.seed(parameters.random_seed());
3694 absl_random_ = absl::BitGen(absl::SeedSeq({parameters.random_seed()}));
3695 SetRandom((UseAbslRandom(parameters)
3696 ? absl::BitGenRef(absl_random_)
3697 : absl::BitGenRef(deterministic_random_)));
3698 initial_parameters_ = parameters;
3699 parameters_ = parameters;
3700 PropagateParameters();
3701}
3702
// Pushes the current parameters_ to every sub-component that keeps its own
// copy. Called from SetParameters(); kept separate so parameters_ can be
// re-propagated after being changed without going through SetParameters().
void RevisedSimplex::PropagateParameters() {
  SCOPED_TIME_STAT(&function_stats_);
  basis_factorization_.SetParameters(parameters_);
  entering_variable_.SetParameters(parameters_);
  reduced_costs_.SetParameters(parameters_);
  dual_edge_norms_.SetParameters(parameters_);
  primal_edge_norms_.SetParameters(parameters_);
  update_row_.SetParameters(parameters_);
}
3712
// Logs a one-line progress summary for the current phase (feasibility,
// optimization or push). The `reason` is appended as a short tag when the
// line was triggered by a non-default basis refactorization.
void RevisedSimplex::DisplayIterationInfo(bool primal,
                                          RefactorizationReason reason) {
  if (!logger_->LoggingIsEnabled()) return;
  const std::string first_word = primal ? "Primal " : "Dual ";

  // We display the info on each re-factorization, and it is nice to show what
  // triggered the issue. Note that we don't display normal refactorization when
  // we decide that it is worth it for the solve time or we reach the fixed
  // refactorization period.
  std::string info;
  if (reason != RefactorizationReason::DEFAULT) {
    switch (reason) {
      // Unreachable here (guarded by the if above) but kept so the switch
      // covers every enum value.
      case RefactorizationReason::DEFAULT:
        info = " [default]";
        break;
      case RefactorizationReason::SMALL_PIVOT:
        info = " [small pivot]";
        break;
      case RefactorizationReason::IMPRECISE_PIVOT:
        info = " [imprecise pivot]";
        break;
      case RefactorizationReason::NORM:
        info = " [norms]";
        break;
      case RefactorizationReason::RC:
        info = " [reduced costs]";
        break;
      case RefactorizationReason::VAR_VALUES:
        info = " [var values]";
        break;
      case RefactorizationReason::FINAL_CHECK:
        info = " [check]";
        break;
    }
  }

  switch (phase_) {
    case Phase::FEASIBILITY: {
      const int64_t iter = num_iterations_;
      std::string name;
      Fractional objective;
      if (parameters_.use_dual_simplex()) {
        if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
          objective = reduced_costs_.ComputeSumOfDualInfeasibilities();
        } else {
          // The internal objective of the transformed problem is the negation
          // of the sum of the dual infeasibility of the original problem.
          objective = -PreciseScalarProduct(
              objective_, Transpose(variable_values_.GetDenseRow()));
        }
        name = "sum_dual_infeasibilities";
      } else {
        objective = variable_values_.ComputeSumOfPrimalInfeasibilities();
        name = "sum_primal_infeasibilities";
      }

      SOLVER_LOG(logger_, first_word, "feasibility phase, iteration # ", iter,
                 ", ", name, " = ", absl::StrFormat("%.15E", objective), info);
      break;
    }
    case Phase::OPTIMIZATION: {
      // Iterations spent in the feasibility phase are not counted here.
      const int64_t iter = num_iterations_ - num_feasibility_iterations_;
      // Note that in the dual phase II, ComputeObjectiveValue() is also
      // computing the dual objective even if it uses the variable values.
      // This is because if we modify the bounds to make the problem
      // primal-feasible, we are at the optimal and hence the two objectives
      // are the same.
      const Fractional objective = ComputeInitialProblemObjectiveValue();
      SOLVER_LOG(logger_, first_word, "optimization phase, iteration # ", iter,
                 ", objective = ", absl::StrFormat("%.15E", objective), info);
      break;
    }
    case Phase::PUSH: {
      // Only the iterations of the push phase itself are counted.
      const int64_t iter = num_iterations_ - num_feasibility_iterations_ -
                           num_optimization_iterations_;
      SOLVER_LOG(logger_, first_word, "push phase, iteration # ", iter,
                 ", remaining_variables_to_push = ",
                 ComputeNumberOfSuperBasicVariables(), info);
    }
  }
}
3794
// Logs the current problem status together with the maximum primal/dual
// infeasibilities and residuals, i.e. how far the current solution is from
// satisfying bounds, A.x = b, reduced-cost signs and c_B = y.B.
void RevisedSimplex::DisplayErrors() {
  if (!logger_->LoggingIsEnabled()) return;
  SOLVER_LOG(logger_,
             "Current status: ", GetProblemStatusString(problem_status_));
  SOLVER_LOG(logger_, "Primal infeasibility (bounds) = ",
             variable_values_.ComputeMaximumPrimalInfeasibility());
  SOLVER_LOG(logger_, "Primal residual |A.x - b| = ",
             variable_values_.ComputeMaximumPrimalResidual());
  SOLVER_LOG(logger_, "Dual infeasibility (reduced costs) = ",
             reduced_costs_.ComputeMaximumDualInfeasibility());
  SOLVER_LOG(logger_, "Dual residual |c_B - y.B| = ",
             reduced_costs_.ComputeMaximumDualResidual());
}
3808
3809namespace {
3810
3811std::string StringifyMonomialWithFlags(const Fractional a,
3812 absl::string_view x) {
3813 return StringifyMonomial(
3814 a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3815}
3816
3817// Returns a string representing the rational approximation of x or a decimal
3818// approximation of x according to
3819// absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions).
3820std::string StringifyWithFlags(const Fractional x) {
3821 return Stringify(x,
3822 absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3823}
3824
3825} // namespace
3826
3827std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
3828 std::string output;
3829 VariableType variable_type = variables_info_.GetTypeRow()[col];
3830 VariableStatus variable_status = variables_info_.GetStatusRow()[col];
3831 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3832 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3833 absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
3834 variable_name_[col],
3835 StringifyWithFlags(variable_values_.Get(col)),
3836 GetVariableStatusString(variable_status),
3837 GetVariableTypeString(variable_type),
3838 StringifyWithFlags(lower_bounds[col]),
3839 StringifyWithFlags(upper_bounds[col]));
3840 return output;
3841}
3842
3843void RevisedSimplex::DisplayInfoOnVariables() const {
3844 if (VLOG_IS_ON(3)) {
3845 for (ColIndex col(0); col < num_cols_; ++col) {
3846 const Fractional variable_value = variable_values_.Get(col);
3847 const Fractional objective_coefficient = objective_[col];
3848 const Fractional objective_contribution =
3849 objective_coefficient * variable_value;
3850 VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3851 << StringifyWithFlags(variable_value) << " * "
3852 << StringifyWithFlags(objective_coefficient)
3853 << "(obj) = " << StringifyWithFlags(objective_contribution);
3854 }
3855 VLOG(3) << "------";
3856 }
3857}
3858
3859void RevisedSimplex::DisplayVariableBounds() {
3860 if (VLOG_IS_ON(3)) {
3861 const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
3862 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3863 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3864 for (ColIndex col(0); col < num_cols_; ++col) {
3865 switch (variable_type[col]) {
3867 break;
3869 VLOG(3) << variable_name_[col]
3870 << " >= " << StringifyWithFlags(lower_bounds[col]) << ";";
3871 break;
3873 VLOG(3) << variable_name_[col]
3874 << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3875 break;
3877 VLOG(3) << StringifyWithFlags(lower_bounds[col])
3878 << " <= " << variable_name_[col]
3879 << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3880 break;
3882 VLOG(3) << variable_name_[col] << " = "
3883 << StringifyWithFlags(lower_bounds[col]) << ";";
3884 break;
3885 default: // This should never happen.
3886 LOG(DFATAL) << "Column " << col << " has no meaningful status.";
3887 break;
3888 }
3889 }
3890 }
3891}
3892
3893util_intops::StrongVector<RowIndex, SparseRow>
3895 util_intops::StrongVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3896 for (ColIndex col(0); col < num_cols_; ++col) {
3897 ComputeDirection(col);
3898 for (const auto e : direction_) {
3899 if (column_scales == nullptr) {
3900 dictionary[e.row()].SetCoefficient(col, e.coefficient());
3901 continue;
3902 }
3903 const Fractional numerator =
3904 col < column_scales->size() ? (*column_scales)[col] : 1.0;
3905 const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3906 ? (*column_scales)[GetBasis(e.row())]
3907 : 1.0;
3908 dictionary[e.row()].SetCoefficient(
3909 col, direction_[e.row()] * (numerator / denominator));
3910 }
3911 }
3912 return dictionary;
3913}
3914
3916 const LinearProgram& linear_program, const BasisState& state) {
3917 LoadStateForNextSolve(state);
3918 Status status = Initialize(linear_program);
3919 if (status.ok()) {
3920 variable_values_.RecomputeBasicVariableValues();
3921 solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3922 }
3923}
3924
3925void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3926 if (VLOG_IS_ON(3)) {
3927 variable_name_.resize(num_cols_, "");
3928 DisplayInfoOnVariables();
3929
3930 std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3931 const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
3932 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3933 absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3934 variable_name_[col]));
3935 }
3936 VLOG(3) << output << ";";
3937
3938 const RevisedSimplexDictionary dictionary(nullptr, this);
3939 RowIndex r(0);
3940 for (const SparseRow& row : dictionary) {
3941 output.clear();
3942 ColIndex basic_col = basis_[r];
3943 absl::StrAppend(&output, variable_name_[basic_col], " = ",
3944 StringifyWithFlags(variable_values_.Get(basic_col)));
3945 for (const SparseRowEntry e : row) {
3946 if (e.col() != basic_col) {
3947 absl::StrAppend(&output,
3948 StringifyMonomialWithFlags(e.coefficient(),
3949 variable_name_[e.col()]));
3950 }
3951 }
3952 VLOG(3) << output << ";";
3953 }
3954 VLOG(3) << "------";
3955 DisplayVariableBounds();
3956 ++r;
3957 }
3958}
3959
3960void RevisedSimplex::DisplayProblem() {
3961 // This function has a complexity in O(num_rows * num_cols *
3962 // num_non_zeros_in_row).
3963 if (VLOG_IS_ON(3)) {
3964 variable_name_.resize(num_cols_, "");
3965 DisplayInfoOnVariables();
3966 std::string output = "min: ";
3967 bool has_objective = false;
3968 for (ColIndex col(0); col < num_cols_; ++col) {
3969 const Fractional coeff = objective_[col];
3970 has_objective |= (coeff != 0.0);
3971 absl::StrAppend(&output,
3972 StringifyMonomialWithFlags(coeff, variable_name_[col]));
3973 }
3974 if (!has_objective) {
3975 absl::StrAppend(&output, " 0");
3976 }
3977 VLOG(3) << output << ";";
3978 for (RowIndex row(0); row < num_rows_; ++row) {
3979 output = "";
3980 for (ColIndex col(0); col < num_cols_; ++col) {
3981 absl::StrAppend(&output,
3982 StringifyMonomialWithFlags(
3983 compact_matrix_.column(col).LookUpCoefficient(row),
3984 variable_name_[col]));
3985 }
3986 VLOG(3) << output << " = 0;";
3987 }
3988 VLOG(3) << "------";
3989 }
3990}
3991
3992void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3993 DCHECK(time_limit != nullptr);
3994 const double current_deterministic_time = DeterministicTime();
3995 const double deterministic_time_delta =
3996 current_deterministic_time - last_deterministic_time_update_;
3997 time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3998 last_deterministic_time_update_ = current_deterministic_time;
3999}
4000
4001#undef DCHECK_COL_BOUNDS
4002#undef DCHECK_ROW_BOUNDS
4003
4004} // namespace glop
4005} // namespace operations_research
bool LoggingIsEnabled() const
Returns true iff logging is enabled.
Definition logging.h:50
void SetParameters(const GlopParameters &parameters)
Sets the parameters for this component.
void SetTimeLimit(TimeLimit *time_limit)
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
void SetParameters(const GlopParameters &parameters)
Sets the parameters.
static constexpr InitialBasisHeuristic NONE
static constexpr InitialBasisHeuristic BIXBY
static constexpr InitialBasisHeuristic TRIANGULAR
static constexpr InitialBasisHeuristic MAROS
const DenseRow & objective_coefficients() const
Returns the objective coefficients (or cost) of variables as a row vector.
Definition lp_data.h:230
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
void SetParameters(const GlopParameters &parameters)
Sets the pricing parameters. This does not change the pricing rule.
Fractional GetVariableValue(ColIndex col) const
void SetIntegralityScale(ColIndex col, Fractional scale)
ConstraintStatus GetConstraintStatus(RowIndex row) const
void LoadStateForNextSolve(const BasisState &state)
Uses the given state as a warm-start for the next Solve() call.
Fractional GetDualValue(RowIndex row) const
Fractional GetReducedCost(ColIndex col) const
Fractional GetConstraintActivity(RowIndex row) const
RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales)
const DenseRow & GetDualRayRowCombination() const
This is the "dual ray" linear combination of the matrix rows.
RowIndex GetProblemNumRows() const
Getters to retrieve all the information computed by the last Solve().
void ComputeBasicVariablesForState(const LinearProgram &linear_program, const BasisState &state)
std::string StatString()
Returns statistics about this class as a string.
const BasisFactorization & GetBasisFactorization() const
VariableStatus GetVariableStatus(ColIndex col) const
void SetParameters(const GlopParameters &parameters)
Sets or gets the algorithm parameters to be used on the next Solve().
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
ABSL_MUST_USE_RESULT Status MinimizeFromTransposedMatrixWithSlack(const DenseRow &objective, Fractional objective_scaling_factor, Fractional objective_offset, TimeLimit *time_limit)
void SetStartingVariableValuesForNextSolve(const DenseRow &values)
static Status OK()
Improves readability but identical to 0-arg constructor.
Definition status.h:55
@ ERROR_LU
The LU factorization of the current basis couldn't be computed.
Definition status.h:34
StrictITISpan< RowIndex, const Fractional > ConstView
Definition lp_types.h:291
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
Fractional Get(ColIndex col) const
Getters for the variable values.
const DenseBitRow & GetCanDecreaseBitRow() const
const DenseBitRow & GetCanIncreaseBitRow() const
const DenseBitRow & GetNotBasicBitRow() const
const DenseBitRow & GetIsRelevantBitRow() const
time_limit
Definition solve.cc:22
constexpr ColIndex kInvalidCol(-1)
std::string GetVariableTypeString(VariableType variable_type)
Returns the string representation of the VariableType enum.
Definition lp_types.cc:56
StrictITIVector< RowIndex, ColIndex > RowToColMapping
Definition lp_types.h:394
Fractional Square(Fractional f)
Definition lp_utils.h:41
StrictITIVector< RowIndex, bool > DenseBooleanColumn
Column of booleans.
Definition lp_types.h:383
Permutation< ColIndex > ColumnPermutation
std::string Stringify(const Fractional x, bool fraction)
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
Definition lp_utils.h:111
Bitset64< ColIndex > DenseBitRow
Row of bits.
Definition lp_types.h:375
const DenseRow & Transpose(const DenseColumn &col)
Similar comment as the other Transpose() implementation above.
Definition lp_utils.h:199
std::string GetVariableStatusString(VariableStatus status)
Returns the string representation of the VariableStatus enum.
Definition lp_types.cc:75
Index ColToIntIndex(ColIndex col)
Get the integer index corresponding to the col.
Definition lp_types.h:60
constexpr const uint64_t kDeterministicSeed
ColIndex RowToColIndex(RowIndex row)
Get the ColIndex corresponding to the column # row.
Definition lp_types.h:54
bool IsFinite(Fractional value)
Definition lp_types.h:94
constexpr RowIndex kInvalidRow(-1)
VariableType
Different types of variables.
Definition lp_types.h:178
bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, const SparseMatrix &matrix_a, const CompactSparseMatrix &matrix_b)
StrictITIVector< ColIndex, VariableType > VariableTypeRow
Row of variable types.
Definition lp_types.h:369
Fractional InfinityNorm(const DenseColumn &v)
Returns the maximum of the coefficients of 'v'.
Definition lp_utils.cc:151
void ChangeSign(StrictITIVector< IndexType, Fractional > *data)
Changes the sign of all the entries in the given vector.
Definition lp_utils.h:312
StrictITIVector< ColIndex, VariableStatus > VariableStatusRow
Row of variable statuses.
Definition lp_types.h:372
std::string StringifyMonomial(const Fractional a, absl::string_view x, bool fraction)
constexpr Fractional kInfinity
Infinity for type Fractional.
Definition lp_types.h:87
StrictITIVector< RowIndex, Fractional > DenseColumn
Column-vector types. Column-vector types are indexed by a row index.
Definition lp_types.h:380
StrictITIVector< ColIndex, Fractional > DenseRow
Row-vector types. Row-vector types are indexed by a column index.
Definition lp_types.h:351
ProblemStatus
Different statuses for a given problem.
Definition lp_types.h:105
@ ABNORMAL
An error occurred during the solving process.
Definition lp_types.h:158
@ INIT
The solver didn't had a chance to prove anything.
Definition lp_types.h:146
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Returns the ConstraintStatus corresponding to a given VariableStatus.
Definition lp_types.cc:113
static double DeterministicTimeForFpOperations(int64_t n)
Definition lp_types.h:433
void ApplyColumnPermutationToRowIndexedVector(StrictITISpan< ColIndex, const ColIndex > col_perm, RowIndexedVector *v, RowIndexedVector *tmp)
std::string GetProblemStatusString(ProblemStatus problem_status)
Returns the string representation of the ProblemStatus enum.
Definition lp_types.cc:23
In SWIG mode, we don't want anything besides these top-level includes.
util_intops::StrongVector< RowEntryIndex, SubsetIndex > SparseRow
Definition base_types.h:62
DisabledScopedTimeDistributionUpdater ScopedTimeDistributionUpdater
Definition stats.h:409
STL namespace.
#define RETURN_IF_NULL(x)
#define DCHECK_ROW_BOUNDS(row)
ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false, "Display numbers as fractions.")
#define DCHECK_COL_BOUNDS(col)
#define IF_STATS_ENABLED(instructions)
Definition stats.h:414
#define SCOPED_TIME_STAT(stats)
Definition stats.h:421
#define GLOP_RETURN_IF_ERROR(function_call)
Macro to simplify error propagation between function returning Status.
Definition status.h:71
#define GLOP_RETURN_ERROR_IF_NULL(arg)
Macro to check that a pointer argument is not null.
Definition status.h:86
#define SOLVER_LOG(logger,...)
Definition logging.h:110