Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
revised_simplex.cc
Go to the documentation of this file.
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19#include <cstdlib>
20#include <functional>
21#include <map>
22#include <string>
23#include <utility>
24#include <vector>
25
26#include "absl/flags/flag.h"
27#include "absl/log/check.h"
28#include "absl/random/bit_gen_ref.h"
29#include "absl/random/random.h"
30#include "absl/random/seed_sequences.h"
31#include "absl/strings/match.h"
32#include "absl/strings/str_cat.h"
33#include "absl/strings/str_format.h"
34#include "absl/strings/string_view.h"
39#include "ortools/glop/parameters.pb.h"
40#include "ortools/glop/status.h"
54#include "ortools/util/stats.h"
56
57ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false,
58 "Display numbers as fractions.");
59ABSL_FLAG(bool, simplex_stop_after_first_basis, false,
60 "Stop after first basis has been computed.");
61ABSL_FLAG(bool, simplex_stop_after_feasibility, false,
62 "Stop after first phase has been completed.");
63ABSL_FLAG(bool, simplex_display_stats, false, "Display algorithm statistics.");
64
65namespace operations_research {
66namespace glop {
67namespace {
68
69// Calls the given closure upon destruction. It can be used to ensure that a
70// closure is executed whenever a function returns.
class Cleanup {
 public:
  // Takes ownership of the closure that will be run on destruction.
  explicit Cleanup(std::function<void()> closure)
      : closure_(std::move(closure)) {}

  // Runs the closure, if any. The guard against an empty std::function
  // avoids throwing std::bad_function_call from this (implicitly noexcept)
  // destructor, which would call std::terminate().
  ~Cleanup() {
    if (closure_) closure_();
  }

 private:
  std::function<void()> closure_;
};
80} // namespace
81
// Debug-only bound check: asserts 0 <= col < num_cols_. Kept as a macro so it
// can be used inside RevisedSimplex methods where num_cols_ is in scope.
#define DCHECK_COL_BOUNDS(col) \
  {                            \
    DCHECK_LE(0, col);         \
    DCHECK_GT(num_cols_, col); \
  }

// Debug-only bound check: asserts 0 <= row < num_rows_.
// TODO(user): Remove this macro.
#define DCHECK_ROW_BOUNDS(row) \
  {                            \
    DCHECK_LE(0, row);         \
    DCHECK_GT(num_rows_, row); \
  }
94
// Seed for the deterministic random generator; presumably used to seed
// deterministic_random_ elsewhere in this file — confirm at the usage site.
constexpr const uint64_t kDeterministicSeed = 42;

namespace {

// Selects which generator backs random_: the constructor picks absl_random_
// when this returns true and deterministic_random_ otherwise. Currently
// hard-coded so the deterministic generator is always used.
bool UseAbslRandom() { return false; }

}  // namespace
102
104 : problem_status_(ProblemStatus::INIT),
105 objective_(),
106 basis_(),
107 variable_name_(),
108 direction_(),
109 error_(),
110 random_(UseAbslRandom() ? absl::BitGenRef(absl_random_)
111 : absl::BitGenRef(deterministic_random_)),
112 basis_factorization_(&compact_matrix_, &basis_),
113 variables_info_(compact_matrix_),
114 primal_edge_norms_(compact_matrix_, variables_info_,
115 basis_factorization_),
116 dual_edge_norms_(basis_factorization_),
117 dual_prices_(random_),
118 variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
119 basis_factorization_, &dual_edge_norms_, &dual_prices_),
120 update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
121 basis_factorization_),
122 reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
123 basis_factorization_, random_),
124 entering_variable_(variables_info_, random_, &reduced_costs_),
125 primal_prices_(random_, variables_info_, &primal_edge_norms_,
126 &reduced_costs_),
127 iteration_stats_(),
128 ratio_test_stats_(),
129 function_stats_("SimplexFunctionStats"),
130 parameters_(),
131 test_lu_() {
132 SetParameters(parameters_);
133}
134
136 SCOPED_TIME_STAT(&function_stats_);
137 solution_state_.statuses.clear();
138 variable_starting_values_.clear();
139}
140
142 // We avoid marking the state as set externally if it is the same as the
143 // current one.
144 //
145 // TODO(user): Add comparison operator.
146 if (state.statuses == solution_state_.statuses) return;
147
148 solution_state_ = state;
149 solution_state_has_been_set_externally_ = true;
150}
151
153 const DenseRow& values) {
154 variable_starting_values_ = values;
155}
156
158 notify_that_matrix_is_unchanged_ = true;
159}
160
162 notify_that_matrix_is_unchanged_ = false;
163}
164
166 SCOPED_TIME_STAT(&function_stats_);
167 DCHECK(lp.IsCleanedUp());
169 Cleanup update_deterministic_time_on_return(
170 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
171
172 default_logger_.EnableLogging(parameters_.log_search_progress());
173 default_logger_.SetLogToStdOut(parameters_.log_to_stdout());
174 SOLVER_LOG(logger_, "");
175
176 // Initialization. Note That Initialize() must be called first since it
177 // analyzes the current solver state.
178 const double start_time = time_limit->GetElapsedTime();
179 GLOP_RETURN_IF_ERROR(Initialize(lp));
180 if (logger_->LoggingIsEnabled()) {
181 DisplayBasicVariableStatistics();
182 }
183
184 dual_infeasibility_improvement_direction_.clear();
185 update_row_.Invalidate();
186 test_lu_.Clear();
187 problem_status_ = ProblemStatus::INIT;
188 phase_ = Phase::FEASIBILITY;
189 num_iterations_ = 0;
190 num_feasibility_iterations_ = 0;
191 num_optimization_iterations_ = 0;
192 num_push_iterations_ = 0;
193 feasibility_time_ = 0.0;
194 optimization_time_ = 0.0;
195 push_time_ = 0.0;
196 total_time_ = 0.0;
197
198 // In case we abort because of an error, we cannot assume that the current
199 // solution state will be in sync with all our internal data structure. In
200 // case we abort without resetting it, setting this allow us to still use the
201 // previous state info, but we will double-check everything.
202 solution_state_has_been_set_externally_ = true;
203
204 if (VLOG_IS_ON(2)) {
205 ComputeNumberOfEmptyRows();
206 ComputeNumberOfEmptyColumns();
207 DisplayProblem();
208 }
209 if (absl::GetFlag(FLAGS_simplex_stop_after_first_basis)) {
210 DisplayAllStats();
211 return Status::OK();
212 }
213
214 const bool use_dual = parameters_.use_dual_simplex();
215
216 // TODO(user): Avoid doing the first phase checks when we know from the
217 // incremental solve that the solution is already dual or primal feasible.
218 SOLVER_LOG(logger_, "");
219 primal_edge_norms_.SetPricingRule(parameters_.feasibility_rule());
220 if (use_dual) {
221 if (parameters_.perturb_costs_in_dual_simplex()) {
222 reduced_costs_.PerturbCosts();
223 }
224
225 if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
226 variables_info_.MakeBoxedVariableRelevant(false);
228 DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
229
230 if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
231 // Note(user): In most cases, the matrix will already be refactorized
232 // and both Refactorize() and PermuteBasis() will do nothing. However,
233 // if the time limit is reached during the first phase, this might not
234 // be the case and RecomputeBasicVariableValues() below DCHECKs that the
235 // matrix is refactorized. This is not required, but we currently only
236 // want to recompute values from scratch when the matrix was just
237 // refactorized to maximize precision.
238 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
239 PermuteBasis();
240
241 variables_info_.MakeBoxedVariableRelevant(true);
242 reduced_costs_.MakeReducedCostsPrecise();
243
244 // This is needed to display errors properly.
245 MakeBoxedVariableDualFeasible(
246 variables_info_.GetNonBasicBoxedVariables(),
247 /*update_basic_values=*/false);
248 variable_values_.RecomputeBasicVariableValues();
249 }
250 } else {
251 // Test initial dual infeasibility, ignoring boxed variables. We currently
252 // refactorize/recompute the reduced costs if not already done.
253 // TODO(user): Not ideal in an incremental setting.
254 reduced_costs_.MakeReducedCostsPrecise();
255 bool refactorize = reduced_costs_.NeedsBasisRefactorization();
256 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
257
258 const Fractional initial_infeasibility =
260 if (initial_infeasibility <
261 reduced_costs_.GetDualFeasibilityTolerance()) {
262 SOLVER_LOG(logger_, "Initial basis is dual feasible.");
263 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
264 MakeBoxedVariableDualFeasible(
265 variables_info_.GetNonBasicBoxedVariables(),
266 /*update_basic_values=*/false);
267 variable_values_.RecomputeBasicVariableValues();
268 } else {
269 // Transform problem and recompute variable values.
270 variables_info_.TransformToDualPhaseIProblem(
271 reduced_costs_.GetDualFeasibilityTolerance(),
272 reduced_costs_.GetReducedCosts());
273 DenseRow zero; // We want the FREE variable at zero here.
274 variable_values_.ResetAllNonBasicVariableValues(zero);
275 variable_values_.RecomputeBasicVariableValues();
276
277 // Optimize.
278 DisplayErrors();
279 GLOP_RETURN_IF_ERROR(DualMinimize(false, time_limit));
280
281 // Restore original problem and recompute variable values. Note that we
282 // need the reduced cost on the fixed positions here.
283 variables_info_.EndDualPhaseI(
284 reduced_costs_.GetDualFeasibilityTolerance(),
285 reduced_costs_.GetFullReducedCosts());
286 variable_values_.ResetAllNonBasicVariableValues(
287 variable_starting_values_);
288 variable_values_.RecomputeBasicVariableValues();
289
290 // TODO(user): Note that if there was cost shifts, we just keep them
291 // until the end of the optim.
292 //
293 // TODO(user): What if slightly infeasible? we shouldn't really stop.
294 // Call primal ? use higher tolerance ? It seems we can always kind of
295 // continue and deal with the issue later. Find a way other than this +
296 // 1e-6 hack.
297 if (problem_status_ == ProblemStatus::OPTIMAL) {
298 if (reduced_costs_.ComputeMaximumDualInfeasibility() <
299 reduced_costs_.GetDualFeasibilityTolerance() + 1e-6) {
300 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
301 } else {
302 SOLVER_LOG(logger_, "Infeasible after first phase.");
303 problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
304 }
305 }
306 }
307 }
308 } else {
309 GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
310
311 // After the primal phase I, we need to restore the objective.
312 if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
313 InitializeObjectiveAndTestIfUnchanged(lp);
314 reduced_costs_.ResetForNewObjective();
315 }
316 }
317
318 DisplayErrors();
319
320 phase_ = Phase::OPTIMIZATION;
321 feasibility_time_ = time_limit->GetElapsedTime() - start_time;
322 primal_edge_norms_.SetPricingRule(parameters_.optimization_rule());
323 num_feasibility_iterations_ = num_iterations_;
324
325 // Because of shifts or perturbations, we may need to re-run a dual simplex
326 // after the primal simplex finished, or the opposite.
327 //
328 // We alter between solving with primal and dual Phase II algorithm as long as
329 // time limit permits *and* we did not yet achieve the desired precision.
330 // I.e., we run iteration i if the solution from iteration i-1 was not precise
331 // after we removed the bound and cost shifts and perturbations.
332 //
333 // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
334 // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
335 // (i.e., these statuses are not necesserily a consequence of hitting a time
336 // limit).
337 SOLVER_LOG(logger_, "");
338 for (int num_optims = 0;
339 // We want to enter the loop when both num_optims and num_iterations_ are
340 // *equal* to the corresponding limits (to return a meaningful status
341 // when the limits are set to 0).
342 num_optims <= parameters_.max_number_of_reoptimizations() &&
343 !objective_limit_reached_ &&
344 (num_iterations_ == 0 ||
345 num_iterations_ < parameters_.max_number_of_iterations()) &&
346 !time_limit->LimitReached() &&
347 !absl::GetFlag(FLAGS_simplex_stop_after_feasibility) &&
348 (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
349 problem_status_ == ProblemStatus::DUAL_FEASIBLE);
350 ++num_optims) {
351 if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
352 // Run the primal simplex.
353 GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
354 } else {
355 // Run the dual simplex.
357 DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
358 }
359
360 // PrimalMinimize() or DualMinimize() always double check the result with
361 // maximum precision by refactoring the basis before exiting (except if an
362 // iteration or time limit was reached).
363 DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
364 problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
365 basis_factorization_.IsRefactorized());
366
367 // If SetIntegralityScale() was called, we perform a polish operation.
368 if (!integrality_scale_.empty() &&
369 problem_status_ == ProblemStatus::OPTIMAL) {
371 }
372
373 // Remove the bound and cost shifts (or perturbations).
374 //
375 // Note(user): Currently, we never do both at the same time, so we could
376 // be a bit faster here, but then this is quick anyway.
377 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
378 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
379 PermuteBasis();
380 variable_values_.RecomputeBasicVariableValues();
381 reduced_costs_.ClearAndRemoveCostShifts();
382
383 DisplayErrors();
384
385 // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
386 // status by checking with the other phase I that the problem is really
387 // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
388 // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
389 // OPTIMAL and the dual does not have issues on this problem.
390 //
391 // TODO(user): There is another issue on infeas/qual.mps. I think we should
392 // just check the dual ray, not really the current solution dual
393 // feasibility.
394 if (problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
395 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
396 if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
397 variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
398 variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
399 SOLVER_LOG(logger_,
400 "PRIMAL_UNBOUNDED was reported, but the residual and/or "
401 "dual infeasibility is above the tolerance");
402 if (parameters_.change_status_to_imprecise()) {
403 problem_status_ = ProblemStatus::IMPRECISE;
404 }
405 break;
406 }
407
408 // All of our tolerance are okay, but the dual ray might be fishy. This
409 // happens on l30.mps and on L1_sixm250obs.mps.gz. If the ray do not
410 // seems good enough, we might actually just be at the optimal and have
411 // trouble going down to our relatively low default tolerances.
412 //
413 // The difference bettween optimal and unbounded can be thin. Say you
414 // have a free variable with no constraint and a cost of epsilon,
415 // depending on epsilon and your tolerance, this will either cause the
416 // problem to be unbounded, or can be ignored.
417 //
418 // Here, we compute what is the cost gain if we move from the current
419 // value with the ray up to the bonds + tolerance. If this gain is < 1,
420 // it is hard to claim we are really unbounded. This is a quick
421 // heuristic to error on the side of optimality rather than
422 // unboundedness.
423 double max_magnitude = 0.0;
424 double min_distance = kInfinity;
425 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
426 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
427 double cost_delta = 0.0;
428 for (ColIndex col(0); col < num_cols_; ++col) {
429 cost_delta += solution_primal_ray_[col] * objective_[col];
430 if (solution_primal_ray_[col] > 0 && upper_bounds[col] != kInfinity) {
431 const Fractional value = variable_values_.Get(col);
432 const Fractional distance = (upper_bounds[col] - value + tolerance) /
433 solution_primal_ray_[col];
434 min_distance = std::min(distance, min_distance);
435 max_magnitude = std::max(solution_primal_ray_[col], max_magnitude);
436 }
437 if (solution_primal_ray_[col] < 0 && lower_bounds[col] != -kInfinity) {
438 const Fractional value = variable_values_.Get(col);
439 const Fractional distance = (value - lower_bounds[col] + tolerance) /
440 -solution_primal_ray_[col];
441 min_distance = std::min(distance, min_distance);
442 max_magnitude = std::max(-solution_primal_ray_[col], max_magnitude);
443 }
444 }
445 SOLVER_LOG(logger_, "Primal unbounded ray: max blocking magnitude = ",
446 max_magnitude, ", min distance to bound + ", tolerance, " = ",
447 min_distance, ", ray cost delta = ", cost_delta);
448 if (min_distance * std::abs(cost_delta) < 1 &&
449 reduced_costs_.ComputeMaximumDualInfeasibility() <= tolerance) {
450 SOLVER_LOG(logger_,
451 "PRIMAL_UNBOUNDED was reported, but the tolerance are good "
452 "and the unbounded ray is not great.");
453 SOLVER_LOG(logger_,
454 "The difference between unbounded and optimal can depends "
455 "on a slight change of tolerance, trying to see if we are "
456 "at OPTIMAL after postsolve.");
457 problem_status_ = ProblemStatus::OPTIMAL;
458 }
459 break;
460 }
461 if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
462 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
463 if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
464 variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
465 reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
466 SOLVER_LOG(logger_,
467 "DUAL_UNBOUNDED was reported, but the residual and/or "
468 "dual infeasibility is above the tolerance");
469 if (parameters_.change_status_to_imprecise()) {
470 problem_status_ = ProblemStatus::IMPRECISE;
471 }
472 }
473
474 // Validate the dual ray that prove primal infeasibility.
475 //
476 // By taking the linear combination of the constraint, we should arrive
477 // to an infeasible <= 0 constraint using the variable bounds.
478 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
479 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
480 Fractional implied_lb = 0.0;
481 Fractional error = 0.0;
482 for (ColIndex col(0); col < num_cols_; ++col) {
483 const Fractional coeff = solution_dual_ray_row_combination_[col];
484 if (coeff > 0) {
485 if (lower_bounds[col] == -kInfinity) {
486 error = std::max(error, coeff);
487 } else {
488 implied_lb += coeff * lower_bounds[col];
489 }
490 } else if (coeff < 0) {
491 if (upper_bounds[col] == kInfinity) {
492 error = std::max(error, -coeff);
493 } else {
494 implied_lb += coeff * upper_bounds[col];
495 }
496 }
497 }
498 SOLVER_LOG(logger_, "Dual ray error=", error,
499 " infeasibility=", implied_lb);
500 if (implied_lb < tolerance || error > tolerance) {
501 SOLVER_LOG(logger_,
502 "DUAL_UNBOUNDED was reported, but the dual ray is not "
503 "proving infeasibility with high enough tolerance");
504 if (parameters_.change_status_to_imprecise()) {
505 problem_status_ = ProblemStatus::IMPRECISE;
506 }
507 }
508 break;
509 }
510
511 // Change the status, if after the shift and perturbation removal the
512 // problem is not OPTIMAL anymore.
513 if (problem_status_ == ProblemStatus::OPTIMAL) {
514 const Fractional solution_tolerance =
515 parameters_.solution_feasibility_tolerance();
516 const Fractional primal_residual =
517 variable_values_.ComputeMaximumPrimalResidual();
518 const Fractional dual_residual =
519 reduced_costs_.ComputeMaximumDualResidual();
520 if (primal_residual > solution_tolerance ||
521 dual_residual > solution_tolerance) {
522 SOLVER_LOG(logger_,
523 "OPTIMAL was reported, yet one of the residuals is "
524 "above the solution feasibility tolerance after the "
525 "shift/perturbation are removed.");
526 if (parameters_.change_status_to_imprecise()) {
527 problem_status_ = ProblemStatus::IMPRECISE;
528 }
529 } else {
530 // We use the "precise" tolerances here to try to report the best
531 // possible solution. Note however that we cannot really hope for an
532 // infeasibility lower than its corresponding residual error. Note that
533 // we already adapt the tolerance like this during the simplex
534 // execution.
535 const Fractional primal_tolerance = std::max(
536 primal_residual, parameters_.primal_feasibility_tolerance());
537 const Fractional dual_tolerance =
538 std::max(dual_residual, parameters_.dual_feasibility_tolerance());
539 const Fractional primal_infeasibility =
540 variable_values_.ComputeMaximumPrimalInfeasibility();
541 const Fractional dual_infeasibility =
542 reduced_costs_.ComputeMaximumDualInfeasibility();
543 if (primal_infeasibility > primal_tolerance &&
544 dual_infeasibility > dual_tolerance) {
545 SOLVER_LOG(logger_,
546 "OPTIMAL was reported, yet both of the infeasibility "
547 "are above the tolerance after the "
548 "shift/perturbation are removed.");
549 if (parameters_.change_status_to_imprecise()) {
550 problem_status_ = ProblemStatus::IMPRECISE;
551 }
552 } else if (primal_infeasibility > primal_tolerance) {
553 if (num_optims == parameters_.max_number_of_reoptimizations()) {
554 SOLVER_LOG(logger_,
555 "The primal infeasibility is still higher than the "
556 "requested internal tolerance, but the maximum "
557 "number of optimization is reached.");
558 break;
559 }
560 SOLVER_LOG(logger_, "");
561 SOLVER_LOG(logger_, "Re-optimizing with dual simplex ... ");
562 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
563 } else if (dual_infeasibility > dual_tolerance) {
564 if (num_optims == parameters_.max_number_of_reoptimizations()) {
565 SOLVER_LOG(logger_,
566 "The dual infeasibility is still higher than the "
567 "requested internal tolerance, but the maximum "
568 "number of optimization is reached.");
569 break;
570 }
571 SOLVER_LOG(logger_, "");
572 SOLVER_LOG(logger_, "Re-optimizing with primal simplex ... ");
573 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
574 }
575 }
576 }
577 }
578
579 // Check that the return status is "precise".
580 //
581 // TODO(user): we currently skip the DUAL_INFEASIBLE status because the
582 // quantities are not up to date in this case.
583 if (parameters_.change_status_to_imprecise() &&
584 problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
585 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
586 if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
587 reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
588 problem_status_ = ProblemStatus::IMPRECISE;
589 } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
590 problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
591 problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
592 if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
593 problem_status_ = ProblemStatus::IMPRECISE;
594 }
595 } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
596 problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
597 problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
598 if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
599 problem_status_ = ProblemStatus::IMPRECISE;
600 }
601 }
602 }
603
604 total_time_ = time_limit->GetElapsedTime() - start_time;
605 optimization_time_ = total_time_ - feasibility_time_;
606 num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
607
608 // If the user didn't provide starting variable values, then there is no need
609 // to check for super-basic variables.
610 if (!variable_starting_values_.empty()) {
611 const int num_super_basic = ComputeNumberOfSuperBasicVariables();
612 if (num_super_basic > 0) {
613 SOLVER_LOG(logger_,
614 "Num super-basic variables left after optimize phase: ",
615 num_super_basic);
616 if (parameters_.push_to_vertex()) {
617 if (problem_status_ == ProblemStatus::OPTIMAL) {
618 SOLVER_LOG(logger_, "");
619 phase_ = Phase::PUSH;
620 GLOP_RETURN_IF_ERROR(PrimalPush(time_limit));
621 // TODO(user): We should re-check for feasibility at this point and
622 // apply clean-up as needed.
623 } else {
624 SOLVER_LOG(logger_,
625 "Skipping push phase because optimize didn't succeed.");
626 }
627 }
628 }
629 }
630
631 total_time_ = time_limit->GetElapsedTime() - start_time;
632 push_time_ = total_time_ - feasibility_time_ - optimization_time_;
633 num_push_iterations_ = num_iterations_ - num_feasibility_iterations_ -
634 num_optimization_iterations_;
635
636 // Store the result for the solution getters.
637 solution_objective_value_ = ComputeInitialProblemObjectiveValue();
638 solution_dual_values_ = reduced_costs_.GetDualValues();
639 solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
640 SaveState();
641
642 if (lp.IsMaximizationProblem()) {
643 ChangeSign(&solution_dual_values_);
644 ChangeSign(&solution_reduced_costs_);
645 }
646
647 // If the problem is unbounded, set the objective value to +/- infinity.
648 if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
649 problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
650 solution_objective_value_ =
651 (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
652 : -kInfinity;
653 if (lp.IsMaximizationProblem()) {
654 solution_objective_value_ = -solution_objective_value_;
655 }
656 }
657
658 variable_starting_values_.clear();
659 DisplayAllStats();
660 return Status::OK();
661}
662
664 return problem_status_;
665}
666
668 return solution_objective_value_;
669}
670
672 return num_iterations_;
673}
674
675RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
676
677ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
678
680 return variable_values_.Get(col);
681}
682
684 return solution_reduced_costs_[col];
685}
686
688 return solution_reduced_costs_;
689}
690
692 return solution_dual_values_[row];
693}
694
696 return variables_info_.GetStatusRow()[col];
697}
698
699const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
700
702 // Note the negative sign since the slack variable is such that
703 // constraint_activity + slack_value = 0.
704 return -variable_values_.Get(SlackColIndex(row));
705}
706
708 // The status of the given constraint is the same as the status of the
709 // associated slack variable with a change of sign.
710 const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
713 }
716 }
718}
719
721 DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED);
722 return solution_primal_ray_;
723}
725 DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
726 return solution_dual_ray_;
727}
728
730 DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
731 return solution_dual_ray_row_combination_;
732}
733
734ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
735
737 DCHECK(basis_factorization_.GetColumnPermutation().empty());
738 return basis_factorization_;
739}
740
741std::string RevisedSimplex::GetPrettySolverStats() const {
742 return absl::StrFormat(
743 "Problem status : %s\n"
744 "Solving time : %-6.4g\n"
745 "Number of iterations : %u\n"
746 "Time for solvability (first phase) : %-6.4g\n"
747 "Number of iterations for solvability : %u\n"
748 "Time for optimization : %-6.4g\n"
749 "Number of iterations for optimization : %u\n"
750 "Stop after first basis : %d\n",
751 GetProblemStatusString(problem_status_), total_time_, num_iterations_,
752 feasibility_time_, num_feasibility_iterations_, optimization_time_,
753 num_optimization_iterations_,
754 absl::GetFlag(FLAGS_simplex_stop_after_first_basis));
755}
756
758 // TODO(user): Count what is missing.
759 return DeterministicTimeForFpOperations(num_update_price_operations_) +
760 basis_factorization_.DeterministicTime() +
761 update_row_.DeterministicTime() +
762 entering_variable_.DeterministicTime() +
763 reduced_costs_.DeterministicTime() +
764 primal_edge_norms_.DeterministicTime();
765}
766
767void RevisedSimplex::SetVariableNames() {
768 variable_name_.resize(num_cols_, "");
769 for (ColIndex col(0); col < first_slack_col_; ++col) {
770 const ColIndex var_index = col + 1;
771 variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
772 }
773 for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
774 const ColIndex var_index = col - first_slack_col_ + 1;
775 variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
776 }
777}
778
// Marks the given column non-basic with the given status, then recomputes the
// variable's value from that status. The order matters: the value is derived
// from the status just set.
void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
    ColIndex col, VariableStatus status) {
  variables_info_.UpdateToNonBasicStatus(col, status);
  variable_values_.SetNonBasicVariableValueFromStatus(col);
}
784
785bool RevisedSimplex::BasisIsConsistent() const {
786 const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
787 const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
788 for (RowIndex row(0); row < num_rows_; ++row) {
789 const ColIndex col = basis_[row];
790 if (!is_basic.IsSet(col)) return false;
791 if (variable_statuses[col] != VariableStatus::BASIC) return false;
792 }
793 ColIndex cols_in_basis(0);
794 ColIndex cols_not_in_basis(0);
795 for (ColIndex col(0); col < num_cols_; ++col) {
796 cols_in_basis += is_basic.IsSet(col);
797 cols_not_in_basis += !is_basic.IsSet(col);
798 if (is_basic.IsSet(col) !=
799 (variable_statuses[col] == VariableStatus::BASIC)) {
800 return false;
801 }
802 }
803 if (cols_in_basis != RowToColIndex(num_rows_)) return false;
804 if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
805 return true;
806}
807
808// Note(user): The basis factorization is not updated by this function but by
809// UpdateAndPivot().
// Swaps one column of the basis: the variable currently basic at basis_row
// leaves with the given non-basic status, and entering_col takes its place.
void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
                                 VariableStatus leaving_variable_status) {
  SCOPED_TIME_STAT(&function_stats_);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_ROW_BOUNDS(basis_row);

  // Check that this is not called with an entering_col already in the basis
  // and that the leaving col is indeed in the basis.
  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
  DCHECK_NE(basis_[basis_row], entering_col);
  DCHECK_NE(basis_[basis_row], kInvalidCol);

  const ColIndex leaving_col = basis_[basis_row];
  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));

  // Make leaving_col leave the basis and update relevant data.
  // Note that the leaving variable value is not necessarily at its exact
  // bound, which is like a bound shift.
  variables_info_.UpdateToNonBasicStatus(leaving_col, leaving_variable_status);
  // The leaving variable must land on a bound (or be fixed); FREE is not a
  // valid destination status here.
  DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
         leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
         leaving_variable_status == VariableStatus::FIXED_VALUE);

  // Install entering_col in the basis and mark it basic. Any cached update
  // row is now stale, so invalidate it.
  basis_[basis_row] = entering_col;
  variables_info_.UpdateToBasicStatus(entering_col);
  update_row_.Invalidate();
}
837
838namespace {
839
840// Comparator used to sort column indices according to a given value vector.
841class ColumnComparator {
842 public:
843 explicit ColumnComparator(const DenseRow& value) : value_(value) {}
844 bool operator()(ColIndex col_a, ColIndex col_b) const {
845 return value_[col_a] < value_[col_b];
846 }
847
848 private:
849 const DenseRow& value_;
850};
851
852} // namespace
853
854// To understand better what is going on in this function, let us say that this
855// algorithm will produce the optimal solution to a problem containing only
856// singleton columns (provided that the variables start at the minimum possible
857// cost, see DefaultVariableStatus()). This is unit tested.
858//
859// The error_ must be equal to the constraint activity for the current variable
860// values before this function is called. If error_[row] is 0.0, that mean this
861// constraint is currently feasible.
862void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
863 SCOPED_TIME_STAT(&function_stats_);
864 // Computes the singleton columns and the cost variation of the corresponding
865 // variables (in the only possible direction, i.e. away from its current
866 // bound) for a unit change in the infeasibility of the corresponding row.
867 //
868 // Note that the slack columns will be treated as normal singleton columns.
869 std::vector<ColIndex> singleton_column;
870 DenseRow cost_variation(num_cols_, 0.0);
871 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
872 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
873 for (ColIndex col(0); col < num_cols_; ++col) {
874 if (compact_matrix_.column(col).num_entries() != 1) continue;
875 if (lower_bounds[col] == upper_bounds[col]) continue;
876 const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
877 if (variable_values_.Get(col) == lower_bounds[col]) {
878 cost_variation[col] = objective_[col] / std::abs(slope);
879 } else {
880 cost_variation[col] = -objective_[col] / std::abs(slope);
881 }
882 singleton_column.push_back(col);
883 }
884 if (singleton_column.empty()) return;
885
886 // Sort the singleton columns for the case where many of them correspond to
887 // the same row (equivalent to a piecewise-linear objective on this variable).
888 // Negative cost_variation first since moving the singleton variable away from
889 // its current bound means the least decrease in the objective function for
890 // the same "error" variation.
891 ColumnComparator comparator(cost_variation);
892 std::sort(singleton_column.begin(), singleton_column.end(), comparator);
893 DCHECK_LE(cost_variation[singleton_column.front()],
894 cost_variation[singleton_column.back()]);
895
896 // Use a singleton column to "absorb" the error when possible to avoid
897 // introducing unneeded artificial variables. Note that with scaling on, the
898 // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
899 // them) and that the SingletonColumnSignPreprocessor makes them all positive.
900 // However, this code works for any coefficient value.
901 const DenseRow& variable_values = variable_values_.GetDenseRow();
902 for (const ColIndex col : singleton_column) {
903 const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
904
905 // If no singleton columns have entered the basis for this row, choose the
906 // first one. It will be the one with the least decrease in the objective
907 // function when it leaves the basis.
908 if ((*basis)[row] == kInvalidCol) {
909 (*basis)[row] = col;
910 }
911
912 // If there is already no error in this row (i.e. it is primal-feasible),
913 // there is nothing to do.
914 if (error_[row] == 0.0) continue;
915
916 // In this case, all the infeasibility can be "absorbed" and this variable
917 // may not be at one of its bound anymore, so we have to use it in the
918 // basis.
919 const Fractional coeff =
920 compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
921 const Fractional new_value = variable_values[col] + error_[row] / coeff;
922 if (new_value >= lower_bounds[col] && new_value <= upper_bounds[col]) {
923 error_[row] = 0.0;
924
925 // Use this variable in the initial basis.
926 (*basis)[row] = col;
927 continue;
928 }
929
930 // The idea here is that if the singleton column cannot be used to "absorb"
931 // all error_[row], if it is boxed, it can still be used to make the
932 // infeasibility smaller (with a bound flip).
933 const Fractional box_width = variables_info_.GetBoundDifference(col);
934 DCHECK_NE(box_width, 0.0);
935 DCHECK_NE(error_[row], 0.0);
936 const Fractional error_sign = error_[row] / coeff;
937 if (variable_values[col] == lower_bounds[col] && error_sign > 0.0) {
938 DCHECK(IsFinite(box_width));
939 error_[row] -= coeff * box_width;
940 SetNonBasicVariableStatusAndDeriveValue(col,
942 continue;
943 }
944 if (variable_values[col] == upper_bounds[col] && error_sign < 0.0) {
945 DCHECK(IsFinite(box_width));
946 error_[row] += coeff * box_width;
947 SetNonBasicVariableStatusAndDeriveValue(col,
949 continue;
950 }
951 }
952}
953
954bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
955 const LinearProgram& lp, bool lp_is_in_equation_form,
956 bool* only_change_is_new_rows, bool* only_change_is_new_cols,
957 ColIndex* num_new_cols) {
958 SCOPED_TIME_STAT(&function_stats_);
959 DCHECK(only_change_is_new_rows != nullptr);
960 DCHECK(only_change_is_new_cols != nullptr);
961 DCHECK(num_new_cols != nullptr);
962 DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
963 DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
964
965 // This works whether the lp is in equation form (with slack) or not.
966 const bool old_part_of_matrix_is_unchanged =
968 num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
969
970 // This is the only adaptation we need for the test below.
971 const ColIndex lp_first_slack =
972 lp_is_in_equation_form ? lp.GetFirstSlackVariable() : lp.num_variables();
973
974 // Test if the matrix is unchanged, and if yes, just returns true. Note that
975 // this doesn't check the columns corresponding to the slack variables,
976 // because they were checked by lp.IsInEquationForm() when Solve() was called.
977 if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
978 lp_first_slack == first_slack_col_) {
979 // Tricky: if the parameters "use_transposed_matrix" changed since last call
980 // we want to reflect the current state. We use the empty transposed matrix
981 // to detect that. Recomputing the transpose when the matrix is empty is not
982 // really a big overhead.
983 if (parameters_.use_transposed_matrix()) {
984 if (transposed_matrix_.IsEmpty()) {
985 transposed_matrix_.PopulateFromTranspose(compact_matrix_);
986 }
987 } else {
988 transposed_matrix_.Reset(RowIndex(0));
989 }
990 return true;
991 }
992
993 // Check if the new matrix can be derived from the old one just by adding
994 // new rows (i.e new constraints).
995 *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
996 lp.num_constraints() > num_rows_ &&
997 lp_first_slack == first_slack_col_;
998
999 // Check if the new matrix can be derived from the old one just by adding
1000 // new columns (i.e new variables).
1001 *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
1002 lp.num_constraints() == num_rows_ &&
1003 lp_first_slack > first_slack_col_;
1004 *num_new_cols = *only_change_is_new_cols ? lp_first_slack - first_slack_col_
1005 : ColIndex(0);
1006
1007 // Initialize first_slack_.
1008 first_slack_col_ = lp_first_slack;
1009
1010 // Initialize the new dimensions.
1011 num_rows_ = lp.num_constraints();
1012 num_cols_ = lp_first_slack + RowToColIndex(lp.num_constraints());
1013
1014 // Populate compact_matrix_ and transposed_matrix_ if needed.
1015 if (lp_is_in_equation_form) {
1016 // TODO(user): This can be sped up by removing the MatrixView, but then
1017 // this path will likely go away.
1018 compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
1019 } else {
1020 compact_matrix_.PopulateFromSparseMatrixAndAddSlacks(lp.GetSparseMatrix());
1021 }
1022 if (parameters_.use_transposed_matrix()) {
1023 transposed_matrix_.PopulateFromTranspose(compact_matrix_);
1024 } else {
1025 transposed_matrix_.Reset(RowIndex(0));
1026 }
1027 return false;
1028}
1029
1030// Preconditions: This should only be called if there are only new variable
1031// in the lp.
1032bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
1033 const LinearProgram& lp, bool lp_is_in_equation_form,
1034 ColIndex num_new_cols) {
1035 SCOPED_TIME_STAT(&function_stats_);
1036 DCHECK_LE(num_new_cols, first_slack_col_);
1037 const ColIndex first_new_col(first_slack_col_ - num_new_cols);
1038
1039 // Check the original variable bounds.
1040 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1041 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1042 for (ColIndex col(0); col < first_new_col; ++col) {
1043 if (lower_bounds[col] != lp.variable_lower_bounds()[col] ||
1044 upper_bounds[col] != lp.variable_upper_bounds()[col]) {
1045 return false;
1046 }
1047 }
1048
1049 // Check that each new variable has a bound of zero.
1050 for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
1051 if (lp.variable_lower_bounds()[col] != 0.0 &&
1052 lp.variable_upper_bounds()[col] != 0.0) {
1053 return false;
1054 }
1055 }
1056
1057 // Check that the slack bounds are unchanged.
1058 if (lp_is_in_equation_form) {
1059 for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
1060 if (lower_bounds[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
1061 upper_bounds[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
1062 return false;
1063 }
1064 }
1065 } else {
1066 DCHECK_EQ(num_rows_, lp.num_constraints());
1067 for (RowIndex row(0); row < num_rows_; ++row) {
1068 const ColIndex col = first_slack_col_ + RowToColIndex(row);
1069 if (lower_bounds[col - num_new_cols] !=
1070 -lp.constraint_upper_bounds()[row] ||
1071 upper_bounds[col - num_new_cols] !=
1072 -lp.constraint_lower_bounds()[row]) {
1073 return false;
1074 }
1075 }
1076 }
1077 return true;
1078}
1079
1080bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
1081 const LinearProgram& lp) {
1082 SCOPED_TIME_STAT(&function_stats_);
1083
1084 bool objective_is_unchanged = true;
1085 objective_.resize(num_cols_, 0.0);
1086
1087 // This function work whether the lp is in equation form (with slack) or
1088 // without, since the objective of the slacks are always zero.
1089 DCHECK_GE(num_cols_, lp.num_variables());
1090 for (ColIndex col(lp.num_variables()); col < num_cols_; ++col) {
1091 if (objective_[col] != 0.0) {
1092 objective_is_unchanged = false;
1093 objective_[col] = 0.0;
1094 }
1095 }
1096
1097 if (lp.IsMaximizationProblem()) {
1098 // Note that we use the minimization version of the objective internally.
1099 for (ColIndex col(0); col < lp.num_variables(); ++col) {
1100 const Fractional coeff = -lp.objective_coefficients()[col];
1101 if (objective_[col] != coeff) {
1102 objective_is_unchanged = false;
1103 objective_[col] = coeff;
1104 }
1105 }
1106 objective_offset_ = -lp.objective_offset();
1107 objective_scaling_factor_ = -lp.objective_scaling_factor();
1108 } else {
1109 for (ColIndex col(0); col < lp.num_variables(); ++col) {
1110 const Fractional coeff = lp.objective_coefficients()[col];
1111 if (objective_[col] != coeff) {
1112 objective_is_unchanged = false;
1113 objective_[col] = coeff;
1114 }
1115 }
1116 objective_offset_ = lp.objective_offset();
1117 objective_scaling_factor_ = lp.objective_scaling_factor();
1118 }
1119
1120 return objective_is_unchanged;
1121}
1122
1123void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
1124 objective_limit_reached_ = false;
1125 DCHECK(std::isfinite(objective_offset_));
1126 DCHECK(std::isfinite(objective_scaling_factor_));
1127 DCHECK_NE(0.0, objective_scaling_factor_);
1128
1129 // This sets dual_objective_limit_ and then primal_objective_limit_.
1130 for (const bool set_dual : {true, false}) {
1131 // NOTE(user): If objective_scaling_factor_ is negative, the optimization
1132 // direction was reversed (during preprocessing or inside revised simplex),
1133 // i.e., the original problem is maximization. In such case the _meaning_ of
1134 // the lower and upper limits is swapped. To this end we must change the
1135 // signs of limits, which happens automatically when calculating shifted
1136 // limits. We must also use upper (resp. lower) limit in place of lower
1137 // (resp. upper) limit when calculating the final objective_limit_.
1138 //
1139 // Choose lower limit if using the dual simplex and scaling factor is
1140 // negative or if using the primal simplex and scaling is nonnegative, upper
1141 // limit otherwise.
1142 const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
1143 ? parameters_.objective_lower_limit()
1144 : parameters_.objective_upper_limit();
1145 const Fractional shifted_limit =
1146 limit / objective_scaling_factor_ - objective_offset_;
1147 if (set_dual) {
1148 dual_objective_limit_ = shifted_limit;
1149 } else {
1150 primal_objective_limit_ = shifted_limit;
1151 }
1152 }
1153}
1154
1155// This implementation starts with an initial matrix B equal to the identity
1156// matrix (modulo a column permutation). For that it uses either the slack
1157// variables or the singleton columns present in the problem. Afterwards, the
1158// fixed slacks in the basis are exchanged with normal columns of A if possible
1159// by the InitialBasis class.
1160Status RevisedSimplex::CreateInitialBasis() {
1161 SCOPED_TIME_STAT(&function_stats_);
1162
1163 // Initialize the variable values and statuses.
1164 // Note that for the dual algorithm, boxed variables will be made
1165 // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
1166 // really matter at which of their two finite bounds they start.
1167 variables_info_.InitializeToDefaultStatus();
1168 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
1169
1170 // Start by using an all-slack basis.
1171 RowToColMapping basis(num_rows_, kInvalidCol);
1172 for (RowIndex row(0); row < num_rows_; ++row) {
1173 basis[row] = SlackColIndex(row);
1174 }
1175
1176 // If possible, for the primal simplex we replace some slack variables with
1177 // some singleton columns present in the problem.
1178 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1179 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1180 if (!parameters_.use_dual_simplex() &&
1181 parameters_.initial_basis() != GlopParameters::MAROS &&
1182 parameters_.exploit_singleton_column_in_initial_basis()) {
1183 // For UseSingletonColumnInInitialBasis() to work better, we change
1184 // the value of the boxed singleton column with a non-zero cost to the best
1185 // of their two bounds.
1186 for (ColIndex col(0); col < num_cols_; ++col) {
1187 if (compact_matrix_.column(col).num_entries() != 1) continue;
1188 const VariableStatus status = variables_info_.GetStatusRow()[col];
1189 const Fractional objective = objective_[col];
1190 if (objective > 0 && IsFinite(lower_bounds[col]) &&
1192 SetNonBasicVariableStatusAndDeriveValue(col,
1194 } else if (objective < 0 && IsFinite(upper_bounds[col]) &&
1196 SetNonBasicVariableStatusAndDeriveValue(col,
1198 }
1199 }
1200
1201 // Compute the primal infeasibility of the initial variable values in
1202 // error_.
1203 ComputeVariableValuesError();
1204
1205 // TODO(user): A better but slightly more complex algorithm would be to:
1206 // - Ignore all singleton columns except the slacks during phase I.
1207 // - For this, change the slack variable bounds accordingly.
1208 // - At the end of phase I, restore the slack variable bounds and perform
1209 // the same algorithm to start with feasible and "optimal" values of the
1210 // singleton columns.
1211 basis.assign(num_rows_, kInvalidCol);
1212 UseSingletonColumnInInitialBasis(&basis);
1213
1214 // Eventually complete the basis with fixed slack columns.
1215 for (RowIndex row(0); row < num_rows_; ++row) {
1216 if (basis[row] == kInvalidCol) {
1217 basis[row] = SlackColIndex(row);
1218 }
1219 }
1220 }
1221
1222 // Use an advanced initial basis to remove the fixed variables from the basis.
1223 if (parameters_.initial_basis() == GlopParameters::NONE) {
1224 return InitializeFirstBasis(basis);
1225 }
1226 if (parameters_.initial_basis() == GlopParameters::MAROS) {
1227 InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1228 upper_bounds, variables_info_.GetTypeRow());
1229 if (parameters_.use_dual_simplex()) {
1230 // This dual version only uses zero-cost columns to complete the
1231 // basis.
1232 initial_basis.GetDualMarosBasis(num_cols_, &basis);
1233 } else {
1234 initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1235 }
1236 int number_changed = 0;
1237 for (RowIndex row(0); row < num_rows_; ++row) {
1238 if (basis[row] != SlackColIndex(row)) {
1239 number_changed++;
1240 }
1241 }
1242 VLOG(1) << "Number of Maros basis changes: " << number_changed;
1243 } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1244 parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1245 // First unassign the fixed variables from basis.
1246 int num_fixed_variables = 0;
1247 for (RowIndex row(0); row < basis.size(); ++row) {
1248 const ColIndex col = basis[row];
1249 if (lower_bounds[col] == upper_bounds[col]) {
1250 basis[row] = kInvalidCol;
1251 ++num_fixed_variables;
1252 }
1253 }
1254
1255 if (num_fixed_variables == 0) {
1256 SOLVER_LOG(logger_, "Crash is set to ", parameters_.initial_basis(),
1257 " but there is no equality rows to remove from initial all "
1258 "slack basis. Starting from there.");
1259 } else {
1260 // Then complete the basis with an advanced initial basis algorithm.
1261 SOLVER_LOG(logger_, "Trying to remove ", num_fixed_variables,
1262 " fixed variables from the initial basis.");
1263 InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1264 upper_bounds, variables_info_.GetTypeRow());
1265
1266 if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1267 if (parameters_.use_scaling()) {
1268 initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1269 } else {
1270 VLOG(1) << "Bixby initial basis algorithm requires the problem "
1271 << "to be scaled. Skipping Bixby's algorithm.";
1272 }
1273 } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1274 // Note the use of num_cols_ here because this algorithm
1275 // benefits from treating fixed slack columns like any other column.
1276 if (parameters_.use_dual_simplex()) {
1277 // This dual version only uses zero-cost columns to complete the
1278 // basis.
1279 initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1280 } else {
1281 initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1282 }
1283
1284 const Status status = InitializeFirstBasis(basis);
1285 if (status.ok()) {
1286 return status;
1287 } else {
1288 SOLVER_LOG(
1289 logger_,
1290 "Advanced basis algo failed, Reverting to all slack basis.");
1291
1292 for (RowIndex row(0); row < num_rows_; ++row) {
1293 basis[row] = SlackColIndex(row);
1294 }
1295 }
1296 }
1297 }
1298 } else {
1299 LOG(WARNING) << "Unsupported initial_basis parameters: "
1300 << parameters_.initial_basis();
1301 }
1302
1303 return InitializeFirstBasis(basis);
1304}
1305
1306Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
1307 basis_ = basis;
1308
1309 // For each row which does not have a basic column, assign it to the
1310 // corresponding slack column.
1311 basis_.resize(num_rows_, kInvalidCol);
1312 for (RowIndex row(0); row < num_rows_; ++row) {
1313 if (basis_[row] == kInvalidCol) {
1314 basis_[row] = SlackColIndex(row);
1315 }
1316 }
1317
1318 GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
1319 PermuteBasis();
1320
1321 // Test that the upper bound on the condition number of basis is not too high.
1322 // The number was not computed by any rigorous analysis, we just prefer to
1323 // revert to the all slack basis if the condition number of our heuristic
1324 // first basis seems bad. See for instance on cond11.mps, where we get an
1325 // infinity upper bound.
1326 const Fractional condition_number_ub =
1327 basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
1328 if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
1329 const std::string error_message =
1330 absl::StrCat("The matrix condition number upper bound is too high: ",
1331 condition_number_ub);
1332 SOLVER_LOG(logger_, error_message);
1333 return Status(Status::ERROR_LU, error_message);
1334 }
1335
1336 // Everything is okay, finish the initialization.
1337 for (RowIndex row(0); row < num_rows_; ++row) {
1338 variables_info_.UpdateToBasicStatus(basis_[row]);
1339 }
1340 DCHECK(BasisIsConsistent());
1341
1342 variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
1343 variable_values_.RecomputeBasicVariableValues();
1344
1345 if (logger_->LoggingIsEnabled()) {
1346 // TODO(user): Maybe return an error status if this is too high. Note
1347 // however that if we want to do that, we need to reset variables_info_ to a
1348 // consistent state.
1349 const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1350 if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
1351 SOLVER_LOG(
1352 logger_,
1353 "The primal residual of the initial basis is above the tolerance, ",
1354 variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
1355 }
1356 }
1357 return Status::OK();
1358}
1359
// Loads `lp` into the internal state, warm-starting from the previous solve
// whenever the kind of change (new rows, new columns, bound/objective change)
// allows it; otherwise falls back to CreateInitialBasis() via the
// solve_from_scratch path at the end.
Status RevisedSimplex::Initialize(const LinearProgram& lp) {
  parameters_ = initial_parameters_;
  PropagateParameters();

  // We accept both kind of input.
  //
  // TODO(user): Ideally there should be no need to ever put the slack in the
  // LinearProgram. That take extra memory (one big SparseColumn per slack) and
  // just add visible overhead in incremental solve when one wants to add/remove
  // constraints. But for historical reason, we handle both for now.
  const bool lp_is_in_equation_form = lp.IsInEquationForm();

  // Calling InitializeMatrixAndTestIfUnchanged() first is important because
  // this is where num_rows_ and num_cols_ are computed.
  //
  // Note that these functions can't depend on use_dual_simplex() since we may
  // change it below.
  ColIndex num_new_cols(0);
  bool only_change_is_new_rows = false;
  bool only_change_is_new_cols = false;
  bool matrix_is_unchanged = true;
  bool only_new_bounds = false;
  if (solution_state_.IsEmpty() || !notify_that_matrix_is_unchanged_) {
    matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
        lp, lp_is_in_equation_form, &only_change_is_new_rows,
        &only_change_is_new_cols, &num_new_cols);
    only_new_bounds = only_change_is_new_cols && num_new_cols > 0 &&
                      OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
                          lp, lp_is_in_equation_form, num_new_cols);
  } else if (DEBUG_MODE) {
    // The caller promised the matrix did not change; verify it in debug mode.
    CHECK(InitializeMatrixAndTestIfUnchanged(
        lp, lp_is_in_equation_form, &only_change_is_new_rows,
        &only_change_is_new_cols, &num_new_cols));
  }
  // The promise is single-use: it must be re-established before the next call.
  notify_that_matrix_is_unchanged_ = false;

  // TODO(user): move objective with ReducedCosts class.
  const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);

  const bool bounds_are_unchanged =
      lp_is_in_equation_form
          ? variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
                lp.variable_lower_bounds(), lp.variable_upper_bounds())
          : variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
                lp.variable_lower_bounds(), lp.variable_upper_bounds(),
                lp.constraint_lower_bounds(), lp.constraint_upper_bounds());

  // If parameters_.allow_simplex_algorithm_change() is true and we already have
  // a primal (resp. dual) feasible solution, then we use the primal (resp.
  // dual) algorithm since there is a good chance that it will be faster.
  if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
    if (objective_is_unchanged && !bounds_are_unchanged) {
      parameters_.set_use_dual_simplex(true);
      PropagateParameters();
    }
    if (bounds_are_unchanged && !objective_is_unchanged) {
      parameters_.set_use_dual_simplex(false);
      PropagateParameters();
    }
  }

  InitializeObjectiveLimit(lp);

  // Computes the variable name as soon as possible for logging.
  // TODO(user): do we really need to store them? we could just compute them
  // on the fly since we do not need the speed.
  if (VLOG_IS_ON(2)) {
    SetVariableNames();
  }

  // Warm-start? This is supported only if the solution_state_ is non empty,
  // i.e., this revised simplex i) was already used to solve a problem, or
  // ii) the solution state was provided externally. Note that the
  // solution_state_ may have nothing to do with the current problem, e.g.,
  // objective, matrix, and/or bounds had changed. So we support several
  // scenarios of warm-start depending on how did the problem change and which
  // simplex algorithm is used (primal or dual).
  bool solve_from_scratch = true;

  // Try to perform a "quick" warm-start with no matrix factorization involved.
  if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
    if (!parameters_.use_dual_simplex()) {
      // With primal simplex, always clear dual norms and dual pricing.
      // Incrementality is supported only if only change to the matrix and
      // bounds is adding new columns (objective may change), and that all
      // new columns have a bound equal to zero.
      dual_edge_norms_.Clear();
      dual_pricing_vector_.clear();
      if (matrix_is_unchanged && bounds_are_unchanged) {
        // TODO(user): Do not do that if objective_is_unchanged. Currently
        // this seems to break something. Investigate.
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      } else if (only_change_is_new_cols && only_new_bounds) {
        variables_info_.InitializeFromBasisState(first_slack_col_, num_new_cols,
                                                 solution_state_);
        variable_values_.ResetAllNonBasicVariableValues(
            variable_starting_values_);

        // The old slack columns shifted by num_new_cols; remap the basis.
        const ColIndex first_new_col(first_slack_col_ - num_new_cols);
        for (ColIndex& col_ref : basis_) {
          if (col_ref >= first_new_col) {
            col_ref += num_new_cols;
          }
        }

        // Make sure the primal edge norm are recomputed from scratch.
        // TODO(user): only the norms of the new columns actually need to be
        // computed.
        primal_edge_norms_.Clear();
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      }
    } else {
      // With dual simplex, always clear primal norms. Incrementality is
      // supported only if the objective remains the same (the matrix may
      // contain new rows and the bounds may change).
      primal_edge_norms_.Clear();
      if (objective_is_unchanged) {
        if (matrix_is_unchanged) {
          if (!bounds_are_unchanged) {
            variables_info_.InitializeFromBasisState(
                first_slack_col_, ColIndex(0), solution_state_);
            variable_values_.ResetAllNonBasicVariableValues(
                variable_starting_values_);
            variable_values_.RecomputeBasicVariableValues();
          }
          solve_from_scratch = false;
        } else if (only_change_is_new_rows) {
          // For the dual-simplex, we also perform a warm start if a couple of
          // new rows where added.
          variables_info_.InitializeFromBasisState(
              first_slack_col_, ColIndex(0), solution_state_);
          dual_edge_norms_.ResizeOnNewRows(num_rows_);

          // TODO(user): The reduced costs do not really need to be recomputed.
          // We just need to initialize the ones of the new slack variables to
          // 0.
          reduced_costs_.ClearAndRemoveCostShifts();
          dual_pricing_vector_.clear();

          // Note that this needs to be done after the Clear() calls above.
          if (InitializeFirstBasis(basis_).ok()) {
            solve_from_scratch = false;
          }
        }
      }
    }
  }

  // If we couldn't perform a "quick" warm start above, we can at least try to
  // reuse the variable statuses.
  if (solve_from_scratch && !solution_state_.IsEmpty()) {
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();

    // If an external basis has been provided or if the matrix changed, we need
    // to perform more work, e.g., factorize the proposed basis and validate it.
    variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0),
                                             solution_state_);

    // Use the set of basic columns as a "hint" to construct the first basis.
    std::vector<ColIndex> candidates;
    for (const ColIndex col : variables_info_.GetIsBasicBitRow()) {
      candidates.push_back(col);
    }
    SOLVER_LOG(logger_, "The warm-start state contains ", candidates.size(),
               " candidates for the basis (num_rows = ", num_rows_.value(),
               ").");

    // Optimization: Try to factorize it right away if we have the correct
    // number of element. Ideally the other path below would no require a
    // "double" factorization effort, so this would not be needed.
    if (candidates.size() == num_rows_) {
      basis_.clear();
      for (const ColIndex col : candidates) {
        basis_.push_back(col);
      }

      // TODO(user): Depending on the error here, there is no point doing extra
      // work below. This is the case when we fail because of a bad initial
      // condition number for instance.
      if (InitializeFirstBasis(basis_).ok()) {
        solve_from_scratch = false;
      }
    }

    if (solve_from_scratch) {
      basis_ = basis_factorization_.ComputeInitialBasis(candidates);
      const int num_super_basic =
          variables_info_.ChangeUnusedBasicVariablesToFree(basis_);
      const int num_snapped = variables_info_.SnapFreeVariablesToBound(
          parameters_.crossover_bound_snapping_distance(),
          variable_starting_values_);
      if (logger_->LoggingIsEnabled()) {
        // NOTE(review): the first two string pieces below concatenate with no
        // value between them — this looks like a missing argument (probably
        // the count of unused BASIC columns); confirm against upstream.
        SOLVER_LOG(logger_, "The initial basis did not use ",
                   " BASIC columns from the initial state and used ",
                   (num_rows_ - (candidates.size() - num_super_basic)).value(),
                   " slack variables that were not marked BASIC.");
        if (num_snapped > 0) {
          SOLVER_LOG(logger_, num_snapped,
                     " of the FREE variables where moved to their bound.");
        }
      }

      if (InitializeFirstBasis(basis_).ok()) {
        solve_from_scratch = false;
      } else {
        SOLVER_LOG(logger_,
                   "RevisedSimplex is not using the warm start "
                   "basis because it is not factorizable.");
      }
    }
  }

  if (solve_from_scratch) {
    SOLVER_LOG(logger_, "Starting basis: create from scratch.");
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();
    GLOP_RETURN_IF_ERROR(CreateInitialBasis());
  } else {
    SOLVER_LOG(logger_, "Starting basis: incremental solve.");
  }
  DCHECK(BasisIsConsistent());
  return Status::OK();
}
1592
1593void RevisedSimplex::DisplayBasicVariableStatistics() {
1594 SCOPED_TIME_STAT(&function_stats_);
1595
1596 int num_fixed_variables = 0;
1597 int num_free_variables = 0;
1598 int num_variables_at_bound = 0;
1599 int num_slack_variables = 0;
1600 int num_infeasible_variables = 0;
1601
1602 const DenseRow& variable_values = variable_values_.GetDenseRow();
1603 const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1604 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1605 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1606 const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1607 for (RowIndex row(0); row < num_rows_; ++row) {
1608 const ColIndex col = basis_[row];
1609 const Fractional value = variable_values[col];
1610 if (variable_types[col] == VariableType::UNCONSTRAINED) {
1611 ++num_free_variables;
1612 }
1613 if (value > upper_bounds[col] + tolerance ||
1614 value < lower_bounds[col] - tolerance) {
1615 ++num_infeasible_variables;
1616 }
1617 if (col >= first_slack_col_) {
1618 ++num_slack_variables;
1619 }
1620 if (lower_bounds[col] == upper_bounds[col]) {
1621 ++num_fixed_variables;
1622 } else if (variable_values[col] == lower_bounds[col] ||
1623 variable_values[col] == upper_bounds[col]) {
1624 ++num_variables_at_bound;
1625 }
1626 }
1627
1628 SOLVER_LOG(logger_, "The matrix with slacks has ",
1629 compact_matrix_.num_rows().value(), " rows, ",
1630 compact_matrix_.num_cols().value(), " columns, ",
1631 compact_matrix_.num_entries().value(), " entries.");
1632 SOLVER_LOG(logger_, "Number of basic infeasible variables: ",
1633 num_infeasible_variables);
1634 SOLVER_LOG(logger_, "Number of basic slack variables: ", num_slack_variables);
1635 SOLVER_LOG(logger_,
1636 "Number of basic variables at bound: ", num_variables_at_bound);
1637 SOLVER_LOG(logger_, "Number of basic fixed variables: ", num_fixed_variables);
1638 SOLVER_LOG(logger_, "Number of basic free variables: ", num_free_variables);
1639 SOLVER_LOG(logger_, "Number of super-basic variables: ",
1640 ComputeNumberOfSuperBasicVariables());
1641}
1642
// Snapshots the current variable statuses into solution_state_ so a later
// solve can warm-start from this basis, and marks the state as internally
// produced (not injected by the caller).
void RevisedSimplex::SaveState() {
  // The status row must have one entry per column; otherwise the cached
  // problem dimensions are out of sync.
  DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
  solution_state_.statuses = variables_info_.GetStatusRow();
  solution_state_has_been_set_externally_ = false;
}
1648
1649RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1650 DenseBooleanColumn contains_data(num_rows_, false);
1651 for (ColIndex col(0); col < num_cols_; ++col) {
1652 for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1653 contains_data[e.row()] = true;
1654 }
1655 }
1656 RowIndex num_empty_rows(0);
1657 for (RowIndex row(0); row < num_rows_; ++row) {
1658 if (!contains_data[row]) {
1659 ++num_empty_rows;
1660 VLOG(2) << "Row " << row << " is empty.";
1661 }
1662 }
1663 return num_empty_rows;
1664}
1665
1666ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1667 ColIndex num_empty_cols(0);
1668 for (ColIndex col(0); col < num_cols_; ++col) {
1669 if (compact_matrix_.column(col).IsEmpty()) {
1670 ++num_empty_cols;
1671 VLOG(2) << "Column " << col << " is empty.";
1672 }
1673 }
1674 return num_empty_cols;
1675}
1676
1677int RevisedSimplex::ComputeNumberOfSuperBasicVariables() const {
1678 const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
1679 int num_super_basic = 0;
1680 for (ColIndex col(0); col < num_cols_; ++col) {
1681 if (variable_statuses[col] == VariableStatus::FREE &&
1682 variable_values_.Get(col) != 0.0) {
1683 ++num_super_basic;
1684 }
1685 }
1686 return num_super_basic;
1687}
1688
1689void RevisedSimplex::CorrectErrorsOnVariableValues() {
1690 SCOPED_TIME_STAT(&function_stats_);
1691 DCHECK(basis_factorization_.IsRefactorized());
1692
1693 // TODO(user): The primal residual error does not change if we take degenerate
1694 // steps or if we do not change the variable values. No need to recompute it
1695 // in this case.
1696 const Fractional primal_residual =
1697 variable_values_.ComputeMaximumPrimalResidual();
1698
1699 // If the primal_residual is within the tolerance, no need to recompute
1700 // the basic variable values with a better precision.
1701 if (primal_residual >= parameters_.harris_tolerance_ratio() *
1702 parameters_.primal_feasibility_tolerance()) {
1703 variable_values_.RecomputeBasicVariableValues();
1704 VLOG(1) << "Primal infeasibility (bounds error) = "
1705 << variable_values_.ComputeMaximumPrimalInfeasibility()
1706 << ", Primal residual |A.x - b| = "
1707 << variable_values_.ComputeMaximumPrimalResidual();
1708 }
1709}
1710
1711void RevisedSimplex::ComputeVariableValuesError() {
1712 SCOPED_TIME_STAT(&function_stats_);
1713 error_.AssignToZero(num_rows_);
1714 const DenseRow& variable_values = variable_values_.GetDenseRow();
1715 for (ColIndex col(0); col < num_cols_; ++col) {
1716 const Fractional value = variable_values[col];
1717 compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1718 }
1719}
1720
// Computes direction_, the right-solve of the given problem column against
// the current basis factorization, and caches its infinity norm in
// direction_infinity_norm_. Also guarantees direction_.non_zeros is filled.
void RevisedSimplex::ComputeDirection(ColIndex col) {
  SCOPED_TIME_STAT(&function_stats_);
  DCHECK_COL_BOUNDS(col);
  basis_factorization_.RightSolveForProblemColumn(col, &direction_);
  Fractional norm = 0.0;
  if (direction_.non_zeros.empty()) {
    // The solve may return a dense result with an empty non-zero list.
    // We still compute the direction non-zeros because our code relies on it.
    const RowIndex num_rows = num_rows_;
    const DenseColumn::ConstView direction = direction_.values.const_view();
    for (RowIndex row(0); row < num_rows; ++row) {
      const Fractional value = direction[row];
      if (value != 0.0) {
        direction_.non_zeros.push_back(row);
        norm = std::max(norm, std::abs(value));
      }
    }
  } else {
    // Sparse case: the norm only needs to scan the non-zeros.
    for (const auto e : direction_) {
      norm = std::max(norm, std::abs(e.coefficient()));
    }
  }
  direction_infinity_norm_ = norm;
  // Density of the direction vector, recorded for statistics only.
  IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
      num_rows_ == 0 ? 0.0
                     : static_cast<double>(direction_.non_zeros.size()) /
                           static_cast<double>(num_rows_.value())));
}
1748
1749Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
1750 SCOPED_TIME_STAT(&function_stats_);
1751 compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
1752 for (const auto e : direction_) {
1753 compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
1754 &error_);
1755 }
1756 return InfinityNorm(error_);
1757}
1758
1759template <bool is_entering_reduced_cost_positive>
1760Fractional RevisedSimplex::GetRatio(const DenseRow& lower_bounds,
1761 const DenseRow& upper_bounds,
1762 RowIndex row) const {
1763 const ColIndex col = basis_[row];
1764 const Fractional direction = direction_[row];
1765 const Fractional value = variable_values_.Get(col);
1766 DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1767 DCHECK_NE(direction, 0.0);
1768 if (is_entering_reduced_cost_positive) {
1769 if (direction > 0.0) {
1770 return (upper_bounds[col] - value) / direction;
1771 } else {
1772 return (lower_bounds[col] - value) / direction;
1773 }
1774 } else {
1775 if (direction > 0.0) {
1776 return (value - lower_bounds[col]) / direction;
1777 } else {
1778 return (value - upper_bounds[col]) / direction;
1779 }
1780 }
1781}
1782
// First pass of the two-pass Harris ratio test: returns the Harris ratio
// (the classic minimum ratio, relaxed by the Harris tolerance) and fills
// leaving_candidates with every row whose ratio is small enough to be a
// leaving candidate for the second pass in ChooseLeavingVariableRow().
template <bool is_entering_reduced_cost_positive>
Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
    Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
  SCOPED_TIME_STAT(&function_stats_);
  const Fractional harris_tolerance =
      parameters_.harris_tolerance_ratio() *
      parameters_.primal_feasibility_tolerance();
  const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
                                   parameters_.primal_feasibility_tolerance();

  // Initially, we can skip any variable with a ratio greater than
  // bound_flip_ratio since it seems to be always better to choose the
  // bound-flip over such leaving variable.
  Fractional harris_ratio = bound_flip_ratio;
  leaving_candidates->Clear();

  // If the basis is refactorized, then we should have everything with a good
  // precision, so we only consider "acceptable" pivots. Otherwise we consider
  // all the entries, and if the algorithm return a pivot that is too small, we
  // will refactorize and recompute the relevant quantities.
  const Fractional threshold = basis_factorization_.IsRefactorized()
                                   ? parameters_.minimum_acceptable_pivot()
                                   : parameters_.ratio_test_zero_threshold();

  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
  for (const auto e : direction_) {
    // Entries below the pivot threshold are never usable as pivots.
    const Fractional magnitude = std::abs(e.coefficient());
    if (magnitude <= threshold) continue;
    const Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(
        lower_bounds, upper_bounds, e.row());
    if (ratio <= harris_ratio) {
      leaving_candidates->SetCoefficient(e.row(), ratio);

      // The second max() makes sure harris_ratio is lower bounded by a small
      // positive value. The more classical approach is to bound it by 0.0 but
      // since we will always perform a small positive step, we allow any
      // variable to go a bit more out of bound (even if it is past the harris
      // tolerance). This increases the number of candidates and allows us to
      // choose a more numerically stable pivot.
      //
      // Note that at least lower bounding it by 0.0 is really important on
      // numerically difficult problems because it helps in the choice of a
      // stable pivot.
      harris_ratio = std::min(harris_ratio,
                              std::max(minimum_delta / magnitude,
                                       ratio + harris_tolerance / magnitude));
    }
  }
  return harris_ratio;
}
1834
1835namespace {
1836
1837// Returns true if the candidate ratio is supposed to be more stable than the
1838// current ratio (or if the two are equal).
1839// The idea here is to take, by order of preference:
// - the minimum positive ratio in order to introduce a primal infeasibility
1841// which is as small as possible.
1842// - or the least negative one in order to have the smallest bound shift
1843// possible on the leaving variable.
1844bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1845 if (current >= 0.0) {
1846 return candidate >= 0.0 && candidate <= current;
1847 } else {
1848 return candidate >= current;
1849 }
1850}
1851
1852} // namespace
1853
// Ratio-test or Quotient-test. Choose the row of the leaving variable.
// Known as CHUZR or CHUZRO in FORTRAN codes.
//
// On success, either *leaving_row is kInvalidRow (bound-flip of the entering
// variable) or it holds the chosen pivot row; *step_length and *target_bound
// are filled accordingly. *refactorize may be set instead when a small pivot
// could be avoided by refactorizing first.
Status RevisedSimplex::ChooseLeavingVariableRow(
    ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
    RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
  SCOPED_TIME_STAT(&function_stats_);
  GLOP_RETURN_ERROR_IF_NULL(refactorize);
  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
  GLOP_RETURN_ERROR_IF_NULL(step_length);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_NE(0.0, reduced_cost);

  // A few cases will cause the test to be recomputed from the beginning.
  int stats_num_leaving_choices = 0;
  equivalent_leaving_choices_.clear();
  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
  while (true) {
    stats_num_leaving_choices = 0;

    // We initialize current_ratio with the maximum step the entering variable
    // can take (bound-flip). Note that we do not use tolerance here.
    const Fractional entering_value = variable_values_.Get(entering_col);
    Fractional current_ratio =
        (reduced_cost > 0.0) ? entering_value - lower_bounds[entering_col]
                             : upper_bounds[entering_col] - entering_value;
    DCHECK_GT(current_ratio, 0.0);

    // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
    // actually computes the minimum leaving ratio of all the variables. This is
    // the same as the 'classic' ratio test.
    const Fractional harris_ratio =
        (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
                                   current_ratio, &leaving_candidates_)
                             : ComputeHarrisRatioAndLeavingCandidates<false>(
                                   current_ratio, &leaving_candidates_);

    // If the bound-flip is a viable solution (i.e. it doesn't move the basic
    // variable too much out of bounds), we take it as it is always stable and
    // fast.
    if (current_ratio <= harris_ratio) {
      *leaving_row = kInvalidRow;
      *step_length = current_ratio;
      break;
    }

    // Second pass of the Harris ratio test. Amongst the variables with 'ratio
    // <= harris_ratio', we choose the leaving row with the largest coefficient.
    //
    // This has a big impact, because picking a leaving variable with a small
    // direction_[row] is the main source of Abnormal LU errors.
    Fractional pivot_magnitude = 0.0;
    stats_num_leaving_choices = 0;
    *leaving_row = kInvalidRow;
    equivalent_leaving_choices_.clear();
    for (const SparseColumn::Entry e : leaving_candidates_) {
      const Fractional ratio = e.coefficient();
      if (ratio > harris_ratio) continue;
      ++stats_num_leaving_choices;
      const RowIndex row = e.row();

      // If the magnitudes are the same, we choose the leaving variable with
      // what is probably the more stable ratio, see
      // IsRatioMoreOrEquallyStable().
      const Fractional candidate_magnitude = std::abs(direction_[row]);
      if (candidate_magnitude < pivot_magnitude) continue;
      if (candidate_magnitude == pivot_magnitude) {
        if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
        if (ratio == current_ratio) {
          // Perfect tie: remember the row so it can be drawn at random below.
          DCHECK_NE(kInvalidRow, *leaving_row);
          equivalent_leaving_choices_.push_back(row);
          continue;
        }
      }
      equivalent_leaving_choices_.clear();
      current_ratio = ratio;
      pivot_magnitude = candidate_magnitude;
      *leaving_row = row;
    }

    // Break the ties randomly.
    if (!equivalent_leaving_choices_.empty()) {
      equivalent_leaving_choices_.push_back(*leaving_row);
      *leaving_row =
          equivalent_leaving_choices_[std::uniform_int_distribution<int>(
              0, equivalent_leaving_choices_.size() - 1)(random_)];
    }

    // Since we took care of the bound-flip at the beginning, at this point
    // we have a valid leaving row.
    DCHECK_NE(kInvalidRow, *leaving_row);

    // A variable already outside one of its bounds +/- tolerance is considered
    // at its bound and its ratio is zero. Not doing this may lead to a step
    // that moves the objective in the wrong direction. We may want to allow
    // such steps, but then we will need to check that it doesn't break the
    // bounds of the other variables.
    if (current_ratio <= 0.0) {
      // Instead of doing a zero step, we do a small positive step. This
      // helps on degenerate problems.
      const Fractional minimum_delta =
          parameters_.degenerate_ministep_factor() *
          parameters_.primal_feasibility_tolerance();
      *step_length = minimum_delta / pivot_magnitude;
    } else {
      *step_length = current_ratio;
    }

    // Note(user): Testing the pivot at each iteration is useful for debugging
    // an LU factorization problem. Remove the false if you need to investigate
    // this, it makes sure that this will be compiled away.
    if (/* DISABLES CODE */ (false)) {
      TestPivot(entering_col, *leaving_row);
    }

    // We try various "heuristics" to avoid a small pivot.
    //
    // The smaller 'direction_[*leaving_row]', the less precise
    // it is. So we want to avoid pivoting by such a row. Small pivots lead to
    // ill-conditioned bases or even to matrices that are not a basis at all if
    // the actual (infinite-precision) coefficient is zero.
    //
    // TODO(user): We may have to choose another entering column if
    // we cannot prevent pivoting by a small pivot.
    // (Chvatal, p.115, about epsilon2.)
    if (pivot_magnitude <
        parameters_.small_pivot_threshold() * direction_infinity_norm_) {
      // The first countermeasure is to recompute everything to the best
      // precision we can in the hope of avoiding such a choice. Note that this
      // helps a lot on the Netlib problems.
      if (!basis_factorization_.IsRefactorized()) {
        VLOG(1) << "Refactorizing to avoid pivoting by "
                << direction_[*leaving_row]
                << " direction_infinity_norm_ = " << direction_infinity_norm_
                << " reduced cost = " << reduced_cost;
        *refactorize = true;
        return Status::OK();
      }

      // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
      // we know that this pivot will still have an acceptable magnitude.
      //
      // TODO(user): An issue left to fix is that if there is no such pivot at
      // all, then we will report unbounded even if this is not really the case.
      // As of 2018/07/18, this happens on l30.mps.
      VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
              << " direction_infinity_norm_ = " << direction_infinity_norm_
              << " reduced cost = " << reduced_cost;
      DCHECK_GE(std::abs(direction_[*leaving_row]),
                parameters_.minimum_acceptable_pivot());
      IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
    }
    break;
  }

  // Update the target bound.
  if (*leaving_row != kInvalidRow) {
    const bool is_reduced_cost_positive = (reduced_cost > 0.0);
    const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
    *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
                        ? upper_bounds[basis_[*leaving_row]]
                        : lower_bounds[basis_[*leaving_row]];
  }

  // Stats.
  IF_STATS_ENABLED({
    ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
    if (!equivalent_leaving_choices_.empty()) {
      ratio_test_stats_.num_perfect_ties.Add(
          equivalent_leaving_choices_.size());
    }
    if (*leaving_row != kInvalidRow) {
      ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
    }
  });
  return Status::OK();
}
2031
2032namespace {
2033
2034// Store a row with its ratio, coefficient magnitude and target bound. This is
2035// used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
2036// details.
struct BreakPoint {
  BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
             Fractional _target_bound)
      : row(_row),
        ratio(_ratio),
        coeff_magnitude(_coeff_magnitude),
        target_bound(_target_bound) {}

  // We want to process the breakpoints by increasing ratio and decreasing
  // coefficient magnitude (if the ratios are the same). Returns false if "this"
  // is before "other" in a priority queue. (std heaps are max-heaps, hence the
  // reversed comparisons below.)
  bool operator<(const BreakPoint& other) const {
    if (ratio == other.ratio) {
      if (coeff_magnitude == other.coeff_magnitude) {
        // Final deterministic tie-break on the row index.
        return row > other.row;
      }
      return coeff_magnitude < other.coeff_magnitude;
    }
    return ratio > other.ratio;
  }

  RowIndex row;                // Row of the basic variable for this breakpoint.
  Fractional ratio;            // Step at which the transition happens.
  Fractional coeff_magnitude;  // |direction| for this row; larger is stabler.
  Fractional target_bound;     // Bound the variable moves to if chosen.
};
2063
2064} // namespace
2065
// Primal phase I ratio test: collects the breakpoints at which a basic
// variable changes of feasibility status along direction_, then picks the
// leaving row with the largest coefficient magnitude among the breakpoints
// that still improve the sum of primal infeasibilities. May instead request a
// refactorization (*refactorize) to avoid a small pivot.
void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
    ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
    RowIndex* leaving_row, Fractional* step_length,
    Fractional* target_bound) const {
  SCOPED_TIME_STAT(&function_stats_);
  RETURN_IF_NULL(refactorize);
  RETURN_IF_NULL(leaving_row);
  RETURN_IF_NULL(step_length);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_NE(0.0, reduced_cost);
  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();

  // We initialize current_ratio with the maximum step the entering variable
  // can take (bound-flip). Note that we do not use tolerance here.
  const Fractional entering_value = variable_values_.Get(entering_col);
  Fractional current_ratio = (reduced_cost > 0.0)
                                 ? entering_value - lower_bounds[entering_col]
                                 : upper_bounds[entering_col] - entering_value;
  DCHECK_GT(current_ratio, 0.0);

  std::vector<BreakPoint> breakpoints;
  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
  for (const auto e : direction_) {
    const Fractional direction =
        reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
    const Fractional magnitude = std::abs(direction);
    if (magnitude < tolerance) continue;

    // Computes by how much we can add 'direction' to the basic variable value
    // with index 'row' until it changes of primal feasibility status. That is
    // from infeasible to feasible or from feasible to infeasible. Note that the
    // transition infeasible->feasible->infeasible is possible. We use
    // tolerances here, but when the step will be performed, it will move the
    // variable to the target bound (possibly taking a small negative step).
    //
    // Note(user): The negative step will only happen when the leaving variable
    // was slightly infeasible (less than tolerance). Moreover, the overall
    // infeasibility will not necessarily increase since it doesn't take into
    // account all the variables with an infeasibility smaller than the
    // tolerance, and here we will at least improve the one of the leaving
    // variable.
    const ColIndex col = basis_[e.row()];
    DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));

    const Fractional value = variable_values_.Get(col);
    const Fractional lower_bound = lower_bounds[col];
    const Fractional upper_bound = upper_bounds[col];
    const Fractional to_lower = (lower_bound - tolerance - value) / direction;
    const Fractional to_upper = (upper_bound + tolerance - value) / direction;

    // Enqueue the possible transitions. Note that the second tests exclude the
    // case where to_lower or to_upper are infinite.
    if (to_lower >= 0.0 && to_lower < current_ratio) {
      breakpoints.push_back(
          BreakPoint(e.row(), to_lower, magnitude, lower_bound));
    }
    if (to_upper >= 0.0 && to_upper < current_ratio) {
      breakpoints.push_back(
          BreakPoint(e.row(), to_upper, magnitude, upper_bound));
    }
  }

  // Order the breakpoints by increasing ratio and decreasing coefficient
  // magnitude (if the ratios are the same).
  std::make_heap(breakpoints.begin(), breakpoints.end());

  // Select the last breakpoint that still improves the infeasibility and has
  // the largest coefficient magnitude.
  Fractional improvement = std::abs(reduced_cost);
  Fractional best_magnitude = 0.0;
  *leaving_row = kInvalidRow;
  while (!breakpoints.empty()) {
    const BreakPoint top = breakpoints.front();
    // TODO(user): consider using >= here. That will lead to bigger ratio and
    // hence a better impact on the infeasibility. The drawback is that more
    // effort may be needed to update the reduced costs.
    //
    // TODO(user): Use a random tie breaking strategy for BreakPoint with
    // same ratio and same coefficient magnitude? Koberstein explains in his PhD
    // that it helped on the dual-simplex.
    if (top.coeff_magnitude > best_magnitude) {
      *leaving_row = top.row;
      current_ratio = top.ratio;
      best_magnitude = top.coeff_magnitude;
      *target_bound = top.target_bound;
    }

    // As long as the sum of primal infeasibilities is decreasing, we look for
    // pivots that are numerically more stable.
    improvement -= top.coeff_magnitude;
    if (improvement <= 0.0) break;
    std::pop_heap(breakpoints.begin(), breakpoints.end());
    breakpoints.pop_back();
  }

  // Try to avoid a small pivot by refactorizing.
  if (*leaving_row != kInvalidRow) {
    const Fractional threshold =
        parameters_.small_pivot_threshold() * direction_infinity_norm_;
    if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
      *refactorize = true;
      return;
    }
  }
  *step_length = current_ratio;
}
2173
// This implements the pricing step for the dual simplex.
//
// Picks as leaving row the maximum-price entry of dual_prices_ (recomputing
// the prices first if they are empty). On success, *cost_variation holds the
// (signed) distance of the leaving variable to the bound it will move to,
// and *target_bound that bound; *leaving_row is kInvalidRow when no leaving
// variable exists.
Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
                                                    Fractional* cost_variation,
                                                    Fractional* target_bound) {
  SCOPED_TIME_STAT(&function_stats_);
  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
  GLOP_RETURN_ERROR_IF_NULL(target_bound);

  // This is not supposed to happen, but better be safe.
  if (dual_prices_.Size() == 0) {
    variable_values_.RecomputeDualPrices(
        parameters_.dual_price_prioritize_norm());
  }

  // Return right away if there is no leaving variable.
  // Fill cost_variation and target_bound otherwise.
  *leaving_row = dual_prices_.GetMaximum();
  if (*leaving_row == kInvalidRow) return Status::OK();

  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
  const ColIndex leaving_col = basis_[*leaving_row];
  const Fractional value = variable_values_.Get(leaving_col);
  if (value < lower_bounds[leaving_col]) {
    // Below its lower bound: the variable will move up to that bound.
    *cost_variation = lower_bounds[leaving_col] - value;
    *target_bound = lower_bounds[leaving_col];
    DCHECK_GT(*cost_variation, 0.0);
  } else {
    // Otherwise it moves down to its upper bound.
    *cost_variation = upper_bounds[leaving_col] - value;
    *target_bound = upper_bounds[leaving_col];
    DCHECK_LT(*cost_variation, 0.0);
  }
  return Status::OK();
}
2209
2210namespace {
2211
2212// Returns true if a basic variable with given cost and type is to be considered
2213// as a leaving candidate for the dual phase I.
bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
                                  Fractional threshold) {
  // A zero cost does not contribute to the dual infeasibility sum.
  if (cost == 0.0) return false;
  // NOTE(review): the first two clauses accept any non-zero cost for boxed and
  // fixed variables — presumably because such variables can always be flipped
  // to the other bound; confirm against the dual phase I algorithm notes.
  return type == VariableType::UPPER_AND_LOWER_BOUNDED ||
         type == VariableType::FIXED_VARIABLE ||
         (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
         (type == VariableType::LOWER_BOUNDED && cost > threshold);
}
2222
2223} // namespace
2224
2225// Important: The norm should be updated before this is called.
2226template <bool use_dense_update>
2227void RevisedSimplex::OnDualPriceChange(DenseColumn::ConstView squared_norm,
2228 RowIndex row, VariableType type,
2229 Fractional threshold) {
2230 const Fractional price = dual_pricing_vector_[row];
2231 const bool is_candidate =
2232 IsDualPhaseILeavingCandidate(price, type, threshold);
2233 if (is_candidate) {
2234 if (use_dense_update) {
2235 dual_prices_.DenseAddOrUpdate(row, Square(price) / squared_norm[row]);
2236 } else {
2237 dual_prices_.AddOrUpdate(row, Square(price) / squared_norm[row]);
2238 }
2239 } else {
2240 dual_prices_.Remove(row);
2241 }
2242}
2243
// Incrementally updates dual_pricing_vector_ and the dual_prices_ queue after
// the basis change (leaving_row, entering_col), instead of recomputing them
// from scratch. No-op when the next pricing call will recompute everything.
void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
                                           ColIndex entering_col) {
  SCOPED_TIME_STAT(&function_stats_);

  // If the prices are going to be recomputed, there is nothing to do. See the
  // logic at the beginning of DualPhaseIChooseLeavingVariableRow() which must
  // be in sync with this one.
  //
  // TODO(user): Move the logic in a single class, so it is easier to enforce
  // invariant.
  if (reduced_costs_.AreReducedCostsRecomputed() ||
      dual_edge_norms_.NeedsBasisRefactorization() ||
      dual_pricing_vector_.empty()) {
    return;
  }

  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
  const Fractional threshold = parameters_.ratio_test_zero_threshold();

  // Note that because the norm are also updated only on the position of the
  // direction, scaled_dual_pricing_vector_ will be up to date.
  const DenseColumn::ConstView squared_norms =
      dual_edge_norms_.GetEdgeSquaredNorms();

  // Convert the dual_pricing_vector_ from the old basis into the new one (which
  // is the same as multiplying it by an Eta matrix corresponding to the
  // direction).
  const Fractional step =
      dual_pricing_vector_[leaving_row] / direction_[leaving_row];
  for (const auto e : direction_) {
    dual_pricing_vector_[e.row()] -= e.coefficient() * step;
    OnDualPriceChange(squared_norms, e.row(), variable_type[basis_[e.row()]],
                      threshold);
  }
  // The loop above also touched the leaving position; overwrite it with the
  // step value.
  dual_pricing_vector_[leaving_row] = step;

  // The entering_col which was dual-infeasible is now dual-feasible, so we
  // have to remove it from the infeasibility sum.
  dual_pricing_vector_[leaving_row] -=
      dual_infeasibility_improvement_direction_[entering_col];
  if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
    --num_dual_infeasible_positions_;
  }
  dual_infeasibility_improvement_direction_[entering_col] = 0.0;

  // The leaving variable will also be dual-feasible.
  dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;

  // Update the leaving row entering candidate status.
  OnDualPriceChange(squared_norms, leaving_row, variable_type[entering_col],
                    threshold);
}
2296
// For each given column, recomputes the sign (+1/-1/0) in which its reduced
// cost must move to improve dual feasibility. When a sign changed, the
// difference is scattered through the column into a scratchpad, right-solved
// once against the basis, and folded into dual_pricing_vector_ / dual_prices_.
template <typename Cols>
void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
    const Cols& cols) {
  SCOPED_TIME_STAT(&function_stats_);
  bool something_to_do = false;
  const DenseBitRow::ConstView can_decrease =
      variables_info_.GetCanDecreaseBitRow().const_view();
  const DenseBitRow::ConstView can_increase =
      variables_info_.GetCanIncreaseBitRow().const_view();
  const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
  const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
  auto improvement_direction = dual_infeasibility_improvement_direction_.view();
  for (const ColIndex col : cols) {
    const Fractional reduced_cost = reduced_costs[col];
    // +1 when the cost must increase, -1 when it must decrease, 0 when the
    // position is dual-feasible.
    const Fractional sign =
        (can_increase[col] && reduced_cost < -tolerance) ? 1.0
        : (can_decrease[col] && reduced_cost > tolerance) ? -1.0
                                                          : 0.0;
    if (sign != improvement_direction[col]) {
      // Maintain the count of dual-infeasible positions on each transition
      // to/from zero.
      if (sign == 0.0) {
        --num_dual_infeasible_positions_;
      } else if (improvement_direction[col] == 0.0) {
        ++num_dual_infeasible_positions_;
      }
      // Lazily set up the scratchpad on the first change.
      if (!something_to_do) {
        initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
        initially_all_zero_scratchpad_.ClearSparseMask();
        initially_all_zero_scratchpad_.non_zeros.clear();
        something_to_do = true;
      }

      // We add a factor 10 because of the scattered access.
      num_update_price_operations_ +=
          10 * compact_matrix_.column(col).num_entries().value();
      compact_matrix_.ColumnAddMultipleToSparseScatteredColumn(
          col, sign - improvement_direction[col],
          &initially_all_zero_scratchpad_);
      improvement_direction[col] = sign;
    }
  }
  if (something_to_do) {
    initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
    initially_all_zero_scratchpad_.ClearSparseMask();
    const DenseColumn::ConstView squared_norms =
        dual_edge_norms_.GetEdgeSquaredNorms();

    const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
    const Fractional threshold = parameters_.ratio_test_zero_threshold();
    basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
    if (initially_all_zero_scratchpad_.non_zeros.empty()) {
      // Dense result: scan all the rows and reset the scratchpad at the end.
      dual_prices_.StartDenseUpdates();
      for (RowIndex row(0); row < num_rows_; ++row) {
        if (initially_all_zero_scratchpad_[row] == 0.0) continue;
        dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
        OnDualPriceChange</*use_dense_update=*/true>(
            squared_norms, row, variable_type[basis_[row]], threshold);
      }
      initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
    } else {
      // Sparse result: only touch (and re-zero) the non-zero positions.
      for (const auto e : initially_all_zero_scratchpad_) {
        dual_pricing_vector_[e.row()] += e.coefficient();
        OnDualPriceChange(squared_norms, e.row(),
                          variable_type[basis_[e.row()]], threshold);
        initially_all_zero_scratchpad_[e.row()] = 0.0;
      }
    }
    initially_all_zero_scratchpad_.non_zeros.clear();
  }
}
2366
// Dual phase I pricing: refreshes the pricing state (from scratch or
// incrementally from the last update row), then picks the maximum-price row
// as leaving row. *leaving_row is kInvalidRow when the problem has no
// dual-infeasible position left.
Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
    RowIndex* leaving_row, Fractional* cost_variation,
    Fractional* target_bound) {
  SCOPED_TIME_STAT(&function_stats_);
  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();

  // dual_infeasibility_improvement_direction_ is zero for dual-feasible
  // positions and contains the sign in which the reduced cost of this column
  // needs to move to improve the feasibility otherwise (+1 or -1).
  //
  // Its current value was the one used to compute dual_pricing_vector_ and
  // was updated accordingly by DualPhaseIUpdatePrice().
  //
  // If more variables changed of dual-feasibility status during the last
  // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
  // take them into account.
  if (reduced_costs_.AreReducedCostsRecomputed() ||
      dual_edge_norms_.NeedsBasisRefactorization() ||
      dual_pricing_vector_.empty()) {
    // Recompute everything from scratch.
    num_dual_infeasible_positions_ = 0;
    dual_pricing_vector_.AssignToZero(num_rows_);
    dual_prices_.ClearAndResize(num_rows_);
    dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
    DualPhaseIUpdatePriceOnReducedCostChange(
        variables_info_.GetIsRelevantBitRow());
  } else {
    // Update row is still equal to the row used during the last iteration
    // to update the reduced costs.
    DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
  }

  // If there is no dual-infeasible position, we are done.
  *leaving_row = kInvalidRow;
  if (num_dual_infeasible_positions_ == 0) return Status::OK();

  *leaving_row = dual_prices_.GetMaximum();

  // Returns right away if there is no leaving variable or fill the other
  // return values otherwise.
  if (*leaving_row == kInvalidRow) return Status::OK();
  *cost_variation = dual_pricing_vector_[*leaving_row];
  const ColIndex leaving_col = basis_[*leaving_row];
  // The sign of the price decides which bound the leaving variable targets.
  if (*cost_variation < 0.0) {
    *target_bound = upper_bounds[leaving_col];
  } else {
    *target_bound = lower_bounds[leaving_col];
  }
  DCHECK(IsFinite(*target_bound));
  return Status::OK();
}
2421
2422template <typename BoxedVariableCols>
2423void RevisedSimplex::MakeBoxedVariableDualFeasible(
2424 const BoxedVariableCols& cols, bool update_basic_values) {
2425 SCOPED_TIME_STAT(&function_stats_);
2426 std::vector<ColIndex> changed_cols;
2427
2428 // It is important to flip bounds within a tolerance because of precision
2429 // errors. Otherwise, this leads to cycling on many of the Netlib problems
2430 // since this is called at each iteration (because of the bound-flipping ratio
2431 // test).
2432 //
2433 // TODO(user): During an iteration, we might want to switch with a lower
2434 // tolerance bounds flip that were deemed good so that we can more easily make
2435 // progess?
2436 const Fractional threshold = reduced_costs_.GetDualFeasibilityTolerance();
2437
2438 const DenseRow& variable_values = variable_values_.GetDenseRow();
2439 const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
2440 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2441 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2442 const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2443 for (const ColIndex col : cols) {
2444 const Fractional reduced_cost = reduced_costs[col];
2445 const VariableStatus status = variable_status[col];
2446 DCHECK(variables_info_.GetTypeRow()[col] ==
2448 // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2449 DCHECK(variable_values[col] == lower_bounds[col] ||
2450 variable_values[col] == upper_bounds[col] ||
2452 if (reduced_cost > threshold && status == VariableStatus::AT_UPPER_BOUND) {
2453 variables_info_.UpdateToNonBasicStatus(col,
2455 changed_cols.push_back(col);
2456 } else if (reduced_cost < -threshold &&
2458 variables_info_.UpdateToNonBasicStatus(col,
2460 changed_cols.push_back(col);
2461 }
2462 }
2463
2464 if (!changed_cols.empty()) {
2465 iteration_stats_.num_dual_flips.Add(changed_cols.size());
2466 variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2467 update_basic_values);
2468 }
2469}
2470
2471Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2472 RowIndex leaving_row, Fractional target_bound) {
2473 SCOPED_TIME_STAT(&function_stats_);
2474
2475 // We just want the leaving variable to go to its target_bound.
2476 const ColIndex leaving_col = basis_[leaving_row];
2477 const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2478 Fractional unscaled_step = leaving_variable_value - target_bound;
2479
2480 // In Chvatal p 157 update_[entering_col] is used instead of
2481 // direction_[leaving_row], but the two quantities are actually the
2482 // same. This is because update_[col] is the value at leaving_row of
2483 // the right inverse of col and direction_ is the right inverse of the
2484 // entering_col. Note that direction_[leaving_row] is probably more
2485 // precise.
2486 // TODO(user): use this to check precision and trigger recomputation.
2487 return unscaled_step / direction_[leaving_row];
2488}
2489
2490bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2491 VLOG(1) << "Test pivot.";
2492 SCOPED_TIME_STAT(&function_stats_);
2493 const ColIndex leaving_col = basis_[leaving_row];
2494 basis_[leaving_row] = entering_col;
2495
2496 // TODO(user): If 'is_ok' is true, we could use the computed lu in
2497 // basis_factorization_ rather than recompute it during UpdateAndPivot().
2498 CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2499 const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2500 basis_[leaving_row] = leaving_col;
2501 return is_ok;
2502}
2503
2504// Note that this function is an optimization and that if it was doing nothing
2505// the algorithm will still be correct and work. Using it does change the pivot
2506// taken during the simplex method though.
2507void RevisedSimplex::PermuteBasis() {
2508 SCOPED_TIME_STAT(&function_stats_);
2509
2510 // Fetch the current basis column permutation and return if it is empty which
2511 // means the permutation is the identity.
2512 const ColumnPermutation& col_perm =
2513 basis_factorization_.GetColumnPermutation();
2514 if (col_perm.empty()) return;
2515
2516 // Permute basis_.
2517 ApplyColumnPermutationToRowIndexedVector(col_perm, &basis_, &tmp_basis_);
2518
2519 // Permute dual_pricing_vector_ if needed.
2520 if (!dual_pricing_vector_.empty()) {
2521 // TODO(user): We need to permute dual_prices_ too now, we recompute
2522 // everything one each basis factorization, so this don't matter.
2523 ApplyColumnPermutationToRowIndexedVector(col_perm, &dual_pricing_vector_,
2524 &tmp_dual_pricing_vector_);
2525 }
2526
2527 // Notify the other classes.
2528 reduced_costs_.UpdateDataOnBasisPermutation();
2529 dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);
2530
2531 // Finally, remove the column permutation from all subsequent solves since
2532 // it has been taken into account in basis_.
2533 basis_factorization_.SetColumnPermutationToIdentity();
2534}
2535
2536Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2537 RowIndex leaving_row,
2539 SCOPED_TIME_STAT(&function_stats_);
2540
2541 // Tricky and a bit hacky.
2542 //
2543 // The basis update code assumes that we already computed the left inverse of
2544 // the leaving row, otherwise it will just refactorize the basis. This left
2545 // inverse is needed by update_row_.ComputeUpdateRow(), so in most case it
2546 // will already be computed. However, in some situation we don't need the
2547 // full update row, so just the left inverse can be computed.
2548 //
2549 // TODO(user): Ideally this shouldn't be needed if we are going to refactorize
2550 // the basis anyway. So we should know that before hand which is currently
2551 // hard to do.
2552 Fractional pivot_from_update_row;
2553 if (update_row_.IsComputedFor(leaving_row)) {
2554 pivot_from_update_row = update_row_.GetCoefficient(entering_col);
2555 } else {
2556 // We only need the left inverse and the update row position at the
2557 // entering_col to check precision.
2558 update_row_.ComputeUnitRowLeftInverse(leaving_row);
2559 pivot_from_update_row = compact_matrix_.ColumnScalarProduct(
2560 entering_col, update_row_.GetUnitRowLeftInverse().values);
2561 }
2562
2563 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2564 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2565 const ColIndex leaving_col = basis_[leaving_row];
2566 const VariableStatus leaving_variable_status =
2567 lower_bounds[leaving_col] == upper_bounds[leaving_col]
2569 : target_bound == lower_bounds[leaving_col]
2572 if (variable_values_.Get(leaving_col) != target_bound) {
2573 ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2574 target_bound);
2575 }
2576 UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2577
2578 // Test precision by comparing two ways to get the "pivot".
2579 const Fractional pivot_from_direction = direction_[leaving_row];
2580 const Fractional diff =
2581 std::abs(pivot_from_update_row - pivot_from_direction);
2582 if (diff > parameters_.refactorization_threshold() *
2583 (1.0 + std::min(std::abs(pivot_from_update_row),
2584 std::abs(pivot_from_direction)))) {
2585 VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2586 << " diff = " << diff;
2587 // TODO(user): We need to be careful when we modify parameter like this
2588 // since it will not be reset until the next SetParameters() call.
2589 if (basis_factorization_.NumUpdates() < 10) {
2590 Fractional threshold = parameters_.lu_factorization_pivot_threshold();
2591 threshold = std::min(threshold * 1.5, 0.9);
2592 VLOG(1) << "Increasing LU pivot threshold " << threshold;
2593 parameters_.set_lu_factorization_pivot_threshold(threshold);
2594 basis_factorization_.SetParameters(parameters_);
2595 }
2596
2597 last_refactorization_reason_ = RefactorizationReason::IMPRECISE_PIVOT;
2598 GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2599 } else {
2601 basis_factorization_.Update(entering_col, leaving_row, direction_));
2602 }
2603 if (basis_factorization_.IsRefactorized()) {
2604 PermuteBasis();
2605 }
2606 return Status::OK();
2607}
2608
2609Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2610 SCOPED_TIME_STAT(&function_stats_);
2611 if (*refactorize && !basis_factorization_.IsRefactorized()) {
2612 GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2613 update_row_.Invalidate();
2614 PermuteBasis();
2615 }
2616 *refactorize = false;
2617 return Status::OK();
2618}
2619
2621 if (col >= integrality_scale_.size()) {
2622 integrality_scale_.resize(col + 1, 0.0);
2623 }
2624 integrality_scale_[col] = scale;
2625}
2626
2627Status RevisedSimplex::Polish(TimeLimit* time_limit) {
2629 Cleanup update_deterministic_time_on_return(
2630 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2631
2632 // Get all non-basic variables with a reduced costs close to zero.
2633 // Note that because we only choose entering candidate with a cost of zero,
2634 // this set will not change (modulo epsilons).
2635 const DenseRow::ConstView rc = reduced_costs_.GetReducedCosts();
2636 std::vector<ColIndex> candidates;
2637 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
2638 if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
2639 if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
2640 }
2641
2642 bool refactorize = false;
2643 int num_pivots = 0;
2644 Fractional total_gain = 0.0;
2645 for (int i = 0; i < 10; ++i) {
2646 AdvanceDeterministicTime(time_limit);
2647 if (time_limit->LimitReached()) break;
2648 if (num_pivots >= 5) break;
2649 if (candidates.empty()) break;
2650
2651 // Pick a random one and remove it from the list.
2652 const int index =
2653 std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
2654 const ColIndex entering_col = candidates[index];
2655 std::swap(candidates[index], candidates.back());
2656 candidates.pop_back();
2657
2658 // We need the entering variable to move in the correct direction.
2659 Fractional fake_rc = 1.0;
2660 if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
2661 CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
2662 fake_rc = -1.0;
2663 }
2664
2665 // Refactorize if needed.
2666 if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2667 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2668
2669 // Compute the direction and by how much we can move along it.
2670 ComputeDirection(entering_col);
2671 Fractional step_length;
2672 RowIndex leaving_row;
2674 bool local_refactorize = false;
2676 ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
2677 &leaving_row, &step_length, &target_bound));
2678
2679 if (local_refactorize) continue;
2680 if (step_length == kInfinity || step_length == -kInfinity) continue;
2681 if (std::abs(step_length) <= 1e-6) continue;
2682 if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
2683 continue;
2684 }
2685 const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
2686
2687 // Evaluate if pivot reduce the fractionality of the basis.
2688 //
2689 // TODO(user): Count with more weight variable with a small domain, i.e.
2690 // binary variable, compared to a variable in [0, 1k] ?
2691 const auto get_diff = [this](ColIndex col, Fractional old_value,
2692 Fractional new_value) {
2693 if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
2694 return 0.0;
2695 }
2696 const Fractional s = integrality_scale_[col];
2697 return (std::abs(new_value * s - std::round(new_value * s)) -
2698 std::abs(old_value * s - std::round(old_value * s)));
2699 };
2700 Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
2701 variable_values_.Get(entering_col) + step);
2702 for (const auto e : direction_) {
2703 const ColIndex col = basis_[e.row()];
2704 const Fractional old_value = variable_values_.Get(col);
2705 const Fractional new_value = old_value - e.coefficient() * step;
2706 diff += get_diff(col, old_value, new_value);
2707 }
2708
2709 // Ignore low decrease in integrality.
2710 if (diff > -1e-2) continue;
2711 total_gain -= diff;
2712
2713 // We perform the change.
2714 num_pivots++;
2715 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2716
2717 // This is a bound flip of the entering column.
2718 if (leaving_row == kInvalidRow) {
2719 if (step > 0.0) {
2720 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2722 } else if (step < 0.0) {
2723 SetNonBasicVariableStatusAndDeriveValue(entering_col,
2725 }
2726 continue;
2727 }
2728
2729 // Perform the pivot.
2730 const ColIndex leaving_col = basis_[leaving_row];
2731 update_row_.ComputeUpdateRow(leaving_row);
2732
2733 // Note that this will only do work if the norms are computed.
2734 //
2735 // TODO(user): We should probably move all the "update" in a function so
2736 // that all "iterations" function can just reuse the same code. Everything
2737 // that is currently not "cleared" should be updated. If one does not want
2738 // that, then it is easy to call Clear() on the quantities that do not needs
2739 // to be kept in sync with the current basis.
2740 primal_edge_norms_.UpdateBeforeBasisPivot(
2741 entering_col, leaving_col, leaving_row, direction_, &update_row_);
2742 dual_edge_norms_.UpdateBeforeBasisPivot(
2743 entering_col, leaving_row, direction_,
2744 update_row_.GetUnitRowLeftInverse());
2745
2746 // TODO(user): Rather than maintaining this, it is probably better to
2747 // recompute it in one go after Polish() is done. We don't use the reduced
2748 // costs here as we just assume that the set of candidates does not change.
2749 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2750 &update_row_);
2751
2752 const Fractional dir = -direction_[leaving_row] * step;
2753 const bool is_degenerate =
2754 (dir == 0.0) ||
2755 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2756 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2757 if (!is_degenerate) {
2758 variable_values_.Set(leaving_col, target_bound);
2759 }
2761 UpdateAndPivot(entering_col, leaving_row, target_bound));
2762 }
2763
2764 VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
2765 return Status::OK();
2766}
2767
2768// Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2769// x an n-vector.
2770//
2771// x is split in two parts x_B and x_N (B standing for basis).
2772// In the same way, A is split in A_B (also known as B) and A_N, and
2773// c is split into c_B and c_N.
2774//
2775// The goal is to minimize c_B.x_B + c_N.x_N
2776// subject to B.x_B + A_N.x_N = 0
2777// and x_lower <= x <= x_upper.
2778//
2779// To minimize c.x, at each iteration a variable from x_N is selected to
2780// enter the basis, and a variable from x_B is selected to leave the basis.
2781// To avoid explicit inversion of B, the algorithm solves two sub-systems:
2782// y.B = c_B and B.d = a (a being the entering column).
2783Status RevisedSimplex::PrimalMinimize(TimeLimit* time_limit) {
2785 Cleanup update_deterministic_time_on_return(
2786 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2787 num_consecutive_degenerate_iterations_ = 0;
2788 bool refactorize = false;
2789 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
2790
2791 // At this point, we are not sure the prices are always up to date, so
2792 // lets always reset them for the first iteration below.
2793 primal_prices_.ForceRecomputation();
2794
2795 if (phase_ == Phase::FEASIBILITY) {
2796 // Initialize the primal phase-I objective.
2797 // Note that this temporarily erases the problem objective.
2798 objective_.AssignToZero(num_cols_);
2799 variable_values_.UpdatePrimalPhaseICosts(
2800 util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2801 reduced_costs_.ResetForNewObjective();
2802 }
2803
2804 while (true) {
2805 AdvanceDeterministicTime(time_limit);
2806 if (time_limit->LimitReached()) break;
2807
2808 // TODO(user): we may loop a bit more than the actual number of iteration.
2809 // fix.
2811 ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2812
2813 // Trigger a refactorization if one of the class we use request it.
2814 if (!refactorize && reduced_costs_.NeedsBasisRefactorization()) {
2815 last_refactorization_reason_ = RefactorizationReason::RC;
2816 refactorize = true;
2817 }
2818 if (!refactorize && primal_edge_norms_.NeedsBasisRefactorization()) {
2819 last_refactorization_reason_ = RefactorizationReason::NORM;
2820 refactorize = true;
2821 }
2822 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2823
2824 if (basis_factorization_.IsRefactorized()) {
2825 CorrectErrorsOnVariableValues();
2826 DisplayIterationInfo(/*primal=*/true, last_refactorization_reason_);
2827 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
2828
2829 if (phase_ == Phase::FEASIBILITY) {
2830 // Since the variable values may have been recomputed, we need to
2831 // recompute the primal infeasible variables and update their costs.
2832 if (variable_values_.UpdatePrimalPhaseICosts(
2833 util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2834 &objective_)) {
2835 reduced_costs_.ResetForNewObjective();
2836 }
2837 }
2838
2839 // Computing the objective at each iteration takes time, so we just
2840 // check the limit when the basis is refactorized.
2841 if (phase_ == Phase::OPTIMIZATION &&
2842 ComputeObjectiveValue() < primal_objective_limit_) {
2843 VLOG(1) << "Stopping the primal simplex because"
2844 << " the objective limit " << primal_objective_limit_
2845 << " has been reached.";
2846 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2847 objective_limit_reached_ = true;
2848 return Status::OK();
2849 }
2850 } else if (phase_ == Phase::FEASIBILITY) {
2851 // Note that direction_.non_zeros contains the positions of the basic
2852 // variables whose values were updated during the last iteration.
2853 if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2854 &objective_)) {
2855 reduced_costs_.ResetForNewObjective();
2856 }
2857 }
2858
2859 const ColIndex entering_col = primal_prices_.GetBestEnteringColumn();
2860 if (entering_col == kInvalidCol) {
2861 if (reduced_costs_.AreReducedCostsPrecise() &&
2862 basis_factorization_.IsRefactorized()) {
2863 if (phase_ == Phase::FEASIBILITY) {
2864 const Fractional primal_infeasibility =
2865 variable_values_.ComputeMaximumPrimalInfeasibility();
2866 if (primal_infeasibility <
2867 parameters_.primal_feasibility_tolerance()) {
2868 problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2869 } else {
2870 VLOG(1) << "Infeasible problem! infeasibility = "
2871 << primal_infeasibility;
2872 problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2873 }
2874 } else {
2875 problem_status_ = ProblemStatus::OPTIMAL;
2876 }
2877 break;
2878 }
2879
2880 VLOG(1) << "Optimal reached, double checking...";
2881 reduced_costs_.MakeReducedCostsPrecise();
2882 refactorize = true;
2883 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
2884 continue;
2885 }
2886
2887 DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2888
2889 // Solve the system B.d = a with a the entering column.
2890 ComputeDirection(entering_col);
2891
2892 // This might trigger a recomputation on the next iteration. If it returns
2893 // false, we will also try to see if there is not another more promising
2894 // entering column.
2895 if (!primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
2896 direction_)) {
2897 primal_prices_.RecomputePriceAt(entering_col);
2898 continue;
2899 }
2900 const Fractional reduced_cost =
2901 reduced_costs_.TestEnteringReducedCostPrecision(entering_col,
2902 direction_);
2903
2904 // The test might have changed the reduced cost of the entering_col.
2905 // If it is no longer a valid entering candidate, we loop.
2906 primal_prices_.RecomputePriceAt(entering_col);
2907 if (!reduced_costs_.IsValidPrimalEnteringCandidate(entering_col)) {
2908 reduced_costs_.MakeReducedCostsPrecise();
2909 VLOG(1) << "Skipping col #" << entering_col
2910 << " whose reduced cost is no longer valid under precise reduced "
2911 "cost: "
2912 << reduced_cost;
2913 continue;
2914 }
2915
2916 // This test takes place after the check for optimality/feasibility because
2917 // when running with 0 iterations, we still want to report
2918 // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2919 // case at the beginning of the algorithm.
2920 if (num_iterations_ == parameters_.max_number_of_iterations()) break;
2921
2922 Fractional step_length;
2923 RowIndex leaving_row;
2925 if (phase_ == Phase::FEASIBILITY) {
2926 PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2927 &refactorize, &leaving_row,
2928 &step_length, &target_bound);
2929 } else {
2931 ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2932 &leaving_row, &step_length, &target_bound));
2933 }
2934 if (refactorize) {
2935 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
2936 continue;
2937 }
2938
2939 if (step_length == kInfinity || step_length == -kInfinity) {
2940 // On a validated input, we shouldn't have a length of -infinity even
2941 // though it can be slightly negative in some settings.
2942 DCHECK_NE(step_length, -kInfinity);
2943 if (!basis_factorization_.IsRefactorized() ||
2944 !reduced_costs_.AreReducedCostsPrecise()) {
2945 VLOG(1) << "Infinite step length, double checking...";
2946 reduced_costs_.MakeReducedCostsPrecise();
2947 refactorize = true;
2948 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
2949 continue;
2950 }
2951 if (phase_ == Phase::FEASIBILITY) {
2952 // This shouldn't happen by construction.
2953 VLOG(1) << "Unbounded feasibility problem !?";
2954 problem_status_ = ProblemStatus::ABNORMAL;
2955 } else {
2956 problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
2957 solution_primal_ray_.AssignToZero(num_cols_);
2958 for (RowIndex row(0); row < num_rows_; ++row) {
2959 const ColIndex col = basis_[row];
2960 solution_primal_ray_[col] = -direction_[row];
2961 }
2962 solution_primal_ray_[entering_col] = 1.0;
2963 if (reduced_cost > 0.0) {
2964 ChangeSign(&solution_primal_ray_);
2965 }
2966 }
2967 break;
2968 }
2969
2970 Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
2971 if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
2972 // For phase-I we currently always set the leaving variable to its exact
2973 // bound even if by doing so we may take a small step in the wrong
2974 // direction and may increase the overall infeasibility.
2975 //
2976 // TODO(user): Investigate alternatives even if this seems to work well in
2977 // practice. Note that the final returned solution will have the property
2978 // that all non-basic variables are at their exact bound, so it is nice
2979 // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
2980 // this property cannot be found.
2981 step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2982 }
2983
2984 // Store the leaving_col before basis_ change.
2985 const ColIndex leaving_col =
2986 (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
2987
2988 // An iteration is called 'degenerate' if the leaving variable is already
2989 // primal-infeasible and we make it even more infeasible or if we do a zero
2990 // step.
2991 bool is_degenerate = false;
2992 if (leaving_row != kInvalidRow) {
2993 Fractional dir = -direction_[leaving_row] * step;
2994 is_degenerate =
2995 (dir == 0.0) ||
2996 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2997 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2998
2999 // If the iteration is not degenerate, the leaving variable should go to
3000 // its exact target bound (it is how the step is computed).
3001 if (!is_degenerate) {
3002 DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
3003 target_bound));
3004 }
3005 }
3006
3007 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
3008 if (leaving_row != kInvalidRow) {
3009 // Important: the norm must be updated before the reduced_cost.
3010 primal_edge_norms_.UpdateBeforeBasisPivot(
3011 entering_col, basis_[leaving_row], leaving_row, direction_,
3012 &update_row_);
3013 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
3014 direction_, &update_row_);
3015 primal_prices_.UpdateBeforeBasisPivot(entering_col, &update_row_);
3016 if (!is_degenerate) {
3017 // On a non-degenerate iteration, the leaving variable should be at its
3018 // exact bound. This corrects an eventual small numerical error since
3019 // 'value + direction * step' where step is
3020 // '(target_bound - value) / direction'
3021 // may be slighlty different from target_bound.
3022 variable_values_.Set(leaving_col, target_bound);
3023 }
3025 UpdateAndPivot(entering_col, leaving_row, target_bound));
3027 if (is_degenerate) {
3028 timer.AlsoUpdate(&iteration_stats_.degenerate);
3029 } else {
3030 timer.AlsoUpdate(&iteration_stats_.normal);
3031 }
3032 });
3033 } else {
3034 // Bound flip. This makes sure that the flipping variable is at its bound
3035 // and has the correct status.
3037 variables_info_.GetTypeRow()[entering_col]);
3038 if (step > 0.0) {
3039 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3041 } else if (step < 0.0) {
3042 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3044 }
3045 primal_prices_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
3046 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
3047 }
3048
3049 if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
3050 // Set the leaving variable to its exact bound.
3051 variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3052
3053 // Change the objective value of the leaving variable to zero.
3054 reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
3055 &objective_[leaving_col]);
3056 primal_prices_.RecomputePriceAt(leaving_col);
3057 }
3058
3059 // Stats about consecutive degenerate iterations.
3060 if (step_length == 0.0) {
3061 num_consecutive_degenerate_iterations_++;
3062 } else {
3063 if (num_consecutive_degenerate_iterations_ > 0) {
3064 iteration_stats_.degenerate_run_size.Add(
3065 num_consecutive_degenerate_iterations_);
3066 num_consecutive_degenerate_iterations_ = 0;
3067 }
3068 }
3069 ++num_iterations_;
3070 }
3071 if (num_consecutive_degenerate_iterations_ > 0) {
3072 iteration_stats_.degenerate_run_size.Add(
3073 num_consecutive_degenerate_iterations_);
3074 }
3075 return Status::OK();
3076}
3077
3078// TODO(user): Two other approaches for the phase I described in Koberstein's
3079// PhD thesis seem worth trying at some point:
3080// - The subproblem approach, which enables one to use a normal phase II dual,
3081// but requires an efficient bound-flipping ratio test since the new problem
3082// has all its variables boxed. This one is implemented now, but requires
3083// a bit more tuning.
3084// - Pan's method, which is really fast but has no theoretical guarantee of
3085// terminating and thus needs to use one of the other methods as a fallback if
3086// it fails to make progress.
3087//
3088// Note that the returned status applies to the primal problem!
3089Status RevisedSimplex::DualMinimize(bool feasibility_phase,
3090 TimeLimit* time_limit) {
3091 Cleanup update_deterministic_time_on_return(
3092 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
3093 num_consecutive_degenerate_iterations_ = 0;
3094 bool refactorize = false;
3095 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
3096
3097 bound_flip_candidates_.clear();
3098
3099 // Leaving variable.
3100 RowIndex leaving_row;
3101 Fractional cost_variation;
3103
3104 // Entering variable.
3105 ColIndex entering_col;
3106
3107 while (true) {
3108 AdvanceDeterministicTime(time_limit);
3109 if (time_limit->LimitReached()) break;
3110
3111 // TODO(user): we may loop a bit more than the actual number of iteration.
3112 // fix.
3114 ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
3115
3116 // Trigger a refactorization if one of the class we use request it.
3117 //
3118 // TODO(user): Estimate when variable values are imprecise and refactor too.
3119 const bool old_refactorize_value = refactorize;
3120 if (!refactorize && reduced_costs_.NeedsBasisRefactorization()) {
3121 last_refactorization_reason_ = RefactorizationReason::RC;
3122 refactorize = true;
3123 }
3124 if (!refactorize && dual_edge_norms_.NeedsBasisRefactorization()) {
3125 last_refactorization_reason_ = RefactorizationReason::NORM;
3126 refactorize = true;
3127 }
3128 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
3129
3130 // If the basis is refactorized, we recompute all the values in order to
3131 // have a good precision.
3132 if (basis_factorization_.IsRefactorized()) {
3133 // We do not want to recompute the reduced costs too often, this is
3134 // because that may break the overall direction taken by the last steps
3135 // and may lead to less improvement on degenerate problems.
3136 //
3137 // For now, we just recompute them if refactorize was set during the
3138 // loop and not because of normal refactorization.
3139 //
3140 // During phase-I, we do want the reduced costs to be as precise as
3141 // possible. TODO(user): Investigate why and fix the TODO in
3142 // PermuteBasis().
3143 //
3144 // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
3145 // do recompute them, it is better to do that first.
3146 if (feasibility_phase || old_refactorize_value) {
3147 reduced_costs_.MakeReducedCostsPrecise();
3148 }
3149
3150 // TODO(user): Make RecomputeBasicVariableValues() do nothing
3151 // if it was already recomputed on a refactorized basis. This is the
3152 // same behavior as MakeReducedCostsPrecise().
3153 //
3154 // TODO(user): Do not recompute the variable values each time we
3155 // refactorize the matrix, like for the reduced costs? That may lead to
3156 // a worse behavior than keeping the "imprecise" version and only
3157 // recomputing it when its precision is above a threshold.
3158 if (!feasibility_phase) {
3159 MakeBoxedVariableDualFeasible(
3160 variables_info_.GetNonBasicBoxedVariables(),
3161 /*update_basic_values=*/false);
3162 variable_values_.RecomputeBasicVariableValues();
3163 variable_values_.RecomputeDualPrices(
3164 parameters_.dual_price_prioritize_norm());
3165
3166 // Computing the objective at each iteration takes time, so we just
3167 // check the limit when the basis is refactorized.
3168 //
3169 // Hack: We need phase_ here and not the local feasibility_phase
3170 // variable because this must not be checked for the dual phase I algo
3171 // that use the same code as the dual phase II (i.e. the local
3172 // feasibility_phase will be false).
3173 if (phase_ == Phase::OPTIMIZATION &&
3174 dual_objective_limit_ != kInfinity &&
3175 ComputeObjectiveValue() > dual_objective_limit_) {
3176 SOLVER_LOG(logger_,
3177 "Stopping the dual simplex because"
3178 " the objective limit ",
3179 dual_objective_limit_, " has been reached.");
3180 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3181 objective_limit_reached_ = true;
3182 return Status::OK();
3183 }
3184 }
3185
3186 DisplayIterationInfo(/*primal=*/false, last_refactorization_reason_);
3187 last_refactorization_reason_ = RefactorizationReason::DEFAULT;
3188 } else {
3189 // Updates from the previous iteration that can be skipped if we
3190 // recompute everything (see other case above).
3191 if (!feasibility_phase) {
3192 // Make sure the boxed variables are dual-feasible before choosing the
3193 // leaving variable row.
3194 MakeBoxedVariableDualFeasible(bound_flip_candidates_,
3195 /*update_basic_values=*/true);
3196 bound_flip_candidates_.clear();
3197
3198 // The direction_.non_zeros contains the positions for which the basic
3199 // variable value was changed during the previous iterations.
3200 variable_values_.UpdateDualPrices(direction_.non_zeros);
3201 }
3202 }
3203
3204 if (feasibility_phase) {
3205 GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
3206 &leaving_row, &cost_variation, &target_bound));
3207 } else {
3208 GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
3209 &leaving_row, &cost_variation, &target_bound));
3210 }
3211 if (leaving_row == kInvalidRow) {
3212 // TODO(user): integrate this with the main "re-optimization" loop.
3213 // Also distinguish cost perturbation and shifts?
3214 if (!basis_factorization_.IsRefactorized() ||
3215 reduced_costs_.HasCostShift()) {
3216 VLOG(1) << "Optimal reached, double checking.";
3217 reduced_costs_.ClearAndRemoveCostShifts();
3218 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3219 refactorize = true;
3220 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
3221 continue;
3222 }
3223 if (feasibility_phase) {
3224 // Note that since the basis is refactorized, the variable values
3225 // will be recomputed at the beginning of the second phase. The boxed
3226 // variable values will also be corrected by
3227 // MakeBoxedVariableDualFeasible().
3228 if (num_dual_infeasible_positions_ == 0) {
3229 problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3230 } else {
3231 VLOG(1) << "DUAL infeasible in dual phase I.";
3232 problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
3233 }
3234 } else {
3235 problem_status_ = ProblemStatus::OPTIMAL;
3236 }
3237 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3238 return Status::OK();
3239 }
3240
3241 update_row_.ComputeUnitRowLeftInverse(leaving_row);
3242 if (!dual_edge_norms_.TestPrecision(leaving_row,
3243 update_row_.GetUnitRowLeftInverse())) {
3244 // We rechoose a potentially different leaving row. Note that if we choose
3245 // the same, we shouldn't go back here since the norm will now pass the
3246 // test.
3247 if (feasibility_phase) {
3248 const Fractional price = dual_pricing_vector_[leaving_row];
3249 const DenseColumn::ConstView squared_norms =
3250 dual_edge_norms_.GetEdgeSquaredNorms();
3251 dual_prices_.AddOrUpdate(leaving_row,
3252 Square(price) / squared_norms[leaving_row]);
3253 } else {
3254 variable_values_.UpdateDualPrices({leaving_row});
3255 }
3256 continue;
3257 }
3258 update_row_.ComputeUpdateRow(leaving_row);
3259
3260 if (feasibility_phase) {
3262 reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3263 &entering_col));
3264 } else {
3266 reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3267 &bound_flip_candidates_, &entering_col));
3268 }
3269
3270 // No entering_col: dual unbounded (i.e. primal infeasible).
3271 if (entering_col == kInvalidCol) {
3272 if (!reduced_costs_.AreReducedCostsPrecise()) {
3273 VLOG(1) << "No entering column. Double checking...";
3274 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3275 refactorize = true;
3276 last_refactorization_reason_ = RefactorizationReason::FINAL_CHECK;
3277 continue;
3278 }
3279 DCHECK(basis_factorization_.IsRefactorized());
3280 if (feasibility_phase) {
3281 // This shouldn't happen by construction.
3282 VLOG(1) << "Unbounded dual feasibility problem !?";
3283 problem_status_ = ProblemStatus::ABNORMAL;
3284 } else {
3285 problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
3286 solution_dual_ray_ =
3287 Transpose(update_row_.GetUnitRowLeftInverse().values);
3288 update_row_.ComputeFullUpdateRow(leaving_row,
3289 &solution_dual_ray_row_combination_);
3290 if (cost_variation < 0) {
3291 ChangeSign(&solution_dual_ray_);
3292 ChangeSign(&solution_dual_ray_row_combination_);
3293 }
3294 }
3295 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3296 return Status::OK();
3297 }
3298
3299 // If the coefficient is too small, we recompute the reduced costs if not
3300 // already done. This is an extra heuristic to avoid computing the direction
3301 // if the pivot is small. But the real recomputation step is just below.
3302 const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
3303 if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
3304 !reduced_costs_.AreReducedCostsPrecise()) {
3305 VLOG(1) << "Trying not to pivot by " << entering_coeff;
3306 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3307 refactorize = true;
3308 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
3309 continue;
3310 }
3311
3312 ComputeDirection(entering_col);
3313
3314 // If the pivot is small compared to others in the direction_ vector we try
3315 // to recompute everything. If we cannot, then note that
3316 // DualChooseEnteringColumn() should guarantee that the pivot is not too
3317 // small when everything has already been recomputed.
3318 if (std::abs(direction_[leaving_row]) <
3319 parameters_.small_pivot_threshold() * direction_infinity_norm_) {
3320 if (!reduced_costs_.AreReducedCostsPrecise()) {
3321 VLOG(1) << "Trying not pivot by " << entering_coeff << " ("
3322 << direction_[leaving_row]
3323 << ") because the direction has a norm of "
3324 << direction_infinity_norm_;
3325 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3326 refactorize = true;
3327 last_refactorization_reason_ = RefactorizationReason::SMALL_PIVOT;
3328 continue;
3329 }
3330 }
3331
3332 // This is to avoid crashes below. It seems to happen rarely on a few miplib
3333 // problem like rmine11.pb.gz in multithread.
3334 //
3335 // TODO(user): Try to recover instead.
3336 if (std::abs(direction_[leaving_row]) <= 1e-20) {
3337 const std::string error_message = absl::StrCat(
3338 "trying to pivot with number too small: ", direction_[leaving_row]);
3339 SOLVER_LOG(logger_, error_message);
3340 return Status(Status::ERROR_LU, error_message);
3341 }
3342
3343 // This test takes place after the check for optimality/feasibility because
3344 // when running with 0 iterations, we still want to report
3345 // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
3346 // case at the beginning of the algorithm.
3347 if (num_iterations_ == parameters_.max_number_of_iterations()) {
3348 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3349 return Status::OK();
3350 }
3351
3352 // Before we update the reduced costs, if its sign is already dual
3353 // infeasible and the update direction will make it worse we make sure the
3354 // reduced cost is 0.0 so UpdateReducedCosts() will not take a step that
3355 // goes in the wrong direction (a few experiments seems to indicate that
3356 // this is not a good idea). See comment at the top of UpdateReducedCosts().
3357 //
3358 // Note that ShiftCostIfNeeded() actually shifts the cost a bit more in
3359 // order to do a non-zero step. This helps on degenerate problems. Like the
3360 // perturbation, we will remove all these shifts at the end.
3361 const bool increasing_rc_is_needed =
3362 (cost_variation > 0.0) == (entering_coeff > 0.0);
3363 reduced_costs_.ShiftCostIfNeeded(increasing_rc_is_needed, entering_col);
3364
3366 if (reduced_costs_.StepIsDualDegenerate(increasing_rc_is_needed,
3367 entering_col)) {
3368 timer.AlsoUpdate(&iteration_stats_.degenerate);
3369 } else {
3370 timer.AlsoUpdate(&iteration_stats_.normal);
3371 }
3372 });
3373
3374 // Update basis. Note that direction_ is already computed.
3375 //
3376 // TODO(user): this is pretty much the same in the primal or dual code.
3377 // We just need to know to what bound the leaving variable will be set to.
3378 // Factorize more common code?
3379 reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
3380 &update_row_);
3381 dual_edge_norms_.UpdateBeforeBasisPivot(
3382 entering_col, leaving_row, direction_,
3383 update_row_.GetUnitRowLeftInverse());
3384
3385 // During phase I, we do not need the basic variable values at all.
3386 // Important: The norm should be updated before that.
3387 Fractional primal_step = 0.0;
3388 if (feasibility_phase) {
3389 DualPhaseIUpdatePrice(leaving_row, entering_col);
3390 } else {
3391 primal_step =
3392 ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
3393 variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
3394 }
3395
3396 // It is important to do the actual pivot after the update above!
3397 const ColIndex leaving_col = basis_[leaving_row];
3399 UpdateAndPivot(entering_col, leaving_row, target_bound));
3400
3401 // This makes sure the leaving variable is at its exact bound. Tests
3402 // indicate that this makes everything more stable. Note also that during
3403 // the feasibility phase, the variable values are not used, but that the
3404 // correct non-basic variable value are needed at the end.
3405 variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3406
3407 ++num_iterations_;
3408 }
3409 return Status::OK();
3410}
3411
3412Status RevisedSimplex::PrimalPush(TimeLimit* time_limit) {
3414 Cleanup update_deterministic_time_on_return(
3415 [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
3416 bool refactorize = false;
3417
3418 // We clear all the quantities that we don't update so they will be recomputed
3419 // later if needed.
3420 primal_edge_norms_.Clear();
3421 dual_edge_norms_.Clear();
3422 update_row_.Invalidate();
3423 reduced_costs_.ClearAndRemoveCostShifts();
3424
3425 std::vector<ColIndex> super_basic_cols;
3426 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3427 if (variables_info_.GetStatusRow()[col] == VariableStatus::FREE &&
3428 variable_values_.Get(col) != 0) {
3429 super_basic_cols.push_back(col);
3430 }
3431 }
3432
3433 while (!super_basic_cols.empty()) {
3434 AdvanceDeterministicTime(time_limit);
3435 if (time_limit->LimitReached()) break;
3436
3438 ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
3439 GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
3440 if (basis_factorization_.IsRefactorized()) {
3441 CorrectErrorsOnVariableValues();
3442 DisplayIterationInfo(/*primal=*/true);
3443 }
3444
3445 // TODO(user): Select at random like in Polish().
3446 ColIndex entering_col = super_basic_cols.back();
3447
3448 DCHECK(variables_info_.GetCanDecreaseBitRow()[entering_col]);
3449 DCHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
3450
3451 // Decide which direction to send the entering column.
3452 // UNCONSTRAINED variables go towards zero. Other variables go towards their
3453 // closest bound. We assume that we're at an optimal solution, so all FREE
3454 // variables have approximately zero reduced cost, which means that the
3455 // objective value won't change from moving this column into the basis.
3456 // TODO(user): As an improvement for variables with two bounds, try both
3457 // and pick one that doesn't require a basis change (if possible), otherwise
3458 // pick the closer bound.
3459 Fractional fake_rc;
3460 const Fractional entering_value = variable_values_.Get(entering_col);
3461 if (variables_info_.GetTypeRow()[entering_col] ==
3463 if (entering_value > 0) {
3464 fake_rc = 1.0;
3465 } else {
3466 fake_rc = -1.0;
3467 }
3468 } else {
3469 const Fractional diff_ub =
3470 variables_info_.GetVariableUpperBounds()[entering_col] -
3471 entering_value;
3472 const Fractional diff_lb =
3473 entering_value -
3474 variables_info_.GetVariableLowerBounds()[entering_col];
3475 if (diff_lb <= diff_ub) {
3476 fake_rc = 1.0;
3477 } else {
3478 fake_rc = -1.0;
3479 }
3480 }
3481
3482 // Solve the system B.d = a with a the entering column.
3483 ComputeDirection(entering_col);
3484
3485 Fractional step_length;
3486 RowIndex leaving_row;
3488
3489 GLOP_RETURN_IF_ERROR(ChooseLeavingVariableRow(entering_col, fake_rc,
3490 &refactorize, &leaving_row,
3491 &step_length, &target_bound));
3492
3493 if (refactorize) continue;
3494
3495 // At this point, we know the iteration will finish or stop with an error.
3496 super_basic_cols.pop_back();
3497
3498 if (step_length == kInfinity || step_length == -kInfinity) {
3499 if (variables_info_.GetTypeRow()[entering_col] ==
3501 step_length = std::fabs(entering_value);
3502 } else {
3503 VLOG(1) << "Infinite step for bounded variable ?!";
3504 problem_status_ = ProblemStatus::ABNORMAL;
3505 break;
3506 }
3507 }
3508
3509 const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
3510
3511 // Store the leaving_col before basis_ change.
3512 const ColIndex leaving_col =
3513 (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
3514
3515 // An iteration is called 'degenerate' if the leaving variable is already
3516 // primal-infeasible and we make it even more infeasible or if we do a zero
3517 // step.
3518 // TODO(user): Test setting the step size to zero for degenerate steps.
3519 // We don't need to force a positive step because each super-basic variable
3520 // is pivoted in exactly once.
3521 bool is_degenerate = false;
3522 if (leaving_row != kInvalidRow) {
3523 Fractional dir = -direction_[leaving_row] * step;
3524 is_degenerate =
3525 (dir == 0.0) ||
3526 (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
3527 (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
3528
3529 // If the iteration is not degenerate, the leaving variable should go to
3530 // its exact target bound (it is how the step is computed).
3531 if (!is_degenerate) {
3532 DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
3533 target_bound));
3534 }
3535 }
3536
3537 variable_values_.UpdateOnPivoting(direction_, entering_col, step);
3538 if (leaving_row != kInvalidRow) {
3539 if (!is_degenerate) {
3540 // On a non-degenerate iteration, the leaving variable should be at its
3541 // exact bound. This corrects an eventual small numerical error since
3542 // 'value + direction * step' where step is
3543 // '(target_bound - value) / direction'
3544 // may be slighlty different from target_bound.
3545 variable_values_.Set(leaving_col, target_bound);
3546 }
3548 UpdateAndPivot(entering_col, leaving_row, target_bound));
3550 if (is_degenerate) {
3551 timer.AlsoUpdate(&iteration_stats_.degenerate);
3552 } else {
3553 timer.AlsoUpdate(&iteration_stats_.normal);
3554 }
3555 });
3556 } else {
3557 // Snap the super-basic variable to its bound. Note that
3558 // variable_values_.UpdateOnPivoting() should already be close to that but
3559 // here we make sure it is exact and remove any small numerical errors.
3560 if (variables_info_.GetTypeRow()[entering_col] ==
3562 variable_values_.Set(entering_col, 0.0);
3563 } else if (step > 0.0) {
3564 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3566 } else if (step < 0.0) {
3567 SetNonBasicVariableStatusAndDeriveValue(entering_col,
3569 }
3570 IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
3571 }
3572
3573 ++num_iterations_;
3574 }
3575
3576 if (!super_basic_cols.empty()) {
3577 SOLVER_LOG(logger_, "Push terminated early with ", super_basic_cols.size(),
3578 " super-basic variables remaining.");
3579 }
3580
3581 // TODO(user): What status should be returned if the time limit is hit?
3582 // If the optimization phase finished, then OPTIMAL is technically correct
3583 // but also misleading.
3584
3585 return Status::OK();
3586}
3587
3588ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
3590 return first_slack_col_ + RowToColIndex(row);
3591}
3592
3594 std::string result;
3595 result.append(iteration_stats_.StatString());
3596 result.append(ratio_test_stats_.StatString());
3597 result.append(entering_variable_.StatString());
3598 result.append(dual_prices_.StatString());
3599 result.append(reduced_costs_.StatString());
3600 result.append(variable_values_.StatString());
3601 result.append(primal_edge_norms_.StatString());
3602 result.append(dual_edge_norms_.StatString());
3603 result.append(update_row_.StatString());
3604 result.append(basis_factorization_.StatString());
3605 result.append(function_stats_.StatString());
3606 return result;
3607}
3608
3609void RevisedSimplex::DisplayAllStats() {
3610 if (absl::GetFlag(FLAGS_simplex_display_stats)) {
3611 absl::FPrintF(stderr, "%s", StatString());
3612 absl::FPrintF(stderr, "%s", GetPrettySolverStats());
3613 }
3614}
3615
3616Fractional RevisedSimplex::ComputeObjectiveValue() const {
3617 SCOPED_TIME_STAT(&function_stats_);
3618 return PreciseScalarProduct(objective_,
3619 Transpose(variable_values_.GetDenseRow()));
3620}
3621
3622Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
3623 SCOPED_TIME_STAT(&function_stats_);
3624 const Fractional sum = PreciseScalarProduct(
3625 objective_, Transpose(variable_values_.GetDenseRow()));
3626 return objective_scaling_factor_ * (sum + objective_offset_);
3627}
3628
3629void RevisedSimplex::SetParameters(const GlopParameters& parameters) {
3630 SCOPED_TIME_STAT(&function_stats_);
3631 deterministic_random_.seed(parameters.random_seed());
3632 absl_random_ = absl::BitGen(absl::SeedSeq({parameters.random_seed()}));
3633 initial_parameters_ = parameters;
3634 parameters_ = parameters;
3635 PropagateParameters();
3636}
3637
3638void RevisedSimplex::PropagateParameters() {
3639 SCOPED_TIME_STAT(&function_stats_);
3640 basis_factorization_.SetParameters(parameters_);
3641 entering_variable_.SetParameters(parameters_);
3642 reduced_costs_.SetParameters(parameters_);
3643 dual_edge_norms_.SetParameters(parameters_);
3644 primal_edge_norms_.SetParameters(parameters_);
3645 update_row_.SetParameters(parameters_);
3646}
3647
3648void RevisedSimplex::DisplayIterationInfo(bool primal,
3649 RefactorizationReason reason) {
3650 if (!logger_->LoggingIsEnabled()) return;
3651 const std::string first_word = primal ? "Primal " : "Dual ";
3652
3653 // We display the info on each re-factorization, and it is nice to show what
3654 // triggered the issue. Note that we don't display normal refactorization when
3655 // we decide that it is worth it for the solve time or we reach the fixed
3656 // refactorization period.
3657 std::string info;
3658 if (reason != RefactorizationReason::DEFAULT) {
3659 switch (reason) {
3660 case RefactorizationReason::DEFAULT:
3661 info = " [default]";
3662 break;
3663 case RefactorizationReason::SMALL_PIVOT:
3664 info = " [small pivot]";
3665 break;
3666 case RefactorizationReason::IMPRECISE_PIVOT:
3667 info = " [imprecise pivot]";
3668 break;
3669 case RefactorizationReason::NORM:
3670 info = " [norms]";
3671 break;
3672 case RefactorizationReason::RC:
3673 info = " [reduced costs]";
3674 break;
3675 case RefactorizationReason::VAR_VALUES:
3676 info = " [var values]";
3677 break;
3678 case RefactorizationReason::FINAL_CHECK:
3679 info = " [check]";
3680 break;
3681 }
3682 }
3683
3684 switch (phase_) {
3685 case Phase::FEASIBILITY: {
3686 const int64_t iter = num_iterations_;
3687 std::string name;
3688 Fractional objective;
3689 if (parameters_.use_dual_simplex()) {
3690 if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
3691 objective = reduced_costs_.ComputeSumOfDualInfeasibilities();
3692 } else {
3693 // The internal objective of the transformed problem is the negation
3694 // of the sum of the dual infeasibility of the original problem.
3695 objective = -PreciseScalarProduct(
3696 objective_, Transpose(variable_values_.GetDenseRow()));
3697 }
3698 name = "sum_dual_infeasibilities";
3699 } else {
3700 objective = variable_values_.ComputeSumOfPrimalInfeasibilities();
3701 name = "sum_primal_infeasibilities";
3702 }
3703
3704 SOLVER_LOG(logger_, first_word, "feasibility phase, iteration # ", iter,
3705 ", ", name, " = ", absl::StrFormat("%.15E", objective), info);
3706 break;
3707 }
3708 case Phase::OPTIMIZATION: {
3709 const int64_t iter = num_iterations_ - num_feasibility_iterations_;
3710 // Note that in the dual phase II, ComputeObjectiveValue() is also
3711 // computing the dual objective even if it uses the variable values.
3712 // This is because if we modify the bounds to make the problem
3713 // primal-feasible, we are at the optimal and hence the two objectives
3714 // are the same.
3715 const Fractional objective = ComputeInitialProblemObjectiveValue();
3716 SOLVER_LOG(logger_, first_word, "optimization phase, iteration # ", iter,
3717 ", objective = ", absl::StrFormat("%.15E", objective), info);
3718 break;
3719 }
3720 case Phase::PUSH: {
3721 const int64_t iter = num_iterations_ - num_feasibility_iterations_ -
3722 num_optimization_iterations_;
3723 SOLVER_LOG(logger_, first_word, "push phase, iteration # ", iter,
3724 ", remaining_variables_to_push = ",
3725 ComputeNumberOfSuperBasicVariables(), info);
3726 }
3727 }
3728}
3729
3730void RevisedSimplex::DisplayErrors() {
3731 if (!logger_->LoggingIsEnabled()) return;
3732 SOLVER_LOG(logger_,
3733 "Current status: ", GetProblemStatusString(problem_status_));
3734 SOLVER_LOG(logger_, "Primal infeasibility (bounds) = ",
3735 variable_values_.ComputeMaximumPrimalInfeasibility());
3736 SOLVER_LOG(logger_, "Primal residual |A.x - b| = ",
3737 variable_values_.ComputeMaximumPrimalResidual());
3738 SOLVER_LOG(logger_, "Dual infeasibility (reduced costs) = ",
3739 reduced_costs_.ComputeMaximumDualInfeasibility());
3740 SOLVER_LOG(logger_, "Dual residual |c_B - y.B| = ",
3741 reduced_costs_.ComputeMaximumDualResidual());
3742}
3743
3744namespace {
3745
3746std::string StringifyMonomialWithFlags(const Fractional a,
3747 absl::string_view x) {
3748 return StringifyMonomial(
3749 a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3750}
3751
3752// Returns a string representing the rational approximation of x or a decimal
3753// approximation of x according to
3754// absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions).
3755std::string StringifyWithFlags(const Fractional x) {
3756 return Stringify(x,
3757 absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3758}
3759
3760} // namespace
3761
3762std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
3763 std::string output;
3764 VariableType variable_type = variables_info_.GetTypeRow()[col];
3765 VariableStatus variable_status = variables_info_.GetStatusRow()[col];
3766 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3767 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3768 absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
3769 variable_name_[col],
3770 StringifyWithFlags(variable_values_.Get(col)),
3771 GetVariableStatusString(variable_status),
3772 GetVariableTypeString(variable_type),
3773 StringifyWithFlags(lower_bounds[col]),
3774 StringifyWithFlags(upper_bounds[col]));
3775 return output;
3776}
3777
3778void RevisedSimplex::DisplayInfoOnVariables() const {
3779 if (VLOG_IS_ON(3)) {
3780 for (ColIndex col(0); col < num_cols_; ++col) {
3781 const Fractional variable_value = variable_values_.Get(col);
3782 const Fractional objective_coefficient = objective_[col];
3783 const Fractional objective_contribution =
3784 objective_coefficient * variable_value;
3785 VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3786 << StringifyWithFlags(variable_value) << " * "
3787 << StringifyWithFlags(objective_coefficient)
3788 << "(obj) = " << StringifyWithFlags(objective_contribution);
3789 }
3790 VLOG(3) << "------";
3791 }
3792}
3793
3794void RevisedSimplex::DisplayVariableBounds() {
3795 if (VLOG_IS_ON(3)) {
3796 const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
3797 const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3798 const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3799 for (ColIndex col(0); col < num_cols_; ++col) {
3800 switch (variable_type[col]) {
3802 break;
3804 VLOG(3) << variable_name_[col]
3805 << " >= " << StringifyWithFlags(lower_bounds[col]) << ";";
3806 break;
3808 VLOG(3) << variable_name_[col]
3809 << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3810 break;
3812 VLOG(3) << StringifyWithFlags(lower_bounds[col])
3813 << " <= " << variable_name_[col]
3814 << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3815 break;
3817 VLOG(3) << variable_name_[col] << " = "
3818 << StringifyWithFlags(lower_bounds[col]) << ";";
3819 break;
3820 default: // This should never happen.
3821 LOG(DFATAL) << "Column " << col << " has no meaningful status.";
3822 break;
3823 }
3824 }
3825 }
3826}
3827
3830 util_intops::StrongVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3831 for (ColIndex col(0); col < num_cols_; ++col) {
3832 ComputeDirection(col);
3833 for (const auto e : direction_) {
3834 if (column_scales == nullptr) {
3835 dictionary[e.row()].SetCoefficient(col, e.coefficient());
3836 continue;
3837 }
3838 const Fractional numerator =
3839 col < column_scales->size() ? (*column_scales)[col] : 1.0;
3840 const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3841 ? (*column_scales)[GetBasis(e.row())]
3842 : 1.0;
3843 dictionary[e.row()].SetCoefficient(
3844 col, direction_[e.row()] * (numerator / denominator));
3845 }
3846 }
3847 return dictionary;
3848}
3849
3851 const LinearProgram& linear_program, const BasisState& state) {
3852 LoadStateForNextSolve(state);
3853 Status status = Initialize(linear_program);
3854 if (status.ok()) {
3855 variable_values_.RecomputeBasicVariableValues();
3856 solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3857 }
3858}
3859
3860void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3861 if (VLOG_IS_ON(3)) {
3862 // This function has a complexity in O(num_non_zeros_in_matrix).
3863 DisplayInfoOnVariables();
3864
3865 std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3866 const DenseRow::ConstView reduced_costs = reduced_costs_.GetReducedCosts();
3867 for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3868 absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3869 variable_name_[col]));
3870 }
3871 VLOG(3) << output << ";";
3872
3873 const RevisedSimplexDictionary dictionary(nullptr, this);
3874 RowIndex r(0);
3875 for (const SparseRow& row : dictionary) {
3876 output.clear();
3877 ColIndex basic_col = basis_[r];
3878 absl::StrAppend(&output, variable_name_[basic_col], " = ",
3879 StringifyWithFlags(variable_values_.Get(basic_col)));
3880 for (const SparseRowEntry e : row) {
3881 if (e.col() != basic_col) {
3882 absl::StrAppend(&output,
3883 StringifyMonomialWithFlags(e.coefficient(),
3884 variable_name_[e.col()]));
3885 }
3886 }
3887 VLOG(3) << output << ";";
3888 }
3889 VLOG(3) << "------";
3890 DisplayVariableBounds();
3891 ++r;
3892 }
3893}
3894
3895void RevisedSimplex::DisplayProblem() const {
3896 // This function has a complexity in O(num_rows * num_cols *
3897 // num_non_zeros_in_row).
3898 if (VLOG_IS_ON(3)) {
3899 DisplayInfoOnVariables();
3900 std::string output = "min: ";
3901 bool has_objective = false;
3902 for (ColIndex col(0); col < num_cols_; ++col) {
3903 const Fractional coeff = objective_[col];
3904 has_objective |= (coeff != 0.0);
3905 absl::StrAppend(&output,
3906 StringifyMonomialWithFlags(coeff, variable_name_[col]));
3907 }
3908 if (!has_objective) {
3909 absl::StrAppend(&output, " 0");
3910 }
3911 VLOG(3) << output << ";";
3912 for (RowIndex row(0); row < num_rows_; ++row) {
3913 output = "";
3914 for (ColIndex col(0); col < num_cols_; ++col) {
3915 absl::StrAppend(&output,
3916 StringifyMonomialWithFlags(
3917 compact_matrix_.column(col).LookUpCoefficient(row),
3918 variable_name_[col]));
3919 }
3920 VLOG(3) << output << " = 0;";
3921 }
3922 VLOG(3) << "------";
3923 }
3924}
3925
3926void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3927 DCHECK(time_limit != nullptr);
3928 const double current_deterministic_time = DeterministicTime();
3929 const double deterministic_time_delta =
3930 current_deterministic_time - last_deterministic_time_update_;
3931 time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3932 last_deterministic_time_update_ = current_deterministic_time;
3933}
3934
3935#undef DCHECK_COL_BOUNDS
3936#undef DCHECK_ROW_BOUNDS
3937
3938} // namespace glop
3939} // namespace operations_research
bool IsSet(IndexType i) const
Returns true if the bit at position i is set.
Definition bitset.h:538
ConstView const_view() const
Definition bitset.h:464
void EnableLogging(bool enable)
Definition logging.h:46
void SetLogToStdOut(bool enable)
Should all messages be displayed on stdout?
Definition logging.h:52
bool LoggingIsEnabled() const
Returns true iff logging is enabled.
Definition logging.h:49
std::string StatString() const
Definition stats.cc:77
ABSL_MUST_USE_RESULT Status Update(ColIndex entering_col, RowIndex leaving_variable_row, const ScatteredColumn &direction)
void SetParameters(const GlopParameters &parameters)
Sets the parameters for this component.
const ColumnPermutation & GetColumnPermutation() const
int NumUpdates() const
Returns the number of updates since last refactorization.
void RightSolve(ScatteredColumn *d) const
Right solves the system B.d = a where the input is the initial value of d.
RowToColMapping ComputeInitialBasis(const std::vector< ColIndex > &candidates)
bool IsRefactorized() const
Returns true if the factorization was just recomputed.
void RightSolveForProblemColumn(ColIndex col, ScatteredColumn *d) const
Fractional LookUpCoefficient(RowIndex index) const
RowIndex EntryRow(EntryIndex i) const
Fractional EntryCoefficient(EntryIndex i) const
void ColumnCopyToDenseColumn(ColIndex col, DenseColumn *dense_column) const
Definition sparse.h:473
void PopulateFromMatrixView(const MatrixView &input)
Definition sparse.cc:446
void ColumnAddMultipleToDenseColumn(ColIndex col, Fractional multiplier, DenseColumn::View dense_column) const
Definition sparse.h:441
void PopulateFromSparseMatrixAndAddSlacks(const SparseMatrix &input)
Definition sparse.cc:465
EntryIndex num_entries() const
Returns the matrix dimensions. See same functions in SparseMatrix.
Definition sparse.h:401
bool IsEmpty() const
Returns whether or not this matrix contains any non-zero entries.
Definition sparse.h:409
Fractional ColumnScalarProduct(ColIndex col, const DenseRow &vector) const
Definition sparse.h:434
void PopulateFromTranspose(const CompactSparseMatrix &input)
Definition sparse.cc:492
ColumnView column(ColIndex col) const
Definition sparse.h:416
void ColumnAddMultipleToSparseScatteredColumn(ColIndex col, Fractional multiplier, ScatteredColumn *column) const
Definition sparse.h:459
DenseColumn::ConstView GetEdgeSquaredNorms()
std::string StatString() const
Stats related functions.
void UpdateDataOnBasisPermutation(const ColumnPermutation &col_perm)
Updates the norms if the columns of the basis where permuted.
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, const ScatteredRow &unit_row_left_inverse)
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
bool TestPrecision(RowIndex leaving_row, const ScatteredRow &unit_row_left_inverse)
std::string StatString() const
Returns some stats about this class if they are enabled.
Definition pricing.h:98
void DenseAddOrUpdate(Index position, Fractional value)
Definition pricing.h:181
void Remove(Index position)
Removes the given index from the set of candidates.
Definition pricing.h:169
void AddOrUpdate(Index position, Fractional value)
Definition pricing.h:190
void SetParameters(const GlopParameters &parameters)
Sets the parameters.
std::string StatString() const
Stats related functions.
ABSL_MUST_USE_RESULT Status DualChooseEnteringColumn(bool nothing_to_recompute, const UpdateRow &update_row, Fractional cost_variation, std::vector< ColIndex > *bound_flip_candidates, ColIndex *entering_col)
ABSL_MUST_USE_RESULT Status DualPhaseIChooseEnteringColumn(bool nothing_to_recompute, const UpdateRow &update_row, Fractional cost_variation, ColIndex *entering_col)
ABSL_MUST_USE_RESULT Status ComputeFactorization(const CompactSparseMatrixView &compact_matrix)
std::string StatString() const
Returns a string with statistics about this class.
void UpdateBeforeBasisPivot(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
double DeterministicTime() const
Deterministic time used by the scalar product computation of this class.
void SetPricingRule(GlopParameters::PricingRule rule)
This changes what GetSquaredNorms() returns.
bool TestEnteringEdgeNormPrecision(ColIndex entering_col, const ScatteredColumn &direction)
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
void RecomputePriceAt(ColIndex col)
Triggers a recomputation of the price at the given column only.
void SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col)
void UpdateBeforeBasisPivot(ColIndex entering_col, UpdateRow *update_row)
std::string StatString() const
Stats related functions.
void SetNonBasicVariableCostToZero(ColIndex col, Fractional *current_cost)
Fractional ComputeMaximumDualInfeasibilityOnNonBoxedVariables()
Fractional GetDualFeasibilityTolerance() const
Returns the current dual feasibility tolerance.
void SetParameters(const GlopParameters &parameters)
Sets the pricing parameters. This does not change the pricing rule.
bool StepIsDualDegenerate(bool increasing_rc_is_needed, ColIndex col)
Fractional TestEnteringReducedCostPrecision(ColIndex entering_col, const ScatteredColumn &direction)
bool IsValidPrimalEnteringCandidate(ColIndex col) const
Does basic checking of an entering candidate.
void UpdateDataOnBasisPermutation()
Invalidates the data that depends on the order of the column in basis_.
const DenseColumn & GetDualValues()
Returns the dual values associated to the current basis.
void ShiftCostIfNeeded(bool increasing_rc_is_needed, ColIndex col)
double DeterministicTime() const
The deterministic time used by this class.
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
void ResetForNewObjective()
Invalidates all internal structure that depends on the objective function.
Fractional GetVariableValue(ColIndex col) const
void SetIntegralityScale(ColIndex col, Fractional scale)
ConstraintStatus GetConstraintStatus(RowIndex row) const
void LoadStateForNextSolve(const BasisState &state)
Uses the given state as a warm-start for the next Solve() call.
Fractional GetDualValue(RowIndex row) const
Fractional GetReducedCost(ColIndex col) const
Fractional GetConstraintActivity(RowIndex row) const
RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales)
const DenseRow & GetDualRayRowCombination() const
This is the "dual ray" linear combination of the matrix rows.
RowIndex GetProblemNumRows() const
Getters to retrieve all the information computed by the last Solve().
void ComputeBasicVariablesForState(const LinearProgram &linear_program, const BasisState &state)
std::string StatString()
Returns statistics about this class as a string.
const BasisFactorization & GetBasisFactorization() const
VariableStatus GetVariableStatus(ColIndex col) const
void SetParameters(const GlopParameters &parameters)
Sets or gets the algorithm parameters to be used on the next Solve().
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
void SetStartingVariableValuesForNextSolve(const DenseRow &values)
static Status OK()
Improves readability but identical to 0-arg constructor.
Definition status.h:55
@ ERROR_LU
The LU factorization of the current basis couldn't be computed.
Definition status.h:34
StrictITISpan< RowIndex, const Fractional > ConstView
Definition lp_types.h:293
void SetParameters(const GlopParameters &parameters)
Sets the algorithm parameters.
void ComputeFullUpdateRow(RowIndex leaving_row, DenseRow *output) const
std::string StatString() const
Returns statistics about this class as a string.
Definition update_row.h:99
const ScatteredRow & GetUnitRowLeftInverse() const
Definition update_row.cc:58
void ComputeUnitRowLeftInverse(RowIndex leaving_row)
Definition update_row.cc:70
void ComputeUpdateRow(RowIndex leaving_row)
Definition update_row.cc:87
bool IsComputedFor(RowIndex leaving_row) const
Returns true if ComputeUpdateRow() was called since the last Invalidate().
Definition update_row.h:76
double DeterministicTime() const
Deterministic time used by the scalar product computation of this class.
Definition update_row.h:110
Fractional GetCoefficient(ColIndex col) const
Definition update_row.h:88
absl::Span< const ColIndex > GetNonZeroPositions() const
bool UpdatePrimalPhaseICosts(const Rows &rows, DenseRow *objective)
void Set(ColIndex col, Fractional value)
Sets the variable value of a given column.
std::string StatString() const
Parameters and stats functions.
void UpdateOnPivoting(const ScatteredColumn &direction, ColIndex entering_col, Fractional step)
void UpdateGivenNonBasicVariables(absl::Span< const ColIndex > cols_to_update, bool update_basic_variables)
void ResetAllNonBasicVariableValues(const DenseRow &free_initial_values)
void RecomputeDualPrices(bool put_more_importance_on_norm=false)
void UpdateDualPrices(absl::Span< const RowIndex > row)
Fractional Get(ColIndex col) const
Getters for the variable values.
void TransformToDualPhaseIProblem(Fractional dual_feasibility_tolerance, DenseRow::ConstView reduced_costs)
const DenseBitRow & GetCanDecreaseBitRow() const
const DenseBitRow & GetCanIncreaseBitRow() const
const VariableStatusRow & GetStatusRow() const
const DenseRow & GetVariableLowerBounds() const
Returns the variable bounds.
const VariableTypeRow & GetTypeRow() const
const DenseRow & GetVariableUpperBounds() const
const DenseBitRow & GetNotBasicBitRow() const
Fractional GetBoundDifference(ColIndex col) const
Returns the distance between the upper and lower bound of the given column.
const DenseBitRow & GetIsRelevantBitRow() const
int ChangeUnusedBasicVariablesToFree(const RowToColMapping &basis)
const DenseBitRow & GetNonBasicBoxedVariables() const
void UpdateToNonBasicStatus(ColIndex col, VariableStatus status)
void EndDualPhaseI(Fractional dual_feasibility_tolerance, DenseRow::ConstView reduced_costs)
bool LoadBoundsAndReturnTrueIfUnchanged(const DenseRow &new_lower_bounds, const DenseRow &new_upper_bounds)
void InitializeFromBasisState(ColIndex first_slack, ColIndex num_new_cols, const BasisState &state)
const DenseBitRow & GetIsBasicBitRow() const
int SnapFreeVariablesToBound(Fractional distance, const DenseRow &starting_values)
int64_t a
Definition table.cc:44
SatParameters parameters
const std::string name
A name for logging purposes.
int64_t value
absl::Status status
Definition g_gurobi.cc:44
double upper_bound
double lower_bound
time_limit
Definition solve.cc:22
int index
const bool DEBUG_MODE
Definition macros.h:24
ColIndex col
Definition markowitz.cc:187
RowIndex row
Definition markowitz.cc:186
constexpr ColIndex kInvalidCol(-1)
constexpr double kInfinity
Infinity for type Fractional.
Definition lp_types.h:89
std::string GetVariableTypeString(VariableType variable_type)
Returns the string representation of the VariableType enum.
Definition lp_types.cc:56
StrictITIVector< RowIndex, ColIndex > RowToColMapping
Definition lp_types.h:396
Fractional Square(Fractional f)
Definition lp_utils.h:41
StrictITIVector< RowIndex, bool > DenseBooleanColumn
Column of booleans.
Definition lp_types.h:385
Permutation< ColIndex > ColumnPermutation
Definition permutation.h:97
std::string Stringify(const Fractional x, bool fraction)
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
Definition lp_utils.h:111
Bitset64< ColIndex > DenseBitRow
Row of bits.
Definition lp_types.h:377
const DenseRow & Transpose(const DenseColumn &col)
Similar comment as the other Transpose() implementation above.
Definition lp_utils.h:199
std::string GetVariableStatusString(VariableStatus status)
Returns the string representation of the VariableStatus enum.
Definition lp_types.cc:75
Index ColToIntIndex(ColIndex col)
Get the integer index corresponding to the col.
Definition lp_types.h:60
constexpr const uint64_t kDeterministicSeed
ColIndex RowToColIndex(RowIndex row)
Get the ColIndex corresponding to the column # row.
Definition lp_types.h:54
bool IsFinite(Fractional value)
Definition lp_types.h:96
constexpr RowIndex kInvalidRow(-1)
VariableType
Different types of variables.
Definition lp_types.h:180
bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, const SparseMatrix &matrix_a, const CompactSparseMatrix &matrix_b)
StrictITIVector< ColIndex, VariableType > VariableTypeRow
Row of variable types.
Definition lp_types.h:371
Fractional InfinityNorm(const DenseColumn &v)
Returns the maximum of the coefficients of 'v'.
Definition lp_utils.cc:151
void ChangeSign(StrictITIVector< IndexType, Fractional > *data)
Changes the sign of all the entries in the given vector.
Definition lp_utils.h:307
StrictITIVector< ColIndex, VariableStatus > VariableStatusRow
Row of variable statuses.
Definition lp_types.h:374
std::string StringifyMonomial(const Fractional a, absl::string_view x, bool fraction)
StrictITIVector< ColIndex, Fractional > DenseRow
Row-vector types. Row-vector types are indexed by a column index.
Definition lp_types.h:353
void ApplyColumnPermutationToRowIndexedVector(const Permutation< ColIndex > &col_perm, RowIndexedVector *v)
ProblemStatus
Different statuses for a given problem.
Definition lp_types.h:107
@ ABNORMAL
An error occurred during the solving process.
@ INIT
The solver didn't had a chance to prove anything.
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Returns the ConstraintStatus corresponding to a given VariableStatus.
Definition lp_types.cc:113
static double DeterministicTimeForFpOperations(int64_t n)
Definition lp_types.h:435
std::string GetProblemStatusString(ProblemStatus problem_status)
Returns the string representation of the ProblemStatus enum.
Definition lp_types.cc:23
In SWIG mode, we don't want anything besides these top-level includes.
util_intops::StrongVector< ColumnEntryIndex, ElementIndex, ElementAllocator > SparseColumn
DisabledScopedTimeDistributionUpdater ScopedTimeDistributionUpdater
Definition stats.h:414
STL namespace.
const Variable x
Definition qp_tests.cc:127
#define RETURN_IF_NULL(x)
Fractional coeff_magnitude
#define DCHECK_ROW_BOUNDS(row)
ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false, "Display numbers as fractions.")
Fractional target_bound
#define DCHECK_COL_BOUNDS(col)
Fractional ratio
double distance
std::vector< double > lower_bounds
Definition lp_utils.cc:746
std::vector< double > upper_bounds
Definition lp_utils.cc:747
int var_index
Definition search.cc:3268
#define IF_STATS_ENABLED(instructions)
Definition stats.h:417
#define SCOPED_TIME_STAT(stats)
Definition stats.h:418
#define GLOP_RETURN_IF_ERROR(function_call)
Macro to simplify error propagation between function returning Status.
Definition status.h:71
#define GLOP_RETURN_ERROR_IF_NULL(arg)
Macro to check that a pointer argument is not null.
Definition status.h:86
bool IsEmpty() const
Returns true if this state is empty.
void ClearNonZerosIfTooDense(double ratio_for_using_dense_representation)
void ClearSparseMask()
Efficiently clears the is_non_zero vector.
StrictITIVector< Index, Fractional > values
#define SOLVER_LOG(logger,...)
Definition logging.h:109