Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
lp_solver.cc
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cmath>
18#include <cstdlib>
19#include <memory>
20#include <string>
21
22#include "absl/flags/flag.h"
23#include "absl/log/check.h"
24#include "absl/strings/str_cat.h"
25#include "absl/strings/str_format.h"
26#include "google/protobuf/text_format.h"
29#include "ortools/glop/parameters.pb.h"
32#include "ortools/glop/status.h"
34#include "ortools/linear_solver/linear_solver.pb.h"
42
43#ifndef __PORTABLE_PLATFORM__
44// TODO(user): abstract this in some way to the port directory.
46#endif
47
48ABSL_FLAG(bool, lp_dump_to_proto_file, false,
49 "Tells whether do dump the problem to a protobuf file.");
50ABSL_FLAG(bool, lp_dump_compressed_file, true,
51 "Whether the proto dump file is compressed.");
52ABSL_FLAG(bool, lp_dump_binary_file, false,
53 "Whether the proto dump file is binary.");
54ABSL_FLAG(int, lp_dump_file_number, -1,
55 "Number for the dump file, in the form name-000048.pb. "
56 "If < 0, the file is automatically numbered from the number of "
57 "calls to LPSolver::Solve().");
58ABSL_FLAG(std::string, lp_dump_dir, "/tmp",
59 "Directory where dump files are written.");
60ABSL_FLAG(std::string, lp_dump_file_basename, "",
61 "Base name for dump files. LinearProgram::name_ is used if "
62 "lp_dump_file_basename is empty. If LinearProgram::name_ is "
63 "empty, \"linear_program_dump_file\" is used.");
64ABSL_FLAG(std::string, glop_params, "",
65 "Override any user parameters with the value of this flag. This is "
66 "interpreted as a GlopParameters proto in text format.");
67
68namespace operations_research {
69namespace glop {
70namespace {
71
72// Writes a LinearProgram to a file if FLAGS_lp_dump_to_proto_file is true. The
73// integer num is appended to the base name of the file. When this function is
74// called from LPSolver::Solve(), num is usually the number of times Solve() was
75// called. For a LinearProgram whose name is "LinPro", and num = 48, the default
76// output file will be /tmp/LinPro-000048.pb.gz.
77//
78// Warning: this is a no-op on portable platforms (Android, iOS, etc.).
79void DumpLinearProgramIfRequiredByFlags(const LinearProgram& linear_program,
80 int num) {
81 if (!absl::GetFlag(FLAGS_lp_dump_to_proto_file)) return;
82#ifdef __PORTABLE_PLATFORM__
83 LOG(WARNING) << "DumpLinearProgramIfRequiredByFlags(linear_program, num) "
84 "requested for linear_program.name()='"
85 << linear_program.name() << "', num=" << num
86 << " but is not implemented for this platform.";
87#else
88 std::string filename = absl::GetFlag(FLAGS_lp_dump_file_basename);
89 if (filename.empty()) {
90 if (linear_program.name().empty()) {
91 filename = "linear_program_dump";
92 } else {
93 filename = linear_program.name();
94 }
95 }
96 const int file_num = absl::GetFlag(FLAGS_lp_dump_file_number) >= 0
97 ? absl::GetFlag(FLAGS_lp_dump_file_number)
98 : num;
99 absl::StrAppendFormat(&filename, "-%06d.pb", file_num);
100 const std::string filespec =
101 absl::StrCat(absl::GetFlag(FLAGS_lp_dump_dir), "/", filename);
102 MPModelProto proto;
103 LinearProgramToMPModelProto(linear_program, &proto);
104 const ProtoWriteFormat write_format = absl::GetFlag(FLAGS_lp_dump_binary_file)
105                                           ? ProtoWriteFormat::kProtoBinary
106                                           : ProtoWriteFormat::kProtoText;
107 CHECK_OK(WriteProtoToFile(filespec, proto, write_format,
108 absl::GetFlag(FLAGS_lp_dump_compressed_file)));
109#endif
110}
111
112} // anonymous namespace
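The flags above are normally set on the command line, but they can also be set programmatically before the first call to LPSolver::Solve(). A minimal sketch using absl::SetFlag (the output directory below is a placeholder):

  // Dump every model passed to LPSolver::Solve() as a gzipped text proto
  // named <basename>-<solve count>.pb.gz under the chosen directory.
  absl::SetFlag(&FLAGS_lp_dump_to_proto_file, true);
  absl::SetFlag(&FLAGS_lp_dump_dir, "/tmp/lp_dumps");    // placeholder path
  absl::SetFlag(&FLAGS_lp_dump_binary_file, false);      // text format
  absl::SetFlag(&FLAGS_lp_dump_compressed_file, true);   // gzip the output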
113
114// --------------------------------------------------------
115// LPSolver
116// --------------------------------------------------------
117
118LPSolver::LPSolver() : num_solves_(0) {}
119
120std::string LPSolver::GlopVersion() {
121 return absl::StrCat("Glop solver v", OrToolsVersionString());
122}
123
124void LPSolver::SetParameters(const GlopParameters& parameters) {
125 parameters_ = parameters;
126#ifndef __PORTABLE_PLATFORM__
127 if (!absl::GetFlag(FLAGS_glop_params).empty()) {
128 GlopParameters flag_params;
129 CHECK(google::protobuf::TextFormat::ParseFromString(
130 absl::GetFlag(FLAGS_glop_params), &flag_params));
131 parameters_.MergeFrom(flag_params);
132 }
133#endif
134}
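For reference, a minimal sketch of how parameters are usually supplied to the solver; the field names below (log_search_progress, use_dual_simplex, max_time_in_seconds) are taken from glop's parameters.proto and should be checked against the version in use:

  GlopParameters params;
  params.set_log_search_progress(true);
  params.set_use_dual_simplex(true);
  params.set_max_time_in_seconds(30.0);

  LPSolver solver;
  solver.SetParameters(params);
  // If --glop_params is non-empty, SetParameters() merges its text-format
  // content on top of the values set here, as implemented above.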
135
136const GlopParameters& LPSolver::GetParameters() const { return parameters_; }
137
138GlopParameters* LPSolver::GetMutableParameters() { return &parameters_; }
139
140SolverLogger& LPSolver::GetSolverLogger() { return logger_; }
141
142ProblemStatus LPSolver::Solve(const LinearProgram& lp) {
143 std::unique_ptr<TimeLimit> time_limit =
144 TimeLimit::FromParameters(parameters_);
145 return SolveWithTimeLimit(lp, time_limit.get());
146}
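Solve() is a thin wrapper that builds the time limit from the parameters. A caller with an explicit wall-clock budget can construct its own limit; a sketch, assuming the TimeLimit constructor from ortools/util/time_limit.h that takes a limit in seconds:

  TimeLimit time_limit(/*limit_in_seconds=*/10.0);
  LPSolver solver;
  const ProblemStatus status = solver.SolveWithTimeLimit(lp, &time_limit);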
147
148ProblemStatus LPSolver::SolveWithTimeLimit(const LinearProgram& lp,
149                                           TimeLimit* time_limit) {
150 if (time_limit == nullptr) {
151 LOG(DFATAL) << "SolveWithTimeLimit() called with a nullptr time_limit.";
152 return ProblemStatus::ABNORMAL;
153 }
154 ++num_solves_;
155 num_revised_simplex_iterations_ = 0;
156 DumpLinearProgramIfRequiredByFlags(lp, num_solves_);
157
158 // Display a warning if running in non-opt, unless we're inside a unit test.
159 DLOG(WARNING)
160 << "\n******************************************************************"
161 "\n* WARNING: Glop will be very slow because it will use DCHECKs *"
162 "\n* to verify the results and the precision of the solver. *"
163 "\n* You can gain at least an order of magnitude speedup by *"
164 "\n* compiling with optimizations enabled and by defining NDEBUG. *"
165 "\n******************************************************************";
166
167 // Set up the logger.
168 logger_.EnableLogging(parameters_.log_search_progress());
169 logger_.SetLogToStdOut(parameters_.log_to_stdout());
170 if (!parameters_.log_search_progress() && VLOG_IS_ON(1)) {
171 logger_.EnableLogging(true);
172 logger_.SetLogToStdOut(false);
173 }
174
175 // Log some initial info about the input model.
176 if (logger_.LoggingIsEnabled()) {
177 SOLVER_LOG(&logger_, "");
178 SOLVER_LOG(&logger_, "Initial problem: ", lp.GetDimensionString());
179 SOLVER_LOG(&logger_, "Objective stats: ", lp.GetObjectiveStatsString());
180 SOLVER_LOG(&logger_, "Bounds stats: ", lp.GetBoundsStatsString());
181 SOLVER_LOG(&logger_, "Parameters: ", ProtobufShortDebugString(parameters_));
182 }
183
184 // Check some preconditions.
185 if (!lp.IsCleanedUp()) {
186 LOG(DFATAL) << "The columns of the given linear program should be ordered "
187 << "by row and contain no zero coefficients. Call CleanUp() "
188 << "on it before calling Solve().";
189 ResizeSolution(lp.num_constraints(), lp.num_variables());
190 return ProblemStatus::INVALID_PROBLEM;
191 }
192
193 // TODO(user): Unfortunately we are not really helpful with the error message
194 // here. We could do a better job. However, most clients should talk to glop
195 // via an input protocol buffer, which should have better validation messages.
196 if (!lp.IsValid(parameters_.max_valid_magnitude())) {
197 SOLVER_LOG(&logger_,
198 "The given linear program is invalid. It contains NaNs, "
199 "coefficients too large or invalid bounds specification.");
200 ResizeSolution(lp.num_constraints(), lp.num_variables());
201 return ProblemStatus::INVALID_PROBLEM;
202 }
203
204 // Make an internal copy of the problem for the preprocessing.
205 current_linear_program_.PopulateFromLinearProgram(lp);
206
207 // Remove small entries even if presolve is off. This is mainly here to
208 // avoid floating point underflow. Keeping them can break many invariants like
209 // a * b == 0 iff a == 0 or b == 0.
210 //
211 // Note that our presolve/scaling can potentially create smaller entries than
212 // this, but the scale should stay reasonable.
213 //
214 // TODO(user): If speed matters, we could do this as we copy the program.
215 current_linear_program_.RemoveNearZeroEntries(parameters_.drop_magnitude());
216
217 // Preprocess.
218 MainLpPreprocessor preprocessor(&parameters_);
219 preprocessor.SetLogger(&logger_);
220 preprocessor.SetTimeLimit(time_limit);
221
222 const bool postsolve_is_needed = preprocessor.Run(&current_linear_program_);
223
224 if (logger_.LoggingIsEnabled()) {
225 SOLVER_LOG(&logger_, "");
226 SOLVER_LOG(&logger_, "Presolved problem: ",
227 current_linear_program_.GetDimensionString());
228 SOLVER_LOG(&logger_, "Objective stats: ",
229 current_linear_program_.GetObjectiveStatsString());
230 SOLVER_LOG(&logger_, "Bounds stats: ",
231 current_linear_program_.GetBoundsStatsString());
232 }
233
234 // At this point, we need to initialize a ProblemSolution with the correct
235 // size and status.
236 ProblemSolution solution(current_linear_program_.num_constraints(),
237 current_linear_program_.num_variables());
238 solution.status = preprocessor.status();
239 // LoadAndVerifySolution() below updates primal_values_, dual_values_,
240 // variable_statuses_ and constraint_statuses_ with the values stored in
241 // solution by RunPrimalDualPathFollowingMethodIfNeeded() and
242 // RunRevisedSimplexIfNeeded(), and hence clears any results stored in them
243 // from a previous run. In contrast, primal_ray_, constraints_dual_ray_, and
244 // variable_bounds_dual_ray_ are modified directly by
245 // RunRevisedSimplexIfNeeded(), so we explicitly clear any results they may
246 // hold from a previous run.
247 primal_ray_.clear();
248 constraints_dual_ray_.clear();
249 variable_bounds_dual_ray_.clear();
250
251 // Do not launch the solver if the time limit was already reached. This might
252 // mean that the pre-processors were not all run, and current_linear_program_
253 // might not be in a completely safe state.
254 if (!time_limit->LimitReached()) {
255 RunRevisedSimplexIfNeeded(&solution, time_limit);
256 }
257 if (postsolve_is_needed) preprocessor.DestructiveRecoverSolution(&solution);
258 const ProblemStatus status = LoadAndVerifySolution(lp, solution);
259 // LOG some statistics that can be parsed by our benchmark script.
260 if (logger_.LoggingIsEnabled()) {
261 SOLVER_LOG(&logger_, "status: ", GetProblemStatusString(status));
262 SOLVER_LOG(&logger_, "objective: ", GetObjectiveValue());
263 SOLVER_LOG(&logger_, "iterations: ", GetNumberOfSimplexIterations());
264 SOLVER_LOG(&logger_, "time: ", time_limit->GetElapsedTime());
265 SOLVER_LOG(&logger_, "deterministic_time: ",
266 time_limit->GetElapsedDeterministicTime());
267 SOLVER_LOG(&logger_, "");
268 }
269
270 return status;
271}
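As the preconditions above state, the input must be cleaned up before solving. A self-contained sketch of the expected call sequence, using the LinearProgram mutators from ortools/lp_data/lp_data.h (treat the exact method names as assumptions to be checked against that header):

  // Maximize x + 2y subject to x + y <= 10, 0 <= x <= 8, 0 <= y <= 5.
  LinearProgram lp;
  const ColIndex x = lp.CreateNewVariable();
  const ColIndex y = lp.CreateNewVariable();
  lp.SetVariableBounds(x, 0.0, 8.0);
  lp.SetVariableBounds(y, 0.0, 5.0);
  lp.SetObjectiveCoefficient(x, 1.0);
  lp.SetObjectiveCoefficient(y, 2.0);
  lp.SetMaximizationProblem(true);
  const RowIndex capacity = lp.CreateNewConstraint();
  lp.SetConstraintBounds(capacity, -kInfinity, 10.0);
  lp.SetCoefficient(capacity, x, 1.0);
  lp.SetCoefficient(capacity, y, 1.0);
  lp.CleanUp();  // Required: orders columns by row and drops zero entries.

  LPSolver solver;
  if (solver.Solve(lp) == ProblemStatus::OPTIMAL) {
    // Expected optimum: x = 5, y = 5, objective = 15.
    LOG(INFO) << "objective = " << solver.GetObjectiveValue();
  }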
272
273void LPSolver::Clear() {
274 ResizeSolution(RowIndex(0), ColIndex(0));
275 revised_simplex_.reset(nullptr);
276}
277
278void LPSolver::SetInitialBasis(
279 const VariableStatusRow& variable_statuses,
280 const ConstraintStatusColumn& constraint_statuses) {
281 // Create the associated basis state.
282 BasisState state;
283 state.statuses = variable_statuses;
284 for (const ConstraintStatus status : constraint_statuses) {
285 // Note the change of upper/lower bound between the status of a constraint
286 // and the status of its associated slack variable.
287 switch (status) {
288 case ConstraintStatus::FREE:
289 state.statuses.push_back(VariableStatus::FREE);
290 break;
291 case ConstraintStatus::AT_LOWER_BOUND:
292 state.statuses.push_back(VariableStatus::AT_UPPER_BOUND);
293 break;
294 case ConstraintStatus::AT_UPPER_BOUND:
295 state.statuses.push_back(VariableStatus::AT_LOWER_BOUND);
296 break;
297 case ConstraintStatus::FIXED_VALUE:
298 state.statuses.push_back(VariableStatus::FIXED_VALUE);
299 break;
300 case ConstraintStatus::BASIC:
301 state.statuses.push_back(VariableStatus::BASIC);
302 break;
303 }
304 }
305 if (revised_simplex_ == nullptr) {
306 revised_simplex_ = std::make_unique<RevisedSimplex>();
307 revised_simplex_->SetLogger(&logger_);
308 }
309 revised_simplex_->LoadStateForNextSolve(state);
310 if (parameters_.use_preprocessing()) {
311 LOG(WARNING) << "In GLOP, SetInitialBasis() was called but the parameter "
312 "use_preprocessing is true, this will likely not result in "
313 "what you want.";
314 }
315}
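A typical warm-start pattern, shown as a sketch; variable_statuses() and constraint_statuses() are assumed to be the LPSolver accessors exposing the statuses of the last solve, and preprocessing is disabled per the warning above:

  GlopParameters params;
  params.set_use_preprocessing(false);
  LPSolver solver;
  solver.SetParameters(params);
  solver.Solve(lp);  // First solve from scratch.

  // ... make a small modification to lp (bounds, objective, ...) ...

  // Start the next solve from the final basis of the previous one.
  solver.SetInitialBasis(solver.variable_statuses(),
                         solver.constraint_statuses());
  solver.Solve(lp);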
316
317namespace {
318// Computes the "real" problem objective from the one without offset nor
319// scaling.
320Fractional ProblemObjectiveValue(const LinearProgram& lp, Fractional value) {
321 return lp.objective_scaling_factor() * (value + lp.objective_offset());
322}
323
324// Returns the allowed error magnitude for something that should evaluate to
325// value under the given tolerance.
326Fractional AllowedError(Fractional tolerance, Fractional value) {
327 return tolerance * std::max(1.0, std::abs(value));
328}
329} // namespace
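For example, with objective_scaling_factor() == 2.0, objective_offset() == 3.0 and an internal objective value of 5.0, ProblemObjectiveValue() reports 2.0 * (5.0 + 3.0) = 16.0. Likewise, for tolerance = 1e-6, AllowedError(1e-6, 2000.0) = 2e-3 while AllowedError(1e-6, 0.5) = 1e-6, so larger quantities are allowed proportionally larger absolute errors.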
330
331// TODO(user): Try to also check the precision of an INFEASIBLE or UNBOUNDED
332// return status.
333ProblemStatus LPSolver::LoadAndVerifySolution(const LinearProgram& lp,
334 const ProblemSolution& solution) {
335 SOLVER_LOG(&logger_, "");
336 SOLVER_LOG(&logger_, "Final unscaled solution:");
337
338 if (!IsProblemSolutionConsistent(lp, solution)) {
339 SOLVER_LOG(&logger_, "Inconsistency detected in the solution.");
340 ResizeSolution(lp.num_constraints(), lp.num_variables());
341 return ProblemStatus::ABNORMAL;
342 }
343
344 // Load the solution.
345 primal_values_ = solution.primal_values;
346 dual_values_ = solution.dual_values;
347 variable_statuses_ = solution.variable_statuses;
348 constraint_statuses_ = solution.constraint_statuses;
349
350 ProblemStatus status = solution.status;
351
352 // Objective before possibly moving the primal/dual values inside their
353 // bounds.
354 ComputeReducedCosts(lp);
355 const Fractional primal_objective_value = ComputeObjective(lp);
356 const Fractional dual_objective_value = ComputeDualObjective(lp);
357 SOLVER_LOG(&logger_, "Primal objective (before moving primal/dual values) = ",
358 absl::StrFormat(
359 "%.15E", ProblemObjectiveValue(lp, primal_objective_value)));
360 SOLVER_LOG(&logger_, "Dual objective (before moving primal/dual values) = ",
361 absl::StrFormat("%.15E",
362 ProblemObjectiveValue(lp, dual_objective_value)));
363
364 // If needed, move the primal/dual values inside their bounds.
365 if (status == ProblemStatus::OPTIMAL &&
366 parameters_.provide_strong_optimal_guarantee()) {
367 MovePrimalValuesWithinBounds(lp);
368 MoveDualValuesWithinBounds(lp);
369 }
370
371 // The reported objective to the user.
372 problem_objective_value_ = ProblemObjectiveValue(lp, ComputeObjective(lp));
373 SOLVER_LOG(&logger_, "Primal objective (after moving primal/dual values) = ",
374 absl::StrFormat("%.15E", problem_objective_value_));
375
376 ComputeReducedCosts(lp);
377 ComputeConstraintActivities(lp);
378
379 // These will be set to true if the associated "infeasibility" is too large.
380 //
381 // The tolerance used is the parameter solution_feasibility_tolerance. To be
382 // somewhat independent of the original problem scaling, the thresholds used
383 // depend on the quantity involved and on its coordinates:
384 // - tolerance * max(1.0, abs(cost[col])) when a reduced cost is infeasible.
385 // - tolerance * max(1.0, abs(bound)) when a bound is crossed.
386 // - tolerance for an infeasible dual value (because the limit is always 0.0).
387 bool rhs_perturbation_is_too_large = false;
388 bool cost_perturbation_is_too_large = false;
389 bool primal_infeasibility_is_too_large = false;
390 bool dual_infeasibility_is_too_large = false;
391 bool primal_residual_is_too_large = false;
392 bool dual_residual_is_too_large = false;
393
394 // Compute all the infeasibilities and update the Booleans above.
395 ComputeMaxRhsPerturbationToEnforceOptimality(lp,
396 &rhs_perturbation_is_too_large);
397 ComputeMaxCostPerturbationToEnforceOptimality(
398 lp, &cost_perturbation_is_too_large);
399 const double primal_infeasibility =
400 ComputePrimalValueInfeasibility(lp, &primal_infeasibility_is_too_large);
401 const double dual_infeasibility =
402 ComputeDualValueInfeasibility(lp, &dual_infeasibility_is_too_large);
403 const double primal_residual =
404 ComputeActivityInfeasibility(lp, &primal_residual_is_too_large);
405 const double dual_residual =
406 ComputeReducedCostInfeasibility(lp, &dual_residual_is_too_large);
407
408 // TODO(user): the names are not really consistent since, in practice, these
409 // are the "residuals": the primal/dual infeasibilities are zero when
410 // parameters_.provide_strong_optimal_guarantee() is true.
411 max_absolute_primal_infeasibility_ =
412 std::max(primal_infeasibility, primal_residual);
413 max_absolute_dual_infeasibility_ =
414 std::max(dual_infeasibility, dual_residual);
415 SOLVER_LOG(&logger_, "Max. primal infeasibility = ",
416 max_absolute_primal_infeasibility_);
417 SOLVER_LOG(&logger_,
418 "Max. dual infeasibility = ", max_absolute_dual_infeasibility_);
419
420 // Now that all the relevant quantities are computed, we check the precision
421 // and optimality of the result. See Chvatal pp. 61-62. If any of the tests
422 // fail, we return the IMPRECISE status.
423 const double objective_error_ub = ComputeMaxExpectedObjectiveError(lp);
424 SOLVER_LOG(&logger_, "Objective error <= ", objective_error_ub);
425
426 if (status == ProblemStatus::OPTIMAL &&
427 parameters_.provide_strong_optimal_guarantee()) {
428 // If the primal/dual values were moved to the bounds, then the primal/dual
429 // infeasibilities should be exactly zero (but not the residuals).
430 if (primal_infeasibility != 0.0 || dual_infeasibility != 0.0) {
431 LOG(ERROR) << "Primal/dual values have been moved to their bounds. "
432 << "Therefore the primal/dual infeasibilities should be "
433 << "exactly zero (but not the residuals). If this message "
434 << "appears, there is probably a bug in "
435 << "MovePrimalValuesWithinBounds() or in "
436 << "MoveDualValuesWithinBounds().";
437 }
438 if (rhs_perturbation_is_too_large) {
439 SOLVER_LOG(&logger_, "The needed rhs perturbation is too large !!");
440 if (parameters_.change_status_to_imprecise()) {
441 status = ProblemStatus::IMPRECISE;
442 }
443 }
444 if (cost_perturbation_is_too_large) {
445 SOLVER_LOG(&logger_, "The needed cost perturbation is too large !!");
446 if (parameters_.change_status_to_imprecise()) {
447 status = ProblemStatus::IMPRECISE;
448 }
449 }
450 }
451
452 // Note that we compare the values without offset or scaling. We also need to
453 // compare them before we move the primal/dual values, otherwise we lose some
454 // precision since the values are modified independently of each other.
455 if (status == ProblemStatus::OPTIMAL) {
456 if (std::abs(primal_objective_value - dual_objective_value) >
457 objective_error_ub) {
458 SOLVER_LOG(&logger_,
459 "The objective gap of the final solution is too large.");
460 if (parameters_.change_status_to_imprecise()) {
461 status = ProblemStatus::IMPRECISE;
462 }
463 }
464 }
465 if ((status == ProblemStatus::OPTIMAL ||
466 status == ProblemStatus::PRIMAL_FEASIBLE) &&
467 (primal_residual_is_too_large || primal_infeasibility_is_too_large)) {
468 SOLVER_LOG(&logger_,
469 "The primal infeasibility of the final solution is too large.");
470 if (parameters_.change_status_to_imprecise()) {
471 status = ProblemStatus::IMPRECISE;
472 }
473 }
474 if ((status == ProblemStatus::OPTIMAL ||
475 status == ProblemStatus::DUAL_FEASIBLE) &&
476 (dual_residual_is_too_large || dual_infeasibility_is_too_large)) {
477 SOLVER_LOG(&logger_,
478 "The dual infeasibility of the final solution is too large.");
479 if (parameters_.change_status_to_imprecise()) {
480 status = ProblemStatus::IMPRECISE;
481 }
482 }
483
484 may_have_multiple_solutions_ =
485 (status == ProblemStatus::OPTIMAL) ? IsOptimalSolutionOnFacet(lp) : false;
486 return status;
487}
488
489bool LPSolver::IsOptimalSolutionOnFacet(const LinearProgram& lp) {
490 // Note(user): We use the same two tolerances below for both the dual and
491 // primal values.
492 // TODO(user): investigate whether to use the tolerances defined in
493 // parameters.proto.
494 const double kReducedCostTolerance = 1e-9;
495 const double kBoundTolerance = 1e-7;
496 const ColIndex num_cols = lp.num_variables();
497 for (ColIndex col(0); col < num_cols; ++col) {
498 if (variable_statuses_[col] == VariableStatus::FIXED_VALUE) continue;
499 const Fractional lower_bound = lp.variable_lower_bounds()[col];
500 const Fractional upper_bound = lp.variable_upper_bounds()[col];
501 const Fractional value = primal_values_[col];
502 if (AreWithinAbsoluteTolerance(reduced_costs_[col], 0.0,
503 kReducedCostTolerance) &&
504 (AreWithinAbsoluteTolerance(value, lower_bound, kBoundTolerance) ||
505 AreWithinAbsoluteTolerance(value, upper_bound, kBoundTolerance))) {
506 return true;
507 }
508 }
509 const RowIndex num_rows = lp.num_constraints();
510 for (RowIndex row(0); row < num_rows; ++row) {
511 if (constraint_statuses_[row] == ConstraintStatus::FIXED_VALUE) continue;
512 const Fractional lower_bound = lp.constraint_lower_bounds()[row];
513 const Fractional upper_bound = lp.constraint_upper_bounds()[row];
514 const Fractional activity = constraint_activities_[row];
515 if (AreWithinAbsoluteTolerance(dual_values_[row], 0.0,
516 kReducedCostTolerance) &&
517 (AreWithinAbsoluteTolerance(activity, lower_bound, kBoundTolerance) ||
518 AreWithinAbsoluteTolerance(activity, upper_bound, kBoundTolerance))) {
519 return true;
520 }
521 }
522 return false;
523}
524
525Fractional LPSolver::GetObjectiveValue() const {
526 return problem_objective_value_;
527}
528
529Fractional LPSolver::GetMaximumPrimalInfeasibility() const {
530 return max_absolute_primal_infeasibility_;
531}
532
533Fractional LPSolver::GetMaximumDualInfeasibility() const {
534 return max_absolute_dual_infeasibility_;
535}
536
537bool LPSolver::MayHaveMultipleOptimalSolutions() const {
538 return may_have_multiple_solutions_;
539}
540
541int LPSolver::GetNumberOfSimplexIterations() const {
542 return num_revised_simplex_iterations_;
543}
544
545double LPSolver::DeterministicTime() const {
546 return revised_simplex_ == nullptr ? 0.0
547 : revised_simplex_->DeterministicTime();
548}
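A sketch of how these accessors are typically read back after a solve (lp is any cleaned-up LinearProgram):

  LPSolver solver;
  const ProblemStatus status = solver.Solve(lp);
  LOG(INFO) << "status: " << GetProblemStatusString(status)
            << ", objective: " << solver.GetObjectiveValue()
            << ", max primal infeasibility: "
            << solver.GetMaximumPrimalInfeasibility()
            << ", max dual infeasibility: "
            << solver.GetMaximumDualInfeasibility()
            << ", iterations: " << solver.GetNumberOfSimplexIterations()
            << ", deterministic time: " << solver.DeterministicTime();
  if (status == ProblemStatus::OPTIMAL &&
      solver.MayHaveMultipleOptimalSolutions()) {
    // The optimum lies on a facet: other vertices may reach the same value.
  }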
549
550void LPSolver::MovePrimalValuesWithinBounds(const LinearProgram& lp) {
551 const ColIndex num_cols = lp.num_variables();
552 DCHECK_EQ(num_cols, primal_values_.size());
553 Fractional error = 0.0;
554 for (ColIndex col(0); col < num_cols; ++col) {
555 const Fractional lower_bound = lp.variable_lower_bounds()[col];
556 const Fractional upper_bound = lp.variable_upper_bounds()[col];
557 DCHECK_LE(lower_bound, upper_bound);
558
559 error = std::max(error, primal_values_[col] - upper_bound);
560 error = std::max(error, lower_bound - primal_values_[col]);
561 primal_values_[col] = std::min(primal_values_[col], upper_bound);
562 primal_values_[col] = std::max(primal_values_[col], lower_bound);
563 }
564 SOLVER_LOG(&logger_, "Max. primal values move = ", error);
565}
566
567void LPSolver::MoveDualValuesWithinBounds(const LinearProgram& lp) {
568 const RowIndex num_rows = lp.num_constraints();
569 DCHECK_EQ(num_rows, dual_values_.size());
570 const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
571 Fractional error = 0.0;
572 for (RowIndex row(0); row < num_rows; ++row) {
573 const Fractional lower_bound = lp.constraint_lower_bounds()[row];
574 const Fractional upper_bound = lp.constraint_upper_bounds()[row];
575
576 // For a minimization problem, we want a lower bound.
577 Fractional minimization_dual_value = optimization_sign * dual_values_[row];
578 if (lower_bound == -kInfinity && minimization_dual_value > 0.0) {
579 error = std::max(error, minimization_dual_value);
580 minimization_dual_value = 0.0;
581 }
582 if (upper_bound == kInfinity && minimization_dual_value < 0.0) {
583 error = std::max(error, -minimization_dual_value);
584 minimization_dual_value = 0.0;
585 }
586 dual_values_[row] = optimization_sign * minimization_dual_value;
587 }
588 SOLVER_LOG(&logger_, "Max. dual values move = ", error);
589}
590
591void LPSolver::ResizeSolution(RowIndex num_rows, ColIndex num_cols) {
592 primal_values_.resize(num_cols, 0.0);
593 reduced_costs_.resize(num_cols, 0.0);
594 variable_statuses_.resize(num_cols, VariableStatus::FREE);
595
596 dual_values_.resize(num_rows, 0.0);
597 constraint_activities_.resize(num_rows, 0.0);
598 constraint_statuses_.resize(num_rows, ConstraintStatus::FREE);
599}
600
601void LPSolver::RunRevisedSimplexIfNeeded(ProblemSolution* solution,
602 TimeLimit* time_limit) {
603 // Note that the transpose matrix is no longer needed at this point.
604 // This helps reduce the peak memory usage of the solver.
605 //
606 // TODO(user): actually, once the linear_program is loaded into the internal
607 // glop memory, there is no point keeping it around. Add a more complex
608 // Load/Solve API to RevisedSimplex so we can completely reclaim its memory
609 // right away.
610 current_linear_program_.ClearTransposeMatrix();
611 if (solution->status != ProblemStatus::INIT) return;
612 if (revised_simplex_ == nullptr) {
613 revised_simplex_ = std::make_unique<RevisedSimplex>();
614 revised_simplex_->SetLogger(&logger_);
615 }
616 revised_simplex_->SetParameters(parameters_);
617 if (revised_simplex_->Solve(current_linear_program_, time_limit).ok()) {
618 num_revised_simplex_iterations_ = revised_simplex_->GetNumberOfIterations();
619 solution->status = revised_simplex_->GetProblemStatus();
620
621 // Make sure we do not copy the slacks added by revised_simplex_.
622 const ColIndex num_cols = solution->primal_values.size();
623 DCHECK_LE(num_cols, revised_simplex_->GetProblemNumCols());
624 for (ColIndex col(0); col < num_cols; ++col) {
625 solution->primal_values[col] = revised_simplex_->GetVariableValue(col);
626 solution->variable_statuses[col] =
627 revised_simplex_->GetVariableStatus(col);
628 }
629 const RowIndex num_rows = revised_simplex_->GetProblemNumRows();
630 DCHECK_EQ(solution->dual_values.size(), num_rows);
631 for (RowIndex row(0); row < num_rows; ++row) {
632 solution->dual_values[row] = revised_simplex_->GetDualValue(row);
633 solution->constraint_statuses[row] =
634 revised_simplex_->GetConstraintStatus(row);
635 }
636 if (!parameters_.use_preprocessing() && !parameters_.use_scaling()) {
637 if (solution->status == ProblemStatus::PRIMAL_UNBOUNDED) {
638 primal_ray_ = revised_simplex_->GetPrimalRay();
639 // Make sure we do not copy the slacks added by revised_simplex_.
640 primal_ray_.resize(num_cols);
641 } else if (solution->status == ProblemStatus::DUAL_UNBOUNDED) {
642 constraints_dual_ray_ = revised_simplex_->GetDualRay();
643 variable_bounds_dual_ray_ =
644 revised_simplex_->GetDualRayRowCombination();
645 // Make sure we do not copy the slacks added by revised_simplex_.
646 variable_bounds_dual_ray_.resize(num_cols);
647 // Revised simplex's GetDualRay is always such that GetDualRay.rhs < 0,
648 // which is a cost improving direction for the dual if the primal is a
649 // maximization problem (i.e. when the dual is a minimization problem).
650 // Hence, we change the sign of constraints_dual_ray_ for min problems.
651 //
652 // Revised simplex's GetDualRayRowCombination = A^T GetDualRay and
653 // we must have variable_bounds_dual_ray_ = - A^T constraints_dual_ray_.
654 // Then we need to change the sign of variable_bounds_dual_ray_, but for
655 // min problems this change is implicit because of the sign change of
656 // constraints_dual_ray_ described above.
657 if (current_linear_program_.IsMaximizationProblem()) {
658 ChangeSign(&variable_bounds_dual_ray_);
659 } else {
660 ChangeSign(&constraints_dual_ray_);
661 }
662 }
663 }
664 } else {
665 SOLVER_LOG(&logger_, "Error during the revised simplex algorithm.");
666 solution->status = ProblemStatus::ABNORMAL;
667 }
668}
669
670namespace {
671
672void LogVariableStatusError(ColIndex col, Fractional value,
673 VariableStatus status, Fractional lb,
674 Fractional ub) {
675 VLOG(1) << "Variable " << col << " status is "
676 << GetVariableStatusString(status) << " but its value is " << value
677 << " and its bounds are [" << lb << ", " << ub << "].";
678}
679
680void LogConstraintStatusError(RowIndex row, ConstraintStatus status,
681 Fractional lb, Fractional ub) {
682 VLOG(1) << "Constraint " << row << " status is "
683 << GetConstraintStatusString(status) << " but its bounds are [" << lb
684 << ", " << ub << "].";
685}
686
687} // namespace
688
689bool LPSolver::IsProblemSolutionConsistent(
690 const LinearProgram& lp, const ProblemSolution& solution) const {
691 const RowIndex num_rows = lp.num_constraints();
692 const ColIndex num_cols = lp.num_variables();
693 if (solution.variable_statuses.size() != num_cols) return false;
694 if (solution.constraint_statuses.size() != num_rows) return false;
695 if (solution.primal_values.size() != num_cols) return false;
696 if (solution.dual_values.size() != num_rows) return false;
697 if (solution.status != ProblemStatus::OPTIMAL &&
698 solution.status != ProblemStatus::PRIMAL_FEASIBLE &&
699 solution.status != ProblemStatus::DUAL_FEASIBLE) {
700 return true;
701 }
702
703 // This checks that the variable statuses verify the properties described
704 // in the VariableStatus declaration.
705 RowIndex num_basic_variables(0);
706 for (ColIndex col(0); col < num_cols; ++col) {
707 const Fractional value = solution.primal_values[col];
708 const Fractional lb = lp.variable_lower_bounds()[col];
709 const Fractional ub = lp.variable_upper_bounds()[col];
710 const VariableStatus status = solution.variable_statuses[col];
711 switch (solution.variable_statuses[col]) {
712 case VariableStatus::BASIC:
713 // TODO(user): Check that the reduced cost of this column is epsilon
714 // close to zero.
715 ++num_basic_variables;
716 break;
717 case VariableStatus::FIXED_VALUE:
718 // TODO(user): Because of scaling, it is possible that a FIXED_VALUE
719 // status (only reserved for the exact lb == ub case) is now set for a
720 // variable where (ub == lb + epsilon). So we do not check here that the
721 // two bounds are exactly equal. The best is probably to remove the
722 // FIXED status from the API completely and report one of AT_LOWER_BOUND
723 // or AT_UPPER_BOUND instead. This would also make it possible to indicate
724 // whether, at optimality, the objective is limited by this variable's lower
725 // bound or by its upper bound. Note that there are other TODOs in the
726 // codebase about removing this FIXED_VALUE status.
727 if (value != ub && value != lb) {
728 LogVariableStatusError(col, value, status, lb, ub);
729 return false;
730 }
731 break;
732 case VariableStatus::AT_LOWER_BOUND:
733 if (value != lb || lb == ub) {
734 LogVariableStatusError(col, value, status, lb, ub);
735 return false;
736 }
737 break;
738 case VariableStatus::AT_UPPER_BOUND:
739 // TODO(user): revert to an exact comparison once the bug causing this
740 // to fail has been fixed.
741 if (!AreWithinAbsoluteTolerance(value, ub, 1e-7) || lb == ub) {
742 LogVariableStatusError(col, value, status, lb, ub);
743 return false;
744 }
745 break;
746 case VariableStatus::FREE:
747 if (lb != -kInfinity || ub != kInfinity || value != 0.0) {
748 LogVariableStatusError(col, value, status, lb, ub);
749 return false;
750 }
751 break;
752 }
753 }
754 for (RowIndex row(0); row < num_rows; ++row) {
755 const Fractional dual_value = solution.dual_values[row];
756 const Fractional lb = lp.constraint_lower_bounds()[row];
757 const Fractional ub = lp.constraint_upper_bounds()[row];
758 const ConstraintStatus status = solution.constraint_statuses[row];
759
760 // The activity value is not checked since it is imprecise.
761 // TODO(user): Check that the activity is epsilon close to the expected
762 // value.
763 switch (status) {
764 case ConstraintStatus::BASIC:
765 if (dual_value != 0.0) {
766 VLOG(1) << "Constraint " << row << " is BASIC, but its dual value is "
767 << dual_value << " instead of 0.";
768 return false;
769 }
770 ++num_basic_variables;
771 break;
772 case ConstraintStatus::FIXED_VALUE:
773 // Exactly the same remark as for the VariableStatus::FIXED_VALUE case
774 // above. Because of precision error, this can happen when the
775 // difference between the two bounds is small and not just exactly zero.
776 if (ub - lb > 1e-12) {
777 LogConstraintStatusError(row, status, lb, ub);
778 return false;
779 }
780 break;
781 case ConstraintStatus::AT_LOWER_BOUND:
782 if (lb == -kInfinity) {
783 LogConstraintStatusError(row, status, lb, ub);
784 return false;
785 }
786 break;
787 case ConstraintStatus::AT_UPPER_BOUND:
788 if (ub == kInfinity) {
789 LogConstraintStatusError(row, status, lb, ub);
790 return false;
791 }
792 break;
793 case ConstraintStatus::FREE:
794 if (dual_value != 0.0) {
795 VLOG(1) << "Constraint " << row << " is FREE, but its dual value is "
796 << dual_value << " instead of 0.";
797 return false;
798 }
799 if (lb != -kInfinity || ub != kInfinity) {
800 LogConstraintStatusError(row, status, lb, ub);
801 return false;
802 }
803 break;
804 }
805 }
806
807 // TODO(user): We could check in debug mode (because it will be costly) that
808 // the basis is actually factorizable.
809 if (num_basic_variables != num_rows) {
810 VLOG(1) << "Wrong number of basic variables: " << num_basic_variables;
811 return false;
812 }
813 return true;
814}
815
816// This computes by how much the objective must be perturbed to enforce the
817// following complementary slackness conditions:
818// - Reduced cost is exactly zero for FREE and BASIC variables.
819// - Reduced cost is of the correct sign for variables at their bounds.
820Fractional LPSolver::ComputeMaxCostPerturbationToEnforceOptimality(
821 const LinearProgram& lp, bool* is_too_large) {
822 Fractional max_cost_correction = 0.0;
823 const ColIndex num_cols = lp.num_variables();
824 const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
825 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
826 for (ColIndex col(0); col < num_cols; ++col) {
827 // We correct the reduced cost, so we have a minimization problem and
828 // thus the dual objective value will be a lower bound of the primal
829 // objective.
830 const Fractional reduced_cost = optimization_sign * reduced_costs_[col];
831 const VariableStatus status = variable_statuses_[col];
832 if (status == VariableStatus::BASIC || status == VariableStatus::FREE ||
833 (status == VariableStatus::AT_UPPER_BOUND && reduced_cost > 0.0) ||
834 (status == VariableStatus::AT_LOWER_BOUND && reduced_cost < 0.0)) {
835 max_cost_correction =
836 std::max(max_cost_correction, std::abs(reduced_cost));
837 *is_too_large |=
838 std::abs(reduced_cost) >
839 AllowedError(tolerance, lp.objective_coefficients()[col]);
840 }
841 }
842 SOLVER_LOG(&logger_, "Max. cost perturbation = ", max_cost_correction);
843 return max_cost_correction;
844}
845
846// This computes by how much the rhs must be perturbed to enforce the fact that
847// the constraint activities exactly reflect their status.
848Fractional LPSolver::ComputeMaxRhsPerturbationToEnforceOptimality(
849 const LinearProgram& lp, bool* is_too_large) {
850 Fractional max_rhs_correction = 0.0;
851 const RowIndex num_rows = lp.num_constraints();
852 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
853 for (RowIndex row(0); row < num_rows; ++row) {
854 const Fractional lower_bound = lp.constraint_lower_bounds()[row];
855 const Fractional upper_bound = lp.constraint_upper_bounds()[row];
856 const Fractional activity = constraint_activities_[row];
857 const ConstraintStatus status = constraint_statuses_[row];
858
859 Fractional rhs_error = 0.0;
860 Fractional allowed_error = 0.0;
861 if (status == ConstraintStatus::AT_LOWER_BOUND || activity < lower_bound) {
862 rhs_error = std::abs(activity - lower_bound);
863 allowed_error = AllowedError(tolerance, lower_bound);
864 } else if (status == ConstraintStatus::AT_UPPER_BOUND ||
865 activity > upper_bound) {
866 rhs_error = std::abs(activity - upper_bound);
867 allowed_error = AllowedError(tolerance, upper_bound);
868 }
869 max_rhs_correction = std::max(max_rhs_correction, rhs_error);
870 *is_too_large |= rhs_error > allowed_error;
871 }
872 SOLVER_LOG(&logger_, "Max. rhs perturbation = ", max_rhs_correction);
873 return max_rhs_correction;
874}
875
876void LPSolver::ComputeConstraintActivities(const LinearProgram& lp) {
877 const RowIndex num_rows = lp.num_constraints();
878 const ColIndex num_cols = lp.num_variables();
879 DCHECK_EQ(num_cols, primal_values_.size());
880 constraint_activities_.assign(num_rows, 0.0);
881 for (ColIndex col(0); col < num_cols; ++col) {
882 lp.GetSparseColumn(col).AddMultipleToDenseVector(primal_values_[col],
883 &constraint_activities_);
884 }
885}
886
887void LPSolver::ComputeReducedCosts(const LinearProgram& lp) {
888 const RowIndex num_rows = lp.num_constraints();
889 const ColIndex num_cols = lp.num_variables();
890 DCHECK_EQ(num_rows, dual_values_.size());
891 reduced_costs_.resize(num_cols, 0.0);
892 for (ColIndex col(0); col < num_cols; ++col) {
893 reduced_costs_[col] = lp.objective_coefficients()[col] -
894 ScalarProduct(dual_values_, lp.GetSparseColumn(col));
895 }
896}
897
898double LPSolver::ComputeObjective(const LinearProgram& lp) {
899 const ColIndex num_cols = lp.num_variables();
900 DCHECK_EQ(num_cols, primal_values_.size());
901 KahanSum sum;
902 for (ColIndex col(0); col < num_cols; ++col) {
903 sum.Add(lp.objective_coefficients()[col] * primal_values_[col]);
904 }
905 return sum.Value();
906}
907
908// By the duality theorem, the dual "objective" is a bound on the primal
909// objective obtained by taking the linear combination of the constraints
910// given by dual_values_.
911//
912// As it is written now, this has no precise meaning since we ignore
913// infeasible reduced costs. This is almost the same as computing the objective
914// of the perturbed problem, except that we don't use the perturbed rhs. It is just
915// here as an extra "consistency" check.
916//
917// Note(user): We could actually compute an EXACT lower bound for the cost of
918// the non-cost perturbed problem. The idea comes from "Safe bounds in linear
919// and mixed-integer linear programming", Arnold Neumaier, Oleg Shcherbina,
920// Math Prog, 2003. Note that this requires having some variable bounds that may
921// not be in the original problem so that the current dual solution is always
922// feasible. It also involves changing the rounding mode to obtain exact
923// confidence intervals on the reduced costs.
924double LPSolver::ComputeDualObjective(const LinearProgram& lp) {
925 KahanSum dual_objective;
926
927 // Compute the part coming from the row constraints.
928 const RowIndex num_rows = lp.num_constraints();
929 const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
930 for (RowIndex row(0); row < num_rows; ++row) {
931 const Fractional lower_bound = lp.constraint_lower_bounds()[row];
932 const Fractional upper_bound = lp.constraint_upper_bounds()[row];
933
934 // We correct the optimization_sign so we have to compute a lower bound.
935 const Fractional corrected_value = optimization_sign * dual_values_[row];
936 if (corrected_value > 0.0 && lower_bound != -kInfinity) {
937 dual_objective.Add(dual_values_[row] * lower_bound);
938 }
939 if (corrected_value < 0.0 && upper_bound != kInfinity) {
940 dual_objective.Add(dual_values_[row] * upper_bound);
941 }
942 }
943
944 // For a given column associated with a variable x, we want to find a lower
945 // bound for c.x (where c is the objective coefficient for this column). If we
946 // write a.x for the linear combination of the constraints at this column, we
947 // have c * x = (a + (c - a)) * x, and so
948 // c * x = a * x + (c - a) * x
949 // Now, if we suppose for example that the reduced cost 'c - a' is positive
950 // and that x is lower-bounded by 'lb' then the best bound we can get is
951 // c * x >= a * x + (c - a) * lb.
952 //
953 // Note: when summing over all variables, the left side is the primal
954 // objective and the right side is a lower bound to the objective. In
955 // particular, a necessary and sufficient condition for both objectives to be
956 // the same is that all the single variable inequalities above be equalities.
957 // This is possible only if c == a or if x is at its bound (modulo the
958 // optimization_sign of the reduced cost), or both (this is one side of the
959 // complementary slackness conditions, see Chvatal p. 62).
960 const ColIndex num_cols = lp.num_variables();
961 for (ColIndex col(0); col < num_cols; ++col) {
962 const Fractional lower_bound = lp.variable_lower_bounds()[col];
963 const Fractional upper_bound = lp.variable_upper_bounds()[col];
964
965 // Correct the reduced cost, so as to have a minimization problem and
966 // thus a dual objective that is a lower bound of the primal objective.
967 const Fractional reduced_cost = optimization_sign * reduced_costs_[col];
968
969 // We do not do any correction if the reduced cost is 'infeasible', which is
970 // the same as computing the objective of the perturbed problem.
971 Fractional correction = 0.0;
972 if (variable_statuses_[col] == VariableStatus::AT_LOWER_BOUND &&
973 reduced_cost > 0.0) {
974 correction = reduced_cost * lower_bound;
975 } else if (variable_statuses_[col] == VariableStatus::AT_UPPER_BOUND &&
976 reduced_cost < 0.0) {
977 correction = reduced_cost * upper_bound;
978 } else if (variable_statuses_[col] == VariableStatus::FIXED_VALUE) {
979 correction = reduced_cost * upper_bound;
980 }
981 // Now apply the correction in the right direction!
982 dual_objective.Add(optimization_sign * correction);
983 }
984 return dual_objective.Value();
985}
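As a worked instance of the per-column bound above: on a minimization problem with c = 3, a = 1 and a variable at its lower bound lb = 2, the reduced cost c - a = 2 is nonnegative, so c * x = a * x + (c - a) * x >= a * x + (c - a) * lb = a * x + 4. Summing such inequalities over all columns, together with the row terms computed in the first loop, yields the reported dual objective, which is therefore a lower bound on the primal objective.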
986
987double LPSolver::ComputeMaxExpectedObjectiveError(const LinearProgram& lp) {
988 const ColIndex num_cols = lp.num_variables();
989 DCHECK_EQ(num_cols, primal_values_.size());
990 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
991 Fractional primal_objective_error = 0.0;
992 for (ColIndex col(0); col < num_cols; ++col) {
993 // TODO(user): Be more precise since the non-BASIC variables are exactly at
994 // their bounds, so for them the error bound is just the term magnitude
995 // times std::numeric_limits<double>::epsilon() with KahanSum.
996 primal_objective_error += std::abs(lp.objective_coefficients()[col]) *
997 AllowedError(tolerance, primal_values_[col]);
998 }
999 return primal_objective_error;
1000}
1001
1002double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram& lp,
1003 bool* is_too_large) {
1004 double infeasibility = 0.0;
1005 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
1006 const ColIndex num_cols = lp.num_variables();
1007 for (ColIndex col(0); col < num_cols; ++col) {
1008 const Fractional lower_bound = lp.variable_lower_bounds()[col];
1009 const Fractional upper_bound = lp.variable_upper_bounds()[col];
1010 DCHECK(IsFinite(primal_values_[col]));
1011
1012 if (lower_bound == upper_bound) {
1013 const Fractional error = std::abs(primal_values_[col] - upper_bound);
1014 infeasibility = std::max(infeasibility, error);
1015 *is_too_large |= error > AllowedError(tolerance, upper_bound);
1016 continue;
1017 }
1018 if (primal_values_[col] > upper_bound) {
1019 const Fractional error = primal_values_[col] - upper_bound;
1020 infeasibility = std::max(infeasibility, error);
1021 *is_too_large |= error > AllowedError(tolerance, upper_bound);
1022 }
1023 if (primal_values_[col] < lower_bound) {
1024 const Fractional error = lower_bound - primal_values_[col];
1025 infeasibility = std::max(infeasibility, error);
1026 *is_too_large |= error > AllowedError(tolerance, lower_bound);
1027 }
1028 }
1029 return infeasibility;
1030}
1031
1032double LPSolver::ComputeActivityInfeasibility(const LinearProgram& lp,
1033 bool* is_too_large) {
1034 double infeasibility = 0.0;
1035 int num_problematic_rows(0);
1036 const RowIndex num_rows = lp.num_constraints();
1037 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
1038 for (RowIndex row(0); row < num_rows; ++row) {
1039 const Fractional activity = constraint_activities_[row];
1040 const Fractional lower_bound = lp.constraint_lower_bounds()[row];
1041 const Fractional upper_bound = lp.constraint_upper_bounds()[row];
1042 DCHECK(IsFinite(activity));
1043
1044 if (lower_bound == upper_bound) {
1045 if (std::abs(activity - upper_bound) >
1046 AllowedError(tolerance, upper_bound)) {
1047 VLOG(2) << "Row " << row.value() << " has activity " << activity
1048 << " which is different from " << upper_bound << " by "
1049 << activity - upper_bound;
1050 ++num_problematic_rows;
1051 }
1052 infeasibility = std::max(infeasibility, std::abs(activity - upper_bound));
1053 continue;
1054 }
1055 if (activity > upper_bound) {
1056 const Fractional row_excess = activity - upper_bound;
1057 if (row_excess > AllowedError(tolerance, upper_bound)) {
1058 VLOG(2) << "Row " << row.value() << " has activity " << activity
1059 << ", exceeding its upper bound " << upper_bound << " by "
1060 << row_excess;
1061 ++num_problematic_rows;
1062 }
1063 infeasibility = std::max(infeasibility, row_excess);
1064 }
1065 if (activity < lower_bound) {
1066 const Fractional row_deficit = lower_bound - activity;
1067 if (row_deficit > AllowedError(tolerance, lower_bound)) {
1068 VLOG(2) << "Row " << row.value() << " has activity " << activity
1069 << ", below its lower bound " << lower_bound << " by "
1070 << row_deficit;
1071 ++num_problematic_rows;
1072 }
1073 infeasibility = std::max(infeasibility, row_deficit);
1074 }
1075 }
1076 if (num_problematic_rows > 0) {
1077 *is_too_large = true;
1078 VLOG(1) << "Number of infeasible rows = " << num_problematic_rows;
1079 }
1080 return infeasibility;
1081}
1082
1083double LPSolver::ComputeDualValueInfeasibility(const LinearProgram& lp,
1084 bool* is_too_large) {
1085 const Fractional allowed_error = parameters_.solution_feasibility_tolerance();
1086 const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
1087 double infeasibility = 0.0;
1088 const RowIndex num_rows = lp.num_constraints();
1089 for (RowIndex row(0); row < num_rows; ++row) {
1090 const Fractional dual_value = dual_values_[row];
1091 const Fractional lower_bound = lp.constraint_lower_bounds()[row];
1092 const Fractional upper_bound = lp.constraint_upper_bounds()[row];
1093 DCHECK(IsFinite(dual_value));
1094 const Fractional minimization_dual_value = optimization_sign * dual_value;
1095 if (lower_bound == -kInfinity) {
1096 *is_too_large |= minimization_dual_value > allowed_error;
1097 infeasibility = std::max(infeasibility, minimization_dual_value);
1098 }
1099 if (upper_bound == kInfinity) {
1100 *is_too_large |= -minimization_dual_value > allowed_error;
1101 infeasibility = std::max(infeasibility, -minimization_dual_value);
1102 }
1103 }
1104 return infeasibility;
1105}
1106
1107double LPSolver::ComputeReducedCostInfeasibility(const LinearProgram& lp,
1108 bool* is_too_large) {
1109 const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
1110 double infeasibility = 0.0;
1111 const ColIndex num_cols = lp.num_variables();
1112 const Fractional tolerance = parameters_.solution_feasibility_tolerance();
1113 for (ColIndex col(0); col < num_cols; ++col) {
1114 const Fractional reduced_cost = reduced_costs_[col];
1115 const Fractional lower_bound = lp.variable_lower_bounds()[col];
1116 const Fractional upper_bound = lp.variable_upper_bounds()[col];
1117 DCHECK(IsFinite(reduced_cost));
1118 const Fractional minimization_reduced_cost =
1119 optimization_sign * reduced_cost;
1120 const Fractional allowed_error =
1121 AllowedError(tolerance, lp.objective_coefficients()[col]);
1122 if (lower_bound == -kInfinity) {
1123 *is_too_large |= minimization_reduced_cost > allowed_error;
1124 infeasibility = std::max(infeasibility, minimization_reduced_cost);
1125 }
1126 if (upper_bound == kInfinity) {
1127 *is_too_large |= -minimization_reduced_cost > allowed_error;
1128 infeasibility = std::max(infeasibility, -minimization_reduced_cost);
1129 }
1130 }
1131 return infeasibility;
1132}
1133
1134} // namespace glop
1135} // namespace operations_research