Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
primal_dual_hybrid_gradient.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/pdlp/primal_dual_hybrid_gradient.h"
15
16#include <algorithm>
17#include <atomic>
18#include <cmath>
19#include <cstdint>
20#include <functional>
21#include <limits>
22#include <optional>
23#include <random>
24#include <string>
25#include <utility>
26#include <vector>
27
28#include "Eigen/Core"
29#include "Eigen/SparseCore"
30#include "absl/algorithm/container.h"
31#include "absl/log/check.h"
32#include "absl/status/status.h"
33#include "absl/status/statusor.h"
34#include "absl/strings/str_cat.h"
35#include "absl/strings/str_format.h"
36#include "absl/strings/string_view.h"
37#include "absl/time/clock.h"
38#include "absl/time/time.h"
39#include "google/protobuf/repeated_ptr_field.h"
42#include "ortools/base/timer.h"
43#include "ortools/glop/parameters.pb.h"
45#include "ortools/linear_solver/linear_solver.pb.h"
54#include "ortools/pdlp/solve_log.pb.h"
55#include "ortools/pdlp/solvers.pb.h"
60
61namespace operations_research::pdlp {
62
63namespace {
64
65using ::Eigen::VectorXd;
66using ::operations_research::SolverLogger;
67
68using IterationStatsCallback =
69 std::function<void(const IterationCallbackInfo&)>;
70
71// Computes a `num_threads` that is capped by the problem size and `num_shards`,
72// if specified, to avoid creating unusable threads.
73int NumThreads(const int num_threads, const int num_shards,
74 const QuadraticProgram& qp, SolverLogger& logger) {
75 int capped_num_threads = num_threads;
76 if (num_shards > 0) {
77 capped_num_threads = std::min(capped_num_threads, num_shards);
78 }
79 const int64_t problem_limit = std::max(qp.variable_lower_bounds.size(),
80 qp.constraint_lower_bounds.size());
81 capped_num_threads =
82 static_cast<int>(std::min(int64_t{capped_num_threads}, problem_limit));
83 capped_num_threads = std::max(capped_num_threads, 1);
84 if (capped_num_threads != num_threads) {
85 SOLVER_LOG(&logger, "WARNING: Reducing num_threads from ", num_threads,
86 " to ", capped_num_threads,
87 " because additional threads would be useless.");
88 }
89 return capped_num_threads;
90}
91
92// If `num_shards` is positive, returns it. Otherwise returns a reasonable
93// number of shards to use with `ShardedQuadraticProgram` for the given
94// `num_threads`.
95int NumShards(const int num_threads, const int num_shards) {
96 if (num_shards > 0) return num_shards;
97 return num_threads == 1 ? 1 : 4 * num_threads;
98}
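// [Editor's illustrative note, not part of the original file] A quick worked
// example of the two helpers above: for a QP with 4 variables and 2
// constraints, NumThreads(/*num_threads=*/8, /*num_shards=*/0, qp, logger)
// caps the thread count at max(4, 2) = 4 (and logs a warning), and
// NumShards(/*num_threads=*/4, /*num_shards=*/0) then returns 4 * 4 = 16.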
99
100std::string ConvergenceInformationString(
101 const ConvergenceInformation& convergence_information,
102 const RelativeConvergenceInformation& relative_information,
103 const OptimalityNorm residual_norm) {
104 constexpr absl::string_view kFormatStr =
105 "%#12.6g %#12.6g %#12.6g | %#12.6g %#12.6g %#12.6g | %#12.6g %#12.6g | "
106 "%#12.6g %#12.6g";
107 switch (residual_norm) {
108 case OPTIMALITY_NORM_L_INF:
109 return absl::StrFormat(
110 kFormatStr, relative_information.relative_l_inf_primal_residual,
111 relative_information.relative_l_inf_dual_residual,
112 relative_information.relative_optimality_gap,
113 convergence_information.l_inf_primal_residual(),
114 convergence_information.l_inf_dual_residual(),
115 convergence_information.primal_objective() -
116 convergence_information.dual_objective(),
117 convergence_information.primal_objective(),
118 convergence_information.dual_objective(),
119 convergence_information.l2_primal_variable(),
120 convergence_information.l2_dual_variable());
121 case OPTIMALITY_NORM_L2:
122 return absl::StrFormat(kFormatStr,
123 relative_information.relative_l2_primal_residual,
124 relative_information.relative_l2_dual_residual,
125 relative_information.relative_optimality_gap,
126 convergence_information.l2_primal_residual(),
127 convergence_information.l2_dual_residual(),
128 convergence_information.primal_objective() -
129 convergence_information.dual_objective(),
130 convergence_information.primal_objective(),
131 convergence_information.dual_objective(),
132 convergence_information.l2_primal_variable(),
133 convergence_information.l2_dual_variable());
134 case OPTIMALITY_NORM_L_INF_COMPONENTWISE:
135 return absl::StrFormat(
136 kFormatStr,
137 convergence_information.l_inf_componentwise_primal_residual(),
138 convergence_information.l_inf_componentwise_dual_residual(),
139 relative_information.relative_optimality_gap,
140 convergence_information.l_inf_primal_residual(),
141 convergence_information.l_inf_dual_residual(),
142 convergence_information.primal_objective() -
143 convergence_information.dual_objective(),
144 convergence_information.primal_objective(),
145 convergence_information.dual_objective(),
146 convergence_information.l2_primal_variable(),
147 convergence_information.l2_dual_variable());
148 case OPTIMALITY_NORM_UNSPECIFIED:
149 LOG(FATAL) << "Residual norm not specified.";
150 }
151 LOG(FATAL) << "Invalid residual norm " << residual_norm << ".";
152}
153
154std::string ConvergenceInformationShortString(
155 const ConvergenceInformation& convergence_information,
156 const RelativeConvergenceInformation& relative_information,
157 const OptimalityNorm residual_norm) {
158 constexpr absl::string_view kFormatStr =
159 "%#10.4g %#10.4g %#10.4g | %#10.4g %#10.4g";
160 switch (residual_norm) {
161 case OPTIMALITY_NORM_L_INF:
162 return absl::StrFormat(
163 kFormatStr, relative_information.relative_l_inf_primal_residual,
164 relative_information.relative_l_inf_dual_residual,
165 relative_information.relative_optimality_gap,
166 convergence_information.primal_objective(),
167 convergence_information.dual_objective());
168 case OPTIMALITY_NORM_L2:
169 return absl::StrFormat(kFormatStr,
170 relative_information.relative_l2_primal_residual,
171 relative_information.relative_l2_dual_residual,
172 relative_information.relative_optimality_gap,
173 convergence_information.primal_objective(),
174 convergence_information.dual_objective());
175 case OPTIMALITY_NORM_L_INF_COMPONENTWISE:
176 return absl::StrFormat(
177 kFormatStr,
178 convergence_information.l_inf_componentwise_primal_residual(),
179 convergence_information.l_inf_componentwise_dual_residual(),
180 relative_information.relative_optimality_gap,
181 convergence_information.primal_objective(),
182 convergence_information.dual_objective());
183 case OPTIMALITY_NORM_UNSPECIFIED:
184 LOG(FATAL) << "Residual norm not specified.";
185 }
186 LOG(FATAL) << "Invalid residual norm " << residual_norm << ".";
187}
188
189// Logs a description of `iter_stats`, based on the
190// `iter_stats.convergence_information` entry with
191// `.candidate_type()==preferred_candidate` if one exists, otherwise based on
192// the first value, if any. `termination_criteria.optimality_norm` determines
193// which residual norms from `iter_stats.convergence_information` are used.
194void LogIterationStats(int verbosity_level, bool use_feasibility_polishing,
195 IterationType iteration_type,
196 const IterationStats& iter_stats,
197 const TerminationCriteria& termination_criteria,
198 const QuadraticProgramBoundNorms& bound_norms,
199 PointType preferred_candidate, SolverLogger& logger) {
200 std::string iteration_string =
201 verbosity_level >= 3
202 ? absl::StrFormat("%6d %8.1f %6.1f", iter_stats.iteration_number(),
203 iter_stats.cumulative_kkt_matrix_passes(),
204 iter_stats.cumulative_time_sec())
205 : absl::StrFormat("%6d %6.1f", iter_stats.iteration_number(),
206 iter_stats.cumulative_time_sec());
207 auto convergence_information =
208 GetConvergenceInformation(iter_stats, preferred_candidate);
209 if (!convergence_information.has_value() &&
210 iter_stats.convergence_information_size() > 0) {
211 convergence_information = iter_stats.convergence_information(0);
212 }
213 const char* phase_string = [&]() {
214 if (use_feasibility_polishing) {
215 switch (iteration_type) {
216 case IterationType::kNormal:
217 return "n ";
218 case IterationType::kPrimalFeasibility:
219 return "p ";
220 case IterationType::kDualFeasibility:
221 return "d ";
222 case IterationType::kNormalTermination:
223 case IterationType::kPrimalFeasibilityTermination:
224 case IterationType::kDualFeasibilityTermination:
225 return "t ";
226 }
227 } else {
228 return "";
229 }
230 }();
231 if (convergence_information.has_value()) {
232 const char* iterate_string = [&]() {
233 if (verbosity_level >= 4) {
234 switch (convergence_information->candidate_type()) {
235 case POINT_TYPE_CURRENT_ITERATE:
236 return "C ";
237 case POINT_TYPE_AVERAGE_ITERATE:
238 return "A ";
239 case POINT_TYPE_ITERATE_DIFFERENCE:
240 return "D ";
241 case POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION:
242 return "F ";
243 default:
244 return "? ";
245 }
246 } else {
247 return "";
248 }
249 }();
250 const RelativeConvergenceInformation relative_information =
251 ComputeRelativeResiduals(
252 EffectiveOptimalityCriteria(termination_criteria),
253 *convergence_information, bound_norms);
254 std::string convergence_string =
255 verbosity_level >= 3
256 ? ConvergenceInformationString(
257 *convergence_information, relative_information,
258 termination_criteria.optimality_norm())
259 : ConvergenceInformationShortString(
260 *convergence_information, relative_information,
261 termination_criteria.optimality_norm());
262 SOLVER_LOG(&logger, phase_string, iterate_string, iteration_string, " | ",
263 convergence_string);
264 } else {
265 // No convergence information, just log the basic work stats.
266 SOLVER_LOG(&logger, phase_string, verbosity_level >= 4 ? "? " : "",
267 iteration_string);
268 }
269}
270
271void LogIterationStatsHeader(int verbosity_level,
272 bool use_feasibility_polishing,
273 SolverLogger& logger) {
274 std::string work_labels =
275 verbosity_level >= 3
276 ? absl::StrFormat("%6s %8s %6s", "iter#", "kkt_pass", "time")
277 : absl::StrFormat("%6s %6s", "iter#", "time");
278 std::string convergence_labels =
279 verbosity_level >= 3
280 ? absl::StrFormat(
281 "%12s %12s %12s | %12s %12s %12s | %12s %12s | %12s %12s",
282 "rel_prim_res", "rel_dual_res", "rel_gap", "prim_resid",
283 "dual_resid", "obj_gap", "prim_obj", "dual_obj", "prim_var_l2",
284 "dual_var_l2")
285 : absl::StrFormat("%10s %10s %10s | %10s %10s", "rel_p_res",
286 "rel_d_res", "rel_gap", "prim_obj", "dual_obj");
287 SOLVER_LOG(&logger, use_feasibility_polishing ? "f " : "",
288 verbosity_level >= 4 ? "I " : "", work_labels, " | ",
289 convergence_labels);
290}
291
292enum class InnerStepOutcome {
293 kSuccessful,
294 kForceNumericalTermination,
295};
296
297// Makes the closing changes to `solve_log` and builds a `SolverResult`.
298// NOTE: `primal_solution`, `dual_solution`, and `solve_log` are passed by
299// value. To avoid unnecessary copying, move these objects (i.e. use
300// `std::move()`).
301SolverResult ConstructSolverResult(VectorXd primal_solution,
302 VectorXd dual_solution,
303 const IterationStats& stats,
304 TerminationReason termination_reason,
305 PointType output_type, SolveLog solve_log) {
306 solve_log.set_iteration_count(stats.iteration_number());
307 solve_log.set_termination_reason(termination_reason);
308 solve_log.set_solution_type(output_type);
309 solve_log.set_solve_time_sec(stats.cumulative_time_sec());
310 *solve_log.mutable_solution_stats() = stats;
311 return SolverResult{.primal_solution = std::move(primal_solution),
312 .dual_solution = std::move(dual_solution),
313 .solve_log = std::move(solve_log)};
314}
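// [Editor's illustrative sketch, not part of the original file] Per the NOTE
// above, callers should pass the large by-value arguments with `std::move()`.
// The variable names below are hypothetical:
//
//   SolverResult result = ConstructSolverResult(
//       std::move(primal), std::move(dual), stats,
//       TERMINATION_REASON_OPTIMAL, POINT_TYPE_AVERAGE_ITERATE,
//       std::move(solve_log));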
315
316class PreprocessSolver {
317 public:
318 // Assumes that `qp` and `params` are valid.
319 // Note that the `qp` is intentionally passed by value.
320 // `logger` is captured, and must outlive the class.
321 // NOTE: Many `PreprocessSolver` methods accept a `params` argument. This is
322 // passed as an argument instead of stored as a member variable to support
323 // using different `params` in different contexts with the same
324 // `PreprocessSolver` object.
325 explicit PreprocessSolver(QuadraticProgram qp,
326 const PrimalDualHybridGradientParams& params,
327 SolverLogger* logger);
328
329 // Not copyable or movable (because `glop::MainLpPreprocessor` isn't).
330 PreprocessSolver(const PreprocessSolver&) = delete;
331 PreprocessSolver& operator=(const PreprocessSolver&) = delete;
332 PreprocessSolver(PreprocessSolver&&) = delete;
333 PreprocessSolver& operator=(PreprocessSolver&&) = delete;
334
335 // Zero is used if `initial_solution` is nullopt. If `interrupt_solve` is not
336 // nullptr, then the solver will periodically check if
337 // `interrupt_solve->load()` is true, in which case the solve will terminate
338 // with `TERMINATION_REASON_INTERRUPTED_BY_USER`. Ownership is not
339 // transferred. If `iteration_stats_callback` is not nullptr, then at each
340 // termination step (when iteration stats are logged),
341 // `iteration_stats_callback` will also be called with those iteration stats.
342 SolverResult PreprocessAndSolve(
343 const PrimalDualHybridGradientParams& params,
344 std::optional<PrimalAndDualSolution> initial_solution,
345 const std::atomic<bool>* interrupt_solve,
346 IterationStatsCallback iteration_stats_callback);
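  // [Editor's illustrative sketch, not part of the original file] Hypothetical
  // caller wiring for the interrupt flag and callback described above, assuming
  // a `PreprocessSolver preprocess_solver(...)` has already been constructed:
  //
  //   std::atomic<bool> interrupt(false);
  //   SolverResult result = preprocess_solver.PreprocessAndSolve(
  //       params, /*initial_solution=*/std::nullopt, &interrupt,
  //       [](const IterationCallbackInfo& info) {
  //         // Examine the latest iteration stats here; setting `interrupt` to
  //         // true from another thread stops the solve with
  //         // TERMINATION_REASON_INTERRUPTED_BY_USER.
  //       });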
347
348 // Returns a `TerminationReasonAndPointType` when the termination criteria are
349 // satisfied, otherwise returns nothing. The pointers to working_* can be
350 // nullptr if an iterate of that type is not available. For the iterate types
351 // that are available, uses the primal and dual vectors to compute solution
352 // statistics and adds them to the stats proto.
353 // `full_stats` is used for checking work-based termination criteria,
354 // but `stats` is updated with convergence information.
355 // NOTE: The primal and dual input pairs should be scaled solutions.
356 // TODO(user): Move this long argument list into a struct.
357 std::optional<TerminationReasonAndPointType>
358 UpdateIterationStatsAndCheckTermination(
359 const PrimalDualHybridGradientParams& params,
360 bool force_numerical_termination, const VectorXd& working_primal_current,
361 const VectorXd& working_dual_current,
362 const VectorXd* working_primal_average,
363 const VectorXd* working_dual_average,
364 const VectorXd* working_primal_delta, const VectorXd* working_dual_delta,
365 const VectorXd& last_primal_start_point,
366 const VectorXd& last_dual_start_point,
367 const std::atomic<bool>* interrupt_solve, IterationType iteration_type,
368 const IterationStats& full_stats, IterationStats& stats);
369
370 // Computes solution statistics for the primal and dual input pair, which
371 // should be a scaled solution. If `convergence_information != nullptr`,
372 // populates it with convergence information, and similarly if
373 // `infeasibility_information != nullptr`, populates it with infeasibility
374 // information.
375 void ComputeConvergenceAndInfeasibilityFromWorkingSolution(
376 const PrimalDualHybridGradientParams& params,
377 const VectorXd& working_primal, const VectorXd& working_dual,
378 PointType candidate_type, ConvergenceInformation* convergence_information,
379 InfeasibilityInformation* infeasibility_information) const;
380
381 // Returns a `SolverResult` for the original problem, given a `SolverResult`
382 // from the scaled or preprocessed problem. Also computes the reduced costs.
383 // NOTE: `result` is passed by value. To avoid unnecessary copying, move this
384 // object (i.e. use `std::move()`).
385 SolverResult ConstructOriginalSolverResult(
386 const PrimalDualHybridGradientParams& params, SolverResult result,
387 SolverLogger& logger) const;
388
389 const ShardedQuadraticProgram& ShardedWorkingQp() const {
390 return sharded_qp_;
391 }
392
393 // Swaps `variable_lower_bounds` and `variable_upper_bounds` with the ones on
394 // the sharded quadratic program.
395 void SwapVariableBounds(VectorXd& variable_lower_bounds,
396 VectorXd& variable_upper_bounds) {
397 sharded_qp_.SwapVariableBounds(variable_lower_bounds,
398 variable_upper_bounds);
399 }
400
401 // Swaps `constraint_lower_bounds` and `constraint_upper_bounds` with the ones
402 // on the sharded quadratic program.
403 void SwapConstraintBounds(VectorXd& constraint_lower_bounds,
404 VectorXd& constraint_upper_bounds) {
405 sharded_qp_.SwapConstraintBounds(constraint_lower_bounds,
406 constraint_upper_bounds);
407 }
408
409 // Swaps `objective` with the objective vector on the quadratic program.
410 // Swapping `objective_matrix` is not yet supported because it hasn't been
411 // needed.
412 void SwapObjectiveVector(VectorXd& objective) {
413 sharded_qp_.SwapObjectiveVector(objective);
414 }
415
416 const QuadraticProgramBoundNorms& OriginalBoundNorms() const {
417 return original_bound_norms_;
418 }
419
420 SolverLogger& Logger() { return logger_; }
421
422 private:
423 struct PresolveInfo {
424 explicit PresolveInfo(ShardedQuadraticProgram original_qp,
425 const PrimalDualHybridGradientParams& params)
426 : preprocessor_parameters(PreprocessorParameters(params)),
427 preprocessor(&preprocessor_parameters),
428 sharded_original_qp(std::move(original_qp)),
429 trivial_col_scaling_vec(
430 OnesVector(sharded_original_qp.PrimalSharder())),
431 trivial_row_scaling_vec(
432 OnesVector(sharded_original_qp.DualSharder())) {}
433
434 glop::GlopParameters preprocessor_parameters;
435 glop::MainLpPreprocessor preprocessor;
436 ShardedQuadraticProgram sharded_original_qp;
437 bool presolved_problem_was_maximization = false;
438 const VectorXd trivial_col_scaling_vec, trivial_row_scaling_vec;
439 };
440
441 // TODO(user): experiment with different preprocessor types.
442 static glop::GlopParameters PreprocessorParameters(
443 const PrimalDualHybridGradientParams& params);
444
445 // If presolve is enabled, moves `sharded_qp_` to
446 // `presolve_info_.sharded_original_qp`, computes the presolved linear
447 // program, and installs it in `sharded_qp_`. Clears `initial_solution` if
448 // presolve is enabled. If presolve solves the problem completely, returns
449 // the appropriate `TerminationReason`; otherwise returns nullopt. If presolve
450 // is disabled or an error occurs, modifies nothing and returns nullopt.
451 std::optional<TerminationReason> ApplyPresolveIfEnabled(
452 const PrimalDualHybridGradientParams& params,
453 std::optional<PrimalAndDualSolution>* initial_solution);
454
455 void ComputeAndApplyRescaling(const PrimalDualHybridGradientParams& params,
456 VectorXd& starting_primal_solution,
457 VectorXd& starting_dual_solution);
458
459 void LogQuadraticProgramStats(const QuadraticProgramStats& stats) const;
460
461 double InitialPrimalWeight(const PrimalDualHybridGradientParams& params,
462 double l2_norm_primal_linear_objective,
463 double l2_norm_constraint_bounds) const;
464
465 PrimalAndDualSolution RecoverOriginalSolution(
466 PrimalAndDualSolution working_solution) const;
467
468 // Adds one entry of `PointMetadata` to `stats` using the input solutions.
469 void AddPointMetadata(const PrimalDualHybridGradientParams& params,
470 const VectorXd& primal_solution,
471 const VectorXd& dual_solution, PointType point_type,
472 const VectorXd& last_primal_start_point,
473 const VectorXd& last_dual_start_point,
474 IterationStats& stats) const;
475
476 const QuadraticProgram& Qp() const { return sharded_qp_.Qp(); }
477
478 const int num_threads_;
479 const int num_shards_;
480
481 // The bound norms of the original problem.
482 QuadraticProgramBoundNorms original_bound_norms_;
483
484 // This is the QP that PDHG is run on. It is modified by presolve and
485 // rescaling, if those are enabled, and then serves as the
486 // `ShardedWorkingQp()` when calling `Solver::Solve()`. The original problem
487 // is available in `presolve_info_->sharded_original_qp` if
488 // `presolve_info_.has_value()`, and otherwise can be obtained by undoing the
489 // scaling of `sharded_qp_` by `col_scaling_vec_` and `row_scaling_vec_`.
490 ShardedQuadraticProgram sharded_qp_;
491
492 // Set iff presolve is enabled.
493 std::optional<PresolveInfo> presolve_info_;
494
495 // The scaling vectors that map the original (or presolved) quadratic program
496 // to the working version. See
497 // `ShardedQuadraticProgram::RescaleQuadraticProgram()` for details.
498 VectorXd col_scaling_vec_;
499 VectorXd row_scaling_vec_;
500
501 // A counter used to trigger writing iteration headers.
502 int log_counter_ = 0;
503 absl::Time time_of_last_log_ = absl::InfinitePast();
504 SolverLogger& logger_;
505 IterationStatsCallback iteration_stats_callback_;
506};
507
508class Solver {
509 public:
510 // `preprocess_solver` should not be nullptr, and the `PreprocessSolver`
511 // object must outlive this `Solver` object. Ownership is not transferred.
512 explicit Solver(const PrimalDualHybridGradientParams& params,
513 VectorXd starting_primal_solution,
514 VectorXd starting_dual_solution, double initial_step_size,
515 double initial_primal_weight,
516 PreprocessSolver* preprocess_solver);
517
518 // Not copyable or movable (because there are const members).
519 Solver(const Solver&) = delete;
520 Solver& operator=(const Solver&) = delete;
521 Solver(Solver&&) = delete;
522 Solver& operator=(Solver&&) = delete;
523
524 const QuadraticProgram& WorkingQp() const { return ShardedWorkingQp().Qp(); }
525
526 const ShardedQuadraticProgram& ShardedWorkingQp() const {
527 return preprocess_solver_->ShardedWorkingQp();
528 }
529
530 // Runs PDHG iterations on the instance that has been initialized in `Solver`.
531 // If `interrupt_solve` is not nullptr, then the solver will periodically
532 // check if `interrupt_solve->load()` is true, in which case the solve will
533 // terminate with `TERMINATION_REASON_INTERRUPTED_BY_USER`. Ownership is not
534 // transferred.
535 // `solve_log` should contain initial problem statistics.
536 // On return, `SolverResult.reduced_costs` will be empty, and the solution will
537 // be to the preprocessed/scaled problem.
538 SolverResult Solve(IterationType iteration_type,
539 const std::atomic<bool>* interrupt_solve,
540 SolveLog solve_log);
541
542 private:
543 struct NextSolutionAndDelta {
544 VectorXd value;
545 // `delta` is `value` - current_solution.
546 VectorXd delta;
547 };
548
549 struct DistanceBasedRestartInfo {
550 double distance_moved_last_restart_period;
551 int length_of_last_restart_period;
552 };
553
554 // Movement terms (weighted squared norms of the primal and dual deltas)
555 // larger than this cause termination, because the iterates are diverging and
556 // are likely to produce infinite or NaN values.
557 constexpr static double kDivergentMovement = 1.0e100;
558
559 // Attempts to solve primal and dual feasibility subproblems starting at the
560 // average iterate, for at most `iteration_limit` iterations each. If
561 // successful, returns a `SolverResult`, otherwise nullopt. Appends
562 // information about the polishing phases to
563 // `solve_log.feasibility_polishing_details`.
564 std::optional<SolverResult> TryFeasibilityPolishing(
565 int iteration_limit, const std::atomic<bool>* interrupt_solve,
566 SolveLog& solve_log);
567
568 // Tries to find primal feasibility, adds the solve log details to
569 // `solve_log.feasibility_polishing_details`, and returns the result.
570 SolverResult TryPrimalPolishing(VectorXd starting_primal_solution,
571 int iteration_limit,
572 const std::atomic<bool>* interrupt_solve,
573 SolveLog& solve_log);
574
575 // Tries to find dual feasibility, adds the solve log details to
576 // `solve_log.feasibility_polishing_details`, and returns the result.
577 SolverResult TryDualPolishing(VectorXd starting_dual_solution,
578 int iteration_limit,
579 const std::atomic<bool>* interrupt_solve,
580 SolveLog& solve_log);
581
582 NextSolutionAndDelta ComputeNextPrimalSolution(double primal_step_size) const;
583
584 NextSolutionAndDelta ComputeNextDualSolution(
585 double dual_step_size, double extrapolation_factor,
586 const NextSolutionAndDelta& next_primal) const;
587
588 std::pair<double, double> ComputeMovementTerms(
589 const VectorXd& delta_primal, const VectorXd& delta_dual) const;
590
591 double ComputeMovement(const VectorXd& delta_primal,
592 const VectorXd& delta_dual) const;
593
594 double ComputeNonlinearity(const VectorXd& delta_primal,
595 const VectorXd& next_dual_product) const;
596
597 // Creates all the simple-to-compute statistics in stats.
598 IterationStats CreateSimpleIterationStats(RestartChoice restart_used) const;
599
600 // Returns the total work so far from the main iterations and feasibility
601 // polishing phases.
602 IterationStats TotalWorkSoFar(const SolveLog& solve_log) const;
603
604 RestartChoice ChooseRestartToApply(bool is_major_iteration);
605
606 VectorXd PrimalAverage() const;
607
608 VectorXd DualAverage() const;
609
610 double ComputeNewPrimalWeight() const;
611
612 // Picks the primal and dual solutions according to `output_type`, and makes
613 // the closing changes to `solve_log`. This function should only be called
614 // once when the solver is finishing its execution.
615 // NOTE: `primal_solution` and `dual_solution` are used as the output except
616 // when `output_type` is `POINT_TYPE_CURRENT_ITERATE` or
617 // `POINT_TYPE_ITERATE_DIFFERENCE`, in which case the values are computed from
618 // `Solver` data.
619 // NOTE: `primal_solution`, `dual_solution`, and `solve_log` are passed by
620 // value. To avoid unnecessary copying, move these objects (i.e. use
621 // `std::move()`).
622 SolverResult PickSolutionAndConstructSolverResult(
623 VectorXd primal_solution, VectorXd dual_solution,
624 const IterationStats& stats, TerminationReason termination_reason,
625 PointType output_type, SolveLog solve_log) const;
626
627 double DistanceTraveledFromLastStart(const VectorXd& primal_solution,
628 const VectorXd& dual_solution) const;
629
630 LocalizedLagrangianBounds ComputeLocalizedBoundsAtCurrent() const;
631
632 LocalizedLagrangianBounds ComputeLocalizedBoundsAtAverage() const;
633
634 // Applies the given `RestartChoice`. If a restart is chosen, updates the
635 // state of the algorithm accordingly and computes a new primal weight.
636 void ApplyRestartChoice(RestartChoice restart_to_apply);
637
638 std::optional<SolverResult> MajorIterationAndTerminationCheck(
639 IterationType iteration_type, bool force_numerical_termination,
640 const std::atomic<bool>* interrupt_solve,
641 const IterationStats& work_from_feasibility_polishing,
642 SolveLog& solve_log);
643
644 bool ShouldDoAdaptiveRestartHeuristic(double candidate_normalized_gap) const;
645
646 RestartChoice DetermineDistanceBasedRestartChoice() const;
647
648 void ResetAverageToCurrent();
649
650 void LogNumericalTermination(const Eigen::VectorXd& primal_delta,
651 const Eigen::VectorXd& dual_delta) const;
652
653 void LogInnerIterationLimitHit() const;
654
655 // Takes a step based on the Malitsky and Pock linesearch algorithm.
656 // (https://arxiv.org/pdf/1608.08883.pdf)
657 // The current implementation is provably convergent (at an optimal rate)
658 // for LPs (provided we do not change the primal weight at every major
659 // iteration). Further, we have observed that this rule is very sensitive to
660 // the parameter choice whenever we apply the primal weight recomputation
661 // heuristic.
662 InnerStepOutcome TakeMalitskyPockStep();
663
664 // Takes a step based on the adaptive heuristic presented in Section 3.1 of
665 // https://arxiv.org/pdf/2106.04756.pdf (further generalized to QP).
666 InnerStepOutcome TakeAdaptiveStep();
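  // [Editor's note, illustrative only] In the adaptive heuristic from the
  // paper cited above, a candidate step of size eta is accepted when
  //   eta <= movement / nonlinearity,
  // and the next trial step size is chosen as approximately
  //   min((1 - (k + 1)^-0.3) * movement / nonlinearity,
  //       (1 + (k + 1)^-0.6) * eta),
  // where k is the iteration count. The exact constants used by this file are
  // in the definition of `TakeAdaptiveStep()`, which is not shown in this
  // excerpt.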
667
668 // Takes a constant-size step.
669 InnerStepOutcome TakeConstantSizeStep();
670
671 const PrimalDualHybridGradientParams params_;
672
673 VectorXd current_primal_solution_;
674 VectorXd current_dual_solution_;
675 VectorXd current_primal_delta_;
676 VectorXd current_dual_delta_;
677
678 ShardedWeightedAverage primal_average_;
679 ShardedWeightedAverage dual_average_;
680
681 double step_size_;
682 double primal_weight_;
683
684 PreprocessSolver* preprocess_solver_;
685
686 // For Malitsky-Pock linesearch only: `step_size_` / previous_step_size
687 double ratio_last_two_step_sizes_;
688 // For adaptive restarts only.
689 double normalized_gap_at_last_trial_ =
690 std::numeric_limits<double>::infinity();
691 // For adaptive restarts only.
692 double normalized_gap_at_last_restart_ =
693 std::numeric_limits<double>::infinity();
694
695 // `preprocessing_time_sec_` is added to the current value of `timer_` when
696 // computing `cumulative_time_sec` in iteration stats.
697 double preprocessing_time_sec_;
698 WallTimer timer_;
699 int iterations_completed_;
700 int num_rejected_steps_;
701 // A cache of `constraint_matrix.transpose() * current_dual_solution_`.
702 VectorXd current_dual_product_;
704 // The primal point from which the algorithm was last restarted, or the
705 // initial primal starting point if no restart has occurred.
706 VectorXd last_primal_start_point_;
707 // The dual point from which the algorithm was last restarted, or the
708 // initial dual starting point if no restart has occurred.
708 VectorXd last_dual_start_point_;
709 // Information for deciding whether to trigger a distance-based restart.
710 // The distances are initialized to +inf to force a restart during the first
711 // major iteration check.
712 DistanceBasedRestartInfo distance_based_restart_info_ = {
713 .distance_moved_last_restart_period =
714 std::numeric_limits<double>::infinity(),
715 .length_of_last_restart_period = 1,
716 };
717};
718
719PreprocessSolver::PreprocessSolver(QuadraticProgram qp,
720 const PrimalDualHybridGradientParams& params,
721 SolverLogger* logger)
722 : num_threads_(
723 NumThreads(params.num_threads(), params.num_shards(), qp, *logger)),
724 num_shards_(NumShards(num_threads_, params.num_shards())),
725 sharded_qp_(std::move(qp), num_threads_, num_shards_,
726 params.scheduler_type(), nullptr),
727 logger_(*logger) {}
728
729SolverResult ErrorSolverResult(const TerminationReason reason,
730 const std::string& message,
731 SolverLogger& logger) {
732 SolveLog error_log;
733 error_log.set_termination_reason(reason);
734 error_log.set_termination_string(message);
735 SOLVER_LOG(&logger,
736 "The solver did not run because of invalid input: ", message);
737 return SolverResult{.solve_log = error_log};
738}
739
740 // If `check_excessively_small_values` is true, then in addition to checking
741 // for extreme values that PDLP can't handle, also checks for excessively small
742 // constraint bounds, variable bound gaps, and objective vector values that PDLP
743 // can handle but that cause problems for other code such as glop's presolve.
744std::optional<SolverResult> CheckProblemStats(
745 const QuadraticProgramStats& problem_stats, const double objective_offset,
746 bool check_excessively_small_values, SolverLogger& logger) {
747 const double kExcessiveInputValue = 1e50;
748 const double kExcessivelySmallInputValue = 1e-50;
749 const double kMaxDynamicRange = 1e20;
750 if (std::isnan(problem_stats.constraint_matrix_l2_norm())) {
751 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
752 "Constraint matrix has a NAN.", logger);
753 }
754 if (problem_stats.constraint_matrix_abs_max() > kExcessiveInputValue) {
755 return ErrorSolverResult(
756 TERMINATION_REASON_INVALID_PROBLEM,
757 absl::StrCat("Constraint matrix has a non-zero with absolute value ",
758 problem_stats.constraint_matrix_abs_max(),
759 " which exceeds limit of ", kExcessiveInputValue, "."),
760 logger);
761 }
762 if (problem_stats.constraint_matrix_abs_max() >
763 kMaxDynamicRange * problem_stats.constraint_matrix_abs_min()) {
764 SOLVER_LOG(
765 &logger, "WARNING: Constraint matrix has largest absolute value ",
766 problem_stats.constraint_matrix_abs_max(),
767 " and smallest non-zero absolute value ",
768 problem_stats.constraint_matrix_abs_min(), "; performance may suffer.");
769 }
770 if (problem_stats.constraint_matrix_col_min_l_inf_norm() > 0 &&
771 problem_stats.constraint_matrix_col_min_l_inf_norm() <
772 kExcessivelySmallInputValue) {
773 return ErrorSolverResult(
774 TERMINATION_REASON_INVALID_PROBLEM,
775 absl::StrCat("Constraint matrix has a column with Linf norm ",
776 problem_stats.constraint_matrix_col_min_l_inf_norm(),
777 " which is less than limit of ",
778 kExcessivelySmallInputValue, "."),
779 logger);
780 }
781 if (problem_stats.constraint_matrix_row_min_l_inf_norm() > 0 &&
782 problem_stats.constraint_matrix_row_min_l_inf_norm() <
783 kExcessivelySmallInputValue) {
784 return ErrorSolverResult(
785 TERMINATION_REASON_INVALID_PROBLEM,
786 absl::StrCat("Constraint matrix has a row with Linf norm ",
787 problem_stats.constraint_matrix_row_min_l_inf_norm(),
788 " which is less than limit of ",
789 kExcessivelySmallInputValue, "."),
790 logger);
791 }
792 if (std::isnan(problem_stats.combined_bounds_l2_norm())) {
793 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
794 "Constraint bounds vector has a NAN.", logger);
795 }
796 if (problem_stats.combined_bounds_max() > kExcessiveInputValue) {
797 return ErrorSolverResult(
798 TERMINATION_REASON_INVALID_PROBLEM,
799 absl::StrCat("Combined constraint bounds vector has a non-zero with "
800 "absolute value ",
801 problem_stats.combined_bounds_max(),
802 " which exceeds limit of ", kExcessiveInputValue, "."),
803 logger);
804 }
805 if (check_excessively_small_values &&
806 problem_stats.combined_bounds_min() > 0 &&
807 problem_stats.combined_bounds_min() < kExcessivelySmallInputValue) {
808 return ErrorSolverResult(
809 TERMINATION_REASON_INVALID_PROBLEM,
810 absl::StrCat("Combined constraint bounds vector has a non-zero with "
811 "absolute value ",
812 problem_stats.combined_bounds_min(),
813 " which is less than the limit of ",
814 kExcessivelySmallInputValue, "."),
815 logger);
816 }
817 if (problem_stats.combined_bounds_max() >
818 kMaxDynamicRange * problem_stats.combined_bounds_min()) {
819 SOLVER_LOG(&logger,
820 "WARNING: Combined constraint bounds vector has largest "
821 "absolute value ",
822 problem_stats.combined_bounds_max(),
823 " and smallest non-zero absolute value ",
824 problem_stats.combined_bounds_min(),
825 "; performance may suffer.");
826 }
827 if (std::isnan(problem_stats.combined_variable_bounds_l2_norm())) {
828 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
829 "Variable bounds vector has a NAN.", logger);
830 }
831 if (problem_stats.combined_variable_bounds_max() > kExcessiveInputValue) {
832 return ErrorSolverResult(
833 TERMINATION_REASON_INVALID_PROBLEM,
834 absl::StrCat("Combined variable bounds vector has a non-zero with "
835 "absolute value ",
836 problem_stats.combined_variable_bounds_max(),
837 " which exceeds limit of ", kExcessiveInputValue, "."),
838 logger);
839 }
840 if (check_excessively_small_values &&
841 problem_stats.combined_variable_bounds_min() > 0 &&
842 problem_stats.combined_variable_bounds_min() <
843 kExcessivelySmallInputValue) {
844 return ErrorSolverResult(
845 TERMINATION_REASON_INVALID_PROBLEM,
846 absl::StrCat("Combined variable bounds vector has a non-zero with "
847 "absolute value ",
848 problem_stats.combined_variable_bounds_min(),
849 " which is less than the limit of ",
850 kExcessivelySmallInputValue, "."),
851 logger);
852 }
853 if (problem_stats.combined_variable_bounds_max() >
854 kMaxDynamicRange * problem_stats.combined_variable_bounds_min()) {
855 SOLVER_LOG(
856 &logger,
857 "WARNING: Combined variable bounds vector has largest absolute value ",
858 problem_stats.combined_variable_bounds_max(),
859 " and smallest non-zero absolute value ",
860 problem_stats.combined_variable_bounds_min(),
861 "; performance may suffer.");
862 }
863 if (problem_stats.variable_bound_gaps_max() >
864 kMaxDynamicRange * problem_stats.variable_bound_gaps_min()) {
865 SOLVER_LOG(&logger,
866 "WARNING: Variable bound gap vector has largest absolute value ",
867 problem_stats.variable_bound_gaps_max(),
868 " and smallest non-zero absolute value ",
869 problem_stats.variable_bound_gaps_min(),
870 "; performance may suffer.");
871 }
872 if (std::isnan(objective_offset)) {
873 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
874 "Objective offset is NAN.", logger);
875 }
876 if (std::abs(objective_offset) > kExcessiveInputValue) {
877 return ErrorSolverResult(
878 TERMINATION_REASON_INVALID_PROBLEM,
879 absl::StrCat("Objective offset ", objective_offset,
880 " has absolute value which exceeds limit of ",
881 kExcessiveInputValue, "."),
882 logger);
883 }
884 if (std::isnan(problem_stats.objective_vector_l2_norm())) {
885 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
886 "Objective vector has a NAN.", logger);
887 }
888 if (problem_stats.objective_vector_abs_max() > kExcessiveInputValue) {
889 return ErrorSolverResult(
890 TERMINATION_REASON_INVALID_PROBLEM,
891 absl::StrCat("Objective vector has a non-zero with absolute value ",
892 problem_stats.objective_vector_abs_max(),
893 " which exceeds limit of ", kExcessiveInputValue, "."),
894 logger);
895 }
896 if (check_excessively_small_values &&
897 problem_stats.objective_vector_abs_min() > 0 &&
898 problem_stats.objective_vector_abs_min() < kExcessivelySmallInputValue) {
899 return ErrorSolverResult(
900 TERMINATION_REASON_INVALID_PROBLEM,
901 absl::StrCat("Objective vector has a non-zero with absolute value ",
902 problem_stats.objective_vector_abs_min(),
903 " which is less than the limit of ",
904 kExcessivelySmallInputValue, "."),
905 logger);
906 }
907 if (problem_stats.objective_vector_abs_max() >
908 kMaxDynamicRange * problem_stats.objective_vector_abs_min()) {
909 SOLVER_LOG(&logger, "WARNING: Objective vector has largest absolute value ",
910 problem_stats.objective_vector_abs_max(),
911 " and smallest non-zero absolute value ",
912 problem_stats.objective_vector_abs_min(),
913 "; performance may suffer.");
914 }
915 if (std::isnan(problem_stats.objective_matrix_l2_norm())) {
916 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
917 "Objective matrix has a NAN.", logger);
918 }
919 if (problem_stats.objective_matrix_abs_max() > kExcessiveInputValue) {
920 return ErrorSolverResult(
921 TERMINATION_REASON_INVALID_PROBLEM,
922 absl::StrCat("Objective matrix has a non-zero with absolute value ",
923 problem_stats.objective_matrix_abs_max(),
924 " which exceeds limit of ", kExcessiveInputValue, "."),
925 logger);
926 }
927 if (problem_stats.objective_matrix_abs_max() >
928 kMaxDynamicRange * problem_stats.objective_matrix_abs_min()) {
929 SOLVER_LOG(&logger, "WARNING: Objective matrix has largest absolute value ",
930 problem_stats.objective_matrix_abs_max(),
931 " and smallest non-zero absolute value ",
932 problem_stats.objective_matrix_abs_min(),
933 "; performance may suffer.");
934 }
935 return std::nullopt;
936}
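// [Editor's illustrative note, not part of the original file] Example of the
// dynamic range checks above: with kMaxDynamicRange = 1e20, a constraint
// matrix whose largest absolute nonzero is 1e15 and smallest is 1e-10 has a
// ratio of 1e25 > 1e20, so the solver logs a "performance may suffer" warning
// rather than rejecting the problem; rejection (an invalid-problem error) is
// reserved for NaNs, magnitudes above 1e50, and the excessively-small-value
// checks.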
937
938std::optional<SolverResult> CheckInitialSolution(
939 const ShardedQuadraticProgram& sharded_qp,
940 const PrimalAndDualSolution& initial_solution, SolverLogger& logger) {
941 const double kExcessiveInputValue = 1e50;
942 if (initial_solution.primal_solution.size() != sharded_qp.PrimalSize()) {
943 return ErrorSolverResult(
944 TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
945 absl::StrCat("Initial primal solution has size ",
946 initial_solution.primal_solution.size(),
947 " which differs from problem primal size ",
948 sharded_qp.PrimalSize()),
949 logger);
950 }
951 if (std::isnan(
952 Norm(initial_solution.primal_solution, sharded_qp.PrimalSharder()))) {
953 return ErrorSolverResult(TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
954 "Initial primal solution has a NAN.", logger);
955 }
956 if (const double norm = LInfNorm(initial_solution.primal_solution,
957 sharded_qp.PrimalSharder());
958 norm > kExcessiveInputValue) {
959 return ErrorSolverResult(
960 TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
961 absl::StrCat(
962 "Initial primal solution has an entry with absolute value ", norm,
963 " which exceeds limit of ", kExcessiveInputValue),
964 logger);
965 }
966 if (initial_solution.dual_solution.size() != sharded_qp.DualSize()) {
967 return ErrorSolverResult(
968 TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
969 absl::StrCat("Initial dual solution has size ",
970 initial_solution.dual_solution.size(),
971 " which differs from problem dual size ",
972 sharded_qp.DualSize()),
973 logger);
974 }
975 if (std::isnan(
976 Norm(initial_solution.dual_solution, sharded_qp.DualSharder()))) {
977 return ErrorSolverResult(TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
978 "Initial dual solution has a NAN.", logger);
979 }
980 if (const double norm =
981 LInfNorm(initial_solution.dual_solution, sharded_qp.DualSharder());
982 norm > kExcessiveInputValue) {
983 return ErrorSolverResult(
984 TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
985 absl::StrCat("Initial dual solution has an entry with absolute value ",
986 norm, " which exceeds limit of ", kExcessiveInputValue),
987 logger);
988 }
989 return std::nullopt;
990}
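// [Editor's illustrative note, not part of the original file] For example, an
// initial primal solution whose length doesn't match the problem's primal
// size, or that contains a NAN or an entry with absolute value above 1e50,
// makes the solve return TERMINATION_REASON_INVALID_INITIAL_SOLUTION instead
// of running.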
991
992SolverResult PreprocessSolver::PreprocessAndSolve(
993 const PrimalDualHybridGradientParams& params,
994 std::optional<PrimalAndDualSolution> initial_solution,
995 const std::atomic<bool>* interrupt_solve,
996 IterationStatsCallback iteration_stats_callback) {
997 WallTimer timer;
998 timer.Start();
999 SolveLog solve_log;
1000 if (params.verbosity_level() >= 1) {
1001 SOLVER_LOG(&logger_, "Solving with PDLP parameters: ", params);
1002 }
1003 if (Qp().problem_name.has_value()) {
1004 solve_log.set_instance_name(*Qp().problem_name);
1005 }
1006 *solve_log.mutable_params() = params;
1007 sharded_qp_.ReplaceLargeConstraintBoundsWithInfinity(
1008 params.infinite_constraint_bound_threshold());
1009 if (!HasValidBounds(sharded_qp_)) {
1010 return ErrorSolverResult(
1011 TERMINATION_REASON_INVALID_PROBLEM,
1012 "The input problem has invalid bounds (after replacing large "
1013 "constraint bounds with infinity): some variable or constraint has "
1014 "lower_bound > upper_bound, lower_bound == inf, or upper_bound == "
1015 "-inf.",
1016 logger_);
1017 }
1018 if (Qp().objective_matrix.has_value() &&
1019 !sharded_qp_.PrimalSharder().ParallelTrueForAllShards(
1020 [&](const Sharder::Shard& shard) -> bool {
1021 return (shard(Qp().objective_matrix->diagonal()).array() >= 0.0)
1022 .all();
1023 })) {
1024 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
1025 "The objective is not convex (i.e., the objective "
1026 "matrix contains negative or NAN entries).",
1027 logger_);
1028 }
1029 *solve_log.mutable_original_problem_stats() = ComputeStats(sharded_qp_);
1030 const QuadraticProgramStats& original_problem_stats =
1031 solve_log.original_problem_stats();
1032 if (auto maybe_result =
1033 CheckProblemStats(original_problem_stats, Qp().objective_offset,
1034 params.presolve_options().use_glop(), logger_);
1035 maybe_result.has_value()) {
1036 return *maybe_result;
1037 }
1038 if (initial_solution.has_value()) {
1039 if (auto maybe_result =
1040 CheckInitialSolution(sharded_qp_, *initial_solution, logger_);
1041 maybe_result.has_value()) {
1042 return *maybe_result;
1043 }
1044 }
1045 original_bound_norms_ = BoundNormsFromProblemStats(original_problem_stats);
1046 const std::string preprocessing_string = absl::StrCat(
1047 params.presolve_options().use_glop() ? "presolving and " : "",
1048 "rescaling:");
1049 if (params.verbosity_level() >= 1) {
1050 SOLVER_LOG(&logger_, "Problem stats before ", preprocessing_string);
1051 LogQuadraticProgramStats(solve_log.original_problem_stats());
1052 }
1053 iteration_stats_callback_ = std::move(iteration_stats_callback);
1054 std::optional<TerminationReason> maybe_terminate =
1055 ApplyPresolveIfEnabled(params, &initial_solution);
1056 if (maybe_terminate.has_value()) {
1057 // Glop also feeds zero primal and dual solutions when the preprocessor
1058 // has a non-INIT status. When the preprocessor status is optimal the
1059 // vectors have length 0. When the status is something else the lengths
1060 // may be non-zero, but that's OK since we don't promise to produce a
1061 // meaningful solution in that case.
1062 IterationStats iteration_stats;
1063 iteration_stats.set_cumulative_time_sec(timer.Get());
1064 solve_log.set_preprocessing_time_sec(iteration_stats.cumulative_time_sec());
1065 VectorXd working_primal = ZeroVector(sharded_qp_.PrimalSharder());
1066 VectorXd working_dual = ZeroVector(sharded_qp_.DualSharder());
1067 ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1068 params, working_primal, working_dual, POINT_TYPE_PRESOLVER_SOLUTION,
1069 iteration_stats.add_convergence_information(),
1070 iteration_stats.add_infeasibility_information());
1071 std::optional<TerminationReasonAndPointType> earned_termination =
1072 CheckIterateTerminationCriteria(params.termination_criteria(),
1073 iteration_stats, original_bound_norms_,
1074 /*force_numerical_termination=*/false);
1075 if (!earned_termination.has_value()) {
1076 earned_termination = CheckSimpleTerminationCriteria(
1077 params.termination_criteria(), iteration_stats, interrupt_solve);
1078 }
1079 TerminationReason final_termination_reason;
1080 if (earned_termination.has_value() &&
1081 (earned_termination->reason == TERMINATION_REASON_OPTIMAL ||
1082 earned_termination->reason == TERMINATION_REASON_PRIMAL_INFEASIBLE ||
1083 earned_termination->reason == TERMINATION_REASON_DUAL_INFEASIBLE)) {
1084 final_termination_reason = earned_termination->reason;
1085 } else {
1086 if (*maybe_terminate == TERMINATION_REASON_OPTIMAL) {
1087 final_termination_reason = TERMINATION_REASON_NUMERICAL_ERROR;
1088 SOLVER_LOG(
1089 &logger_,
1090 "WARNING: Presolve claimed to solve the LP optimally but the "
1091 "solution doesn't satisfy the optimality criteria.");
1092 } else {
1093 final_termination_reason = *maybe_terminate;
1094 }
1095 }
1096 return ConstructOriginalSolverResult(
1097 params,
1098 ConstructSolverResult(
1099 std::move(working_primal), std::move(working_dual),
1100 std::move(iteration_stats), final_termination_reason,
1101 POINT_TYPE_PRESOLVER_SOLUTION, std::move(solve_log)),
1102 logger_);
1103 }
1104
1105 VectorXd starting_primal_solution;
1106 VectorXd starting_dual_solution;
1107 // The current solution is updated by `ComputeAndApplyRescaling`.
1108 if (initial_solution.has_value()) {
1109 starting_primal_solution = std::move(initial_solution->primal_solution);
1110 starting_dual_solution = std::move(initial_solution->dual_solution);
1111 } else {
1112 SetZero(sharded_qp_.PrimalSharder(), starting_primal_solution);
1113 SetZero(sharded_qp_.DualSharder(), starting_dual_solution);
1114 }
1115 // The following projections are necessary since all our checks assume that
1116 // the primal and dual variable bounds are satisfied.
1117 ProjectToPrimalVariableBounds(sharded_qp_, starting_primal_solution);
1118 ProjectToDualVariableBounds(sharded_qp_, starting_dual_solution);
1119
1120 ComputeAndApplyRescaling(params, starting_primal_solution,
1121 starting_dual_solution);
1122 *solve_log.mutable_preprocessed_problem_stats() = ComputeStats(sharded_qp_);
1123 if (params.verbosity_level() >= 1) {
1124 SOLVER_LOG(&logger_, "Problem stats after ", preprocessing_string);
1125 LogQuadraticProgramStats(solve_log.preprocessed_problem_stats());
1126 }
1127
1128 double step_size = 0.0;
1129 if (params.linesearch_rule() ==
1130 PrimalDualHybridGradientParams::CONSTANT_STEP_SIZE_RULE) {
1131 std::mt19937 random(1);
1132 double inverse_step_size;
1133 const auto lipschitz_result =
1134 EstimateMaximumSingularValueOfConstraintMatrix(
1135 sharded_qp_, std::nullopt, std::nullopt,
1136 /*desired_relative_error=*/0.2, /*failure_probability=*/0.0005,
1137 random);
1138 // With high probability, `lipschitz_result.singular_value` is within
1139 // +/- `lipschitz_result.estimated_relative_error
1140 // * lipschitz_result.singular_value`
1141 const double lipschitz_term_upper_bound =
1142 lipschitz_result.singular_value /
1143 (1.0 - lipschitz_result.estimated_relative_error);
1144 inverse_step_size = lipschitz_term_upper_bound;
1145 step_size = inverse_step_size > 0.0 ? 1.0 / inverse_step_size : 1.0;
1146 } else {
1147 // This initial step size is designed to err on the side of being too big.
1148 // This is because
1149 // (i) too-big steps are rejected and hence don't hurt us other than
1150 // wasting
1151 // an iteration and
1152 // (ii) the step size adjustment algorithm shrinks the step size as far as
1153 // needed in a single iteration but raises it slowly.
1154 // The tiny constant is there to keep the step size finite in the case of a
1155 // trivial LP with no constraints.
1156 step_size =
1157 1.0 /
1158 std::max(
1159 1.0e-20,
1160 solve_log.preprocessed_problem_stats().constraint_matrix_abs_max());
1161 }
1162 step_size *= params.initial_step_size_scaling();
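  // [Editor's illustrative note, not part of the original file] For example,
  // with the default (non-constant) linesearch rule and a preprocessed
  // constraint matrix whose largest absolute entry is 4.0, the fallback branch
  // above gives step_size = 1.0 / 4.0 = 0.25, scaled by
  // `initial_step_size_scaling()` (1.0 unless overridden), for an initial step
  // size of 0.25.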
1163
1164 const double primal_weight = InitialPrimalWeight(
1165 params, solve_log.preprocessed_problem_stats().objective_vector_l2_norm(),
1166 solve_log.preprocessed_problem_stats().combined_bounds_l2_norm());
1167
1168 Solver solver(params, starting_primal_solution, starting_dual_solution,
1169 step_size, primal_weight, this);
1170 solve_log.set_preprocessing_time_sec(timer.Get());
1171 SolverResult result = solver.Solve(IterationType::kNormal, interrupt_solve,
1172 std::move(solve_log));
1173 return ConstructOriginalSolverResult(params, std::move(result), logger_);
1174}
1175
1176glop::GlopParameters PreprocessSolver::PreprocessorParameters(
1177 const PrimalDualHybridGradientParams& params) {
1178 glop::GlopParameters glop_params;
1179 // TODO(user): Test if dualization helps or hurts performance.
1180 glop_params.set_solve_dual_problem(glop::GlopParameters::NEVER_DO);
1181 // Experiments show that this preprocessing step can hurt because it relaxes
1182 // variable bounds.
1183 glop_params.set_use_implied_free_preprocessor(false);
1184 // We do our own scaling.
1185 glop_params.set_use_scaling(false);
1186 if (params.presolve_options().has_glop_parameters()) {
1187 glop_params.MergeFrom(params.presolve_options().glop_parameters());
1188 }
1189 return glop_params;
1190}
1191
1192TerminationReason GlopStatusToTerminationReason(
1193 const glop::ProblemStatus glop_status, SolverLogger& logger) {
1194 switch (glop_status) {
1195 case glop::ProblemStatus::OPTIMAL:
1196 return TERMINATION_REASON_OPTIMAL;
1197 case glop::ProblemStatus::INVALID_PROBLEM:
1198 return TERMINATION_REASON_INVALID_PROBLEM;
1199 case glop::ProblemStatus::ABNORMAL:
1200 case glop::ProblemStatus::IMPRECISE:
1201 return TERMINATION_REASON_NUMERICAL_ERROR;
1202 case glop::ProblemStatus::PRIMAL_INFEASIBLE:
1203 case glop::ProblemStatus::DUAL_INFEASIBLE:
1204 case glop::ProblemStatus::INFEASIBLE_OR_UNBOUNDED:
1205 case glop::ProblemStatus::DUAL_UNBOUNDED:
1206 case glop::ProblemStatus::PRIMAL_UNBOUNDED:
1207 return TERMINATION_REASON_PRIMAL_OR_DUAL_INFEASIBLE;
1208 default:
1209 SOLVER_LOG(&logger, "WARNING: Unexpected preprocessor status ",
1210 glop_status);
1211 return TERMINATION_REASON_OTHER;
1212 }
1213}
1214
1215std::optional<TerminationReason> PreprocessSolver::ApplyPresolveIfEnabled(
1216 const PrimalDualHybridGradientParams& params,
1217 std::optional<PrimalAndDualSolution>* const initial_solution) {
1218 const bool presolve_enabled = params.presolve_options().use_glop();
1219 if (!presolve_enabled) {
1220 return std::nullopt;
1221 }
1222 if (!IsLinearProgram(Qp())) {
1223 SOLVER_LOG(&logger_,
1224 "WARNING: Skipping presolve, which is only supported for linear "
1225 "programs");
1226 return std::nullopt;
1227 }
1228 absl::StatusOr<MPModelProto> model = QpToMpModelProto(Qp());
1229 if (!model.ok()) {
1230 SOLVER_LOG(&logger_,
1231 "WARNING: Skipping presolve because of error converting to "
1232 "MPModelProto: ",
1233 model.status());
1234 return std::nullopt;
1235 }
1236 if (initial_solution->has_value()) {
1237 SOLVER_LOG(&logger_,
1238 "WARNING: Ignoring initial solution. Initial solutions are "
1239 "ignored when presolve is on.");
1240 initial_solution->reset();
1241 }
1242 glop::LinearProgram glop_lp;
1243 glop::MPModelProtoToLinearProgram(*model, &glop_lp);
1244 // Save RAM.
1245 model->Clear();
1246 presolve_info_.emplace(std::move(sharded_qp_), params);
1247 // To simplify our code we ignore the return value indicating whether
1248 // postprocessing is required. We simply call `RecoverSolution()`
1249 // unconditionally, which may do nothing.
1250 presolve_info_->preprocessor.Run(&glop_lp);
1251 presolve_info_->presolved_problem_was_maximization =
1252 glop_lp.IsMaximizationProblem();
1253 MPModelProto output;
1254 glop::LinearProgramToMPModelProto(glop_lp, &output);
1255 // This will only fail if given an invalid LP, which shouldn't happen.
1256 absl::StatusOr<QuadraticProgram> presolved_qp =
1257 QpFromMpModelProto(output, /*relax_integer_variables=*/false);
1258 CHECK_OK(presolved_qp.status());
1259 // `MPModelProto` doesn't support scaling factors, so if `glop_lp` has an
1260 // `objective_scaling_factor` it won't be set in output and `presolved_qp`.
1261 // The scaling factor of `presolved_qp` isn't actually used anywhere, but we
1262 // set it for completeness.
1263 presolved_qp->objective_scaling_factor = glop_lp.objective_scaling_factor();
1264 sharded_qp_ = ShardedQuadraticProgram(std::move(*presolved_qp), num_threads_,
1265 num_shards_, params.scheduler_type());
1266 // A status of `INIT` means the preprocessor created a (usually) smaller
1267 // problem that needs solving. Other statuses mean the preprocessor solved
1268 // the problem completely.
1269 if (presolve_info_->preprocessor.status() != glop::ProblemStatus::INIT) {
1270 col_scaling_vec_ = OnesVector(sharded_qp_.PrimalSharder());
1271 row_scaling_vec_ = OnesVector(sharded_qp_.DualSharder());
1272 return GlopStatusToTerminationReason(presolve_info_->preprocessor.status(),
1273 logger_);
1274 }
1275 return std::nullopt;
1276}
1277
1278void PreprocessSolver::ComputeAndApplyRescaling(
1279 const PrimalDualHybridGradientParams& params,
1280 VectorXd& starting_primal_solution, VectorXd& starting_dual_solution) {
1281 ScalingVectors scaling = ApplyRescaling(
1282 RescalingOptions{.l_inf_ruiz_iterations = params.l_inf_ruiz_iterations(),
1283 .l2_norm_rescaling = params.l2_norm_rescaling()},
1284 sharded_qp_);
1285 row_scaling_vec_ = std::move(scaling.row_scaling_vec);
1286 col_scaling_vec_ = std::move(scaling.col_scaling_vec);
1287
1288 CoefficientWiseQuotientInPlace(col_scaling_vec_, sharded_qp_.PrimalSharder(),
1289 starting_primal_solution);
1290 CoefficientWiseQuotientInPlace(row_scaling_vec_, sharded_qp_.DualSharder(),
1291 starting_dual_solution);
1292}
1293
1294void PreprocessSolver::LogQuadraticProgramStats(
1295 const QuadraticProgramStats& stats) const {
1296 SOLVER_LOG(&logger_,
1297 absl::StrFormat("There are %i variables, %i constraints, and %i "
1298 "constraint matrix nonzeros.",
1299 stats.num_variables(), stats.num_constraints(),
1300 stats.constraint_matrix_num_nonzeros()));
1301 if (Qp().constraint_matrix.nonZeros() > 0) {
1302 SOLVER_LOG(&logger_,
1303 absl::StrFormat("Absolute values of nonzero constraint matrix "
1304 "elements: largest=%f, "
1305 "smallest=%f, avg=%f",
1306 stats.constraint_matrix_abs_max(),
1307 stats.constraint_matrix_abs_min(),
1308 stats.constraint_matrix_abs_avg()));
1309 SOLVER_LOG(
1310 &logger_,
1311 absl::StrFormat("Constraint matrix, infinity norm: max(row & col)=%f, "
1312 "min_col=%f, min_row=%f",
1313 stats.constraint_matrix_abs_max(),
1314 stats.constraint_matrix_col_min_l_inf_norm(),
1315 stats.constraint_matrix_row_min_l_inf_norm()));
1316 SOLVER_LOG(
1317 &logger_,
1318 absl::StrFormat(
1319 "Constraint bounds statistics (max absolute value per row): "
1320 "largest=%f, smallest=%f, avg=%f, l2_norm=%f",
1321 stats.combined_bounds_max(), stats.combined_bounds_min(),
1322 stats.combined_bounds_avg(), stats.combined_bounds_l2_norm()));
1323 }
1324 if (!IsLinearProgram(Qp())) {
1325 SOLVER_LOG(&logger_,
1326 absl::StrFormat("There are %i nonzero diagonal coefficients in "
1327 "the objective matrix.",
1328 stats.objective_matrix_num_nonzeros()));
1329 SOLVER_LOG(
1330 &logger_,
1331 absl::StrFormat(
1332 "Absolute values of nonzero objective matrix elements: largest=%f, "
1333 "smallest=%f, avg=%f",
1334 stats.objective_matrix_abs_max(), stats.objective_matrix_abs_min(),
1335 stats.objective_matrix_abs_avg()));
1336 }
1337 SOLVER_LOG(&logger_, absl::StrFormat("Absolute values of objective vector "
1338 "elements: largest=%f, smallest=%f, "
1339 "avg=%f, l2_norm=%f",
1340 stats.objective_vector_abs_max(),
1341 stats.objective_vector_abs_min(),
1342 stats.objective_vector_abs_avg(),
1343 stats.objective_vector_l2_norm()));
1344 SOLVER_LOG(
1345 &logger_,
1346 absl::StrFormat(
1347 "Gaps between variable upper and lower bounds: #finite=%i of %i, "
1348 "largest=%f, smallest=%f, avg=%f",
1349 stats.variable_bound_gaps_num_finite(), stats.num_variables(),
1350 stats.variable_bound_gaps_max(), stats.variable_bound_gaps_min(),
1351 stats.variable_bound_gaps_avg()));
1352}
1353
1354double PreprocessSolver::InitialPrimalWeight(
1355 const PrimalDualHybridGradientParams& params,
1356 const double l2_norm_primal_linear_objective,
1357 const double l2_norm_constraint_bounds) const {
1358 if (params.has_initial_primal_weight()) {
1359 return params.initial_primal_weight();
1360 }
1361 if (l2_norm_primal_linear_objective > 0.0 &&
1362 l2_norm_constraint_bounds > 0.0) {
1363 // The hand-wavy motivation for this choice is that the objective vector
1364 // has units of (objective units)/(primal units) and the constraint
1365 // bounds vector has units of (objective units)/(dual units),
1366 // therefore this ratio has units (dual units)/(primal units). By
1367 // dimensional analysis, these are the same units as the primal weight.
1368 return l2_norm_primal_linear_objective / l2_norm_constraint_bounds;
1369 } else {
1370 return 1.0;
1371 }
1372}
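
// The heuristic above reduces to a ratio of two vector norms with a fallback
// of 1.0. A standalone sketch (illustrative only; the hypothetical helper
// assumes plain Eigen vectors holding the linear objective and the combined
// constraint bounds):
static double InitialPrimalWeightSketch(
    const Eigen::VectorXd& objective, const Eigen::VectorXd& combined_bounds) {
  const double objective_norm = objective.norm();
  const double bounds_norm = combined_bounds.norm();
  // Units: (objective/primal) divided by (objective/dual) = dual/primal,
  // matching the primal weight. Fall back to 1.0 if either norm is zero.
  return (objective_norm > 0.0 && bounds_norm > 0.0)
             ? objective_norm / bounds_norm
             : 1.0;
}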
1373
1374PrimalAndDualSolution PreprocessSolver::RecoverOriginalSolution(
1375 PrimalAndDualSolution working_solution) const {
1376 glop::ProblemSolution glop_solution(glop::RowIndex{0}, glop::ColIndex{0});
1377 if (presolve_info_.has_value()) {
1378 // We compute statuses relative to the working problem so we can detect when
1379 // variables are at their bounds without floating-point roundoff induced by
1380 // scaling.
1381 glop_solution = internal::ComputeStatuses(Qp(), working_solution);
1382 }
1383 CoefficientWiseProductInPlace(col_scaling_vec_, sharded_qp_.PrimalSharder(),
1384 working_solution.primal_solution);
1385 CoefficientWiseProductInPlace(row_scaling_vec_, sharded_qp_.DualSharder(),
1386 working_solution.dual_solution);
1387 if (presolve_info_.has_value()) {
1388 glop_solution.primal_values =
1389 glop::DenseRow(working_solution.primal_solution.begin(),
1390 working_solution.primal_solution.end());
1391 glop_solution.dual_values =
1392 glop::DenseColumn(working_solution.dual_solution.begin(),
1393 working_solution.dual_solution.end());
1394 // We got the working QP by calling `LinearProgramToMPModelProto()` and
1395 // `QpFromMpModelProto()`. We need to negate the duals if the LP resulting
1396 // from presolve was a max problem.
1397 if (presolve_info_->presolved_problem_was_maximization) {
1398 for (glop::RowIndex i{0}; i < glop_solution.dual_values.size(); ++i) {
1399 glop_solution.dual_values[i] *= -1;
1400 }
1401 }
1402 presolve_info_->preprocessor.RecoverSolution(&glop_solution);
1403 PrimalAndDualSolution solution;
1404 solution.primal_solution =
1405 Eigen::Map<Eigen::VectorXd>(glop_solution.primal_values.data(),
1406 glop_solution.primal_values.size().value());
1407 solution.dual_solution =
1408 Eigen::Map<Eigen::VectorXd>(glop_solution.dual_values.data(),
1409 glop_solution.dual_values.size().value());
1410 // We called `QpToMpModelProto()` and `MPModelProtoToLinearProgram()` to
1411 // convert our original QP into input for glop's preprocessor. The former
1412 // multiplies the objective vector by `objective_scaling_factor`, which
1413 // multiplies the duals by that factor as well. To undo this we divide by
1414 // `objective_scaling_factor`.
1415 solution.dual_solution /=
1416 presolve_info_->sharded_original_qp.Qp().objective_scaling_factor;
1417 // Glop's preprocessor sometimes violates the primal bounds constraints. To
1418 // be safe we project both primal and dual.
1419 ProjectToPrimalVariableBounds(presolve_info_->sharded_original_qp,
1420 solution.primal_solution);
1421 ProjectToDualVariableBounds(presolve_info_->sharded_original_qp,
1422 solution.dual_solution);
1423 return solution;
1424 } else {
1425 return working_solution;
1426 }
1427}
1428
1429void SetActiveSetInformation(const ShardedQuadraticProgram& sharded_qp,
1430 const VectorXd& primal_solution,
1431 const VectorXd& dual_solution,
1432 const VectorXd& primal_start_point,
1433 const VectorXd& dual_start_point,
1434 PointMetadata& metadata) {
1435 CHECK_EQ(primal_solution.size(), sharded_qp.PrimalSize());
1436 CHECK_EQ(dual_solution.size(), sharded_qp.DualSize());
1437 CHECK_EQ(primal_start_point.size(), sharded_qp.PrimalSize());
1438 CHECK_EQ(dual_start_point.size(), sharded_qp.DualSize());
1439
1440 const QuadraticProgram& qp = sharded_qp.Qp();
1441 metadata.set_active_primal_variable_count(
1442 static_cast<int64_t>(sharded_qp.PrimalSharder().ParallelSumOverShards(
1443 [&](const Sharder::Shard& shard) {
1444 const auto primal_shard = shard(primal_solution);
1445 const auto lower_bound_shard = shard(qp.variable_lower_bounds);
1446 const auto upper_bound_shard = shard(qp.variable_upper_bounds);
1447 return (primal_shard.array() > lower_bound_shard.array() &&
1448 primal_shard.array() < upper_bound_shard.array())
1449 .count();
1450 })));
1451
1452 // Most of the computation from the previous `ParallelSumOverShards` is
1453 // duplicated here. However the overhead shouldn't be too large, and using
1454 // `ParallelSumOverShards` is simpler than just using `ParallelForEachShard`.
1455 metadata.set_active_primal_variable_change(
1456 static_cast<int64_t>(sharded_qp.PrimalSharder().ParallelSumOverShards(
1457 [&](const Sharder::Shard& shard) {
1458 const auto primal_shard = shard(primal_solution);
1459 const auto primal_start_shard = shard(primal_start_point);
1460 const auto lower_bound_shard = shard(qp.variable_lower_bounds);
1461 const auto upper_bound_shard = shard(qp.variable_upper_bounds);
1462 return ((primal_shard.array() > lower_bound_shard.array() &&
1463 primal_shard.array() < upper_bound_shard.array()) !=
1464 (primal_start_shard.array() > lower_bound_shard.array() &&
1465 primal_start_shard.array() < upper_bound_shard.array()))
1466 .count();
1467 })));
1468
1469 metadata.set_active_dual_variable_count(
1470 static_cast<int64_t>(sharded_qp.DualSharder().ParallelSumOverShards(
1471 [&](const Sharder::Shard& shard) {
1472 const auto dual_shard = shard(dual_solution);
1473 const auto lower_bound_shard = shard(qp.constraint_lower_bounds);
1474 const auto upper_bound_shard = shard(qp.constraint_upper_bounds);
1475 const double kInfinity = std::numeric_limits<double>::infinity();
1476 return (dual_shard.array() != 0.0 ||
1477 (lower_bound_shard.array() == -kInfinity &&
1478 upper_bound_shard.array() == kInfinity))
1479 .count();
1480 })));
1481
1482 metadata.set_active_dual_variable_change(
1483 static_cast<int64_t>(sharded_qp.DualSharder().ParallelSumOverShards(
1484 [&](const Sharder::Shard& shard) {
1485 const auto dual_shard = shard(dual_solution);
1486 const auto dual_start_shard = shard(dual_start_point);
1487 const auto lower_bound_shard = shard(qp.constraint_lower_bounds);
1488 const auto upper_bound_shard = shard(qp.constraint_upper_bounds);
1489 const double kInfinity = std::numeric_limits<double>::infinity();
1490 return ((dual_shard.array() != 0.0 ||
1491 (lower_bound_shard.array() == -kInfinity &&
1492 upper_bound_shard.array() == kInfinity)) !=
1493 (dual_start_shard.array() != 0.0 ||
1494 (lower_bound_shard.array() == -kInfinity &&
1495 upper_bound_shard.array() == kInfinity)))
1496 .count();
1497 })));
1498}
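
// The counts above are simple boolean reductions. An unsharded sketch of the
// first one (illustrative only; hypothetical helper): a primal variable is
// counted as "active" when it lies strictly between its bounds.
static int64_t CountStrictlyInteriorVariablesSketch(
    const Eigen::VectorXd& primal, const Eigen::VectorXd& lower_bounds,
    const Eigen::VectorXd& upper_bounds) {
  return static_cast<int64_t>((primal.array() > lower_bounds.array() &&
                               primal.array() < upper_bounds.array())
                                  .count());
}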
1499
1500void PreprocessSolver::AddPointMetadata(
1501 const PrimalDualHybridGradientParams& params,
1502 const VectorXd& primal_solution, const VectorXd& dual_solution,
1503 PointType point_type, const VectorXd& last_primal_start_point,
1504 const VectorXd& last_dual_start_point, IterationStats& stats) const {
1505 PointMetadata metadata;
1506 metadata.set_point_type(point_type);
1507 std::vector<int> random_projection_seeds(
1508 params.random_projection_seeds().begin(),
1509 params.random_projection_seeds().end());
1510 SetRandomProjections(sharded_qp_, primal_solution, dual_solution,
1511 random_projection_seeds, metadata);
1512 if (point_type != POINT_TYPE_ITERATE_DIFFERENCE) {
1513 SetActiveSetInformation(sharded_qp_, primal_solution, dual_solution,
1514 last_primal_start_point, last_dual_start_point,
1515 metadata);
1516 }
1517 *stats.add_point_metadata() = metadata;
1518}
1519
1520std::optional<TerminationReasonAndPointType>
1521PreprocessSolver::UpdateIterationStatsAndCheckTermination(
1522 const PrimalDualHybridGradientParams& params,
1523 bool force_numerical_termination, const VectorXd& working_primal_current,
1524 const VectorXd& working_dual_current,
1525 const VectorXd* working_primal_average,
1526 const VectorXd* working_dual_average, const VectorXd* working_primal_delta,
1527 const VectorXd* working_dual_delta, const VectorXd& last_primal_start_point,
1528 const VectorXd& last_dual_start_point,
1529 const std::atomic<bool>* interrupt_solve,
1530 const IterationType iteration_type, const IterationStats& full_stats,
1531 IterationStats& stats) {
1532 ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1533 params, working_primal_current, working_dual_current,
1534 POINT_TYPE_CURRENT_ITERATE, stats.add_convergence_information(),
1535 stats.add_infeasibility_information());
1536 AddPointMetadata(params, working_primal_current, working_dual_current,
1537 POINT_TYPE_CURRENT_ITERATE, last_primal_start_point,
1538 last_dual_start_point, stats);
1539 if (working_primal_average != nullptr && working_dual_average != nullptr) {
1540 ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1541 params, *working_primal_average, *working_dual_average,
1542 POINT_TYPE_AVERAGE_ITERATE, stats.add_convergence_information(),
1543 stats.add_infeasibility_information());
1544 AddPointMetadata(params, *working_primal_average, *working_dual_average,
1545 POINT_TYPE_AVERAGE_ITERATE, last_primal_start_point,
1546 last_dual_start_point, stats);
1547 }
1548 // Undoing presolve doesn't work for iterate differences.
1549 if (!presolve_info_.has_value() && working_primal_delta != nullptr &&
1550 working_dual_delta != nullptr) {
1551 ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1552 params, *working_primal_delta, *working_dual_delta,
1553 POINT_TYPE_ITERATE_DIFFERENCE, nullptr,
1554 stats.add_infeasibility_information());
1555 AddPointMetadata(params, *working_primal_delta, *working_dual_delta,
1556 POINT_TYPE_ITERATE_DIFFERENCE, last_primal_start_point,
1557 last_dual_start_point, stats);
1558 }
1559 constexpr int kLogEvery = 15;
1560 absl::Time logging_time = absl::Now();
1561 if (params.verbosity_level() >= 2 &&
1562 (params.log_interval_seconds() == 0.0 ||
1563 logging_time - time_of_last_log_ >=
1564 absl::Seconds(params.log_interval_seconds()))) {
1565 if (log_counter_ == 0) {
1566 LogIterationStatsHeader(params.verbosity_level(),
1567 params.use_feasibility_polishing(), logger_);
1568 }
1569 LogIterationStats(params.verbosity_level(),
1570 params.use_feasibility_polishing(), iteration_type, stats,
1571 params.termination_criteria(), original_bound_norms_,
1572 POINT_TYPE_AVERAGE_ITERATE, logger_);
1573 if (params.verbosity_level() >= 4) {
1574 // If the convergence information doesn't contain an average iterate, the
1575 // previous `LogIterationStats()` will report some other iterate (usually
1576 // the current one) so don't repeat logging it.
1577 if (GetConvergenceInformation(stats, POINT_TYPE_AVERAGE_ITERATE) !=
1578 std::nullopt) {
1579 LogIterationStats(
1580 params.verbosity_level(), params.use_feasibility_polishing(),
1581 iteration_type, stats, params.termination_criteria(),
1582 original_bound_norms_, POINT_TYPE_CURRENT_ITERATE, logger_);
1583 }
1584 }
1585 time_of_last_log_ = logging_time;
1586 if (++log_counter_ >= kLogEvery) {
1587 log_counter_ = 0;
1588 }
1589 }
1590 if (iteration_stats_callback_ != nullptr) {
1591 iteration_stats_callback_(
1592 {.iteration_type = iteration_type,
1593 .termination_criteria = params.termination_criteria(),
1594 .iteration_stats = stats,
1595 .bound_norms = original_bound_norms_});
1596 }
1597
1598 if (const auto termination = CheckIterateTerminationCriteria(
1599 params.termination_criteria(), stats, original_bound_norms_,
1600 force_numerical_termination);
1601 termination.has_value()) {
1602 return termination;
1603 }
1604 return CheckSimpleTerminationCriteria(params.termination_criteria(),
1605 full_stats, interrupt_solve);
1606}
1607
1608void PreprocessSolver::ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1609 const PrimalDualHybridGradientParams& params,
1610 const VectorXd& working_primal, const VectorXd& working_dual,
1611 PointType candidate_type, ConvergenceInformation* convergence_information,
1612 InfeasibilityInformation* infeasibility_information) const {
1613 const TerminationCriteria::DetailedOptimalityCriteria criteria =
1614 EffectiveOptimalityCriteria(params.termination_criteria());
1615 const double primal_epsilon_ratio =
1616 EpsilonRatio(criteria.eps_optimal_primal_residual_absolute(),
1617 criteria.eps_optimal_primal_residual_relative());
1618 const double dual_epsilon_ratio =
1619 EpsilonRatio(criteria.eps_optimal_dual_residual_absolute(),
1620 criteria.eps_optimal_dual_residual_relative());
1621 if (presolve_info_.has_value()) {
1622 // Undoing presolve doesn't make sense for iterate differences.
1623 CHECK_NE(candidate_type, POINT_TYPE_ITERATE_DIFFERENCE);
1624
1625 PrimalAndDualSolution original = RecoverOriginalSolution(
1626 {.primal_solution = working_primal, .dual_solution = working_dual});
1627 if (convergence_information != nullptr) {
1628 *convergence_information = ComputeConvergenceInformation(
1629 params, presolve_info_->sharded_original_qp,
1630 presolve_info_->trivial_col_scaling_vec,
1631 presolve_info_->trivial_row_scaling_vec, original.primal_solution,
1632 original.dual_solution, primal_epsilon_ratio, dual_epsilon_ratio,
1633 candidate_type);
1634 }
1635 if (infeasibility_information != nullptr) {
1636 VectorXd primal_copy =
1637 CloneVector(original.primal_solution,
1638 presolve_info_->sharded_original_qp.PrimalSharder());
1639 ProjectToPrimalVariableBounds(presolve_info_->sharded_original_qp,
1640 primal_copy,
1641 /*use_feasibility_bounds=*/true);
1642 // Only iterate differences should be able to violate the dual variable
1643 // bounds, and iterate differences aren't used when presolve is enabled.
1644 *infeasibility_information = ComputeInfeasibilityInformation(
1645 params, presolve_info_->sharded_original_qp,
1646 presolve_info_->trivial_col_scaling_vec,
1647 presolve_info_->trivial_row_scaling_vec, primal_copy,
1648 original.dual_solution, original.primal_solution, candidate_type);
1649 }
1650 } else {
1651 if (convergence_information != nullptr) {
1652 *convergence_information = ComputeConvergenceInformation(
1653 params, sharded_qp_, col_scaling_vec_, row_scaling_vec_,
1654 working_primal, working_dual, primal_epsilon_ratio,
1655 dual_epsilon_ratio, candidate_type);
1656 }
1657 if (infeasibility_information != nullptr) {
1658 VectorXd primal_copy =
1659 CloneVector(working_primal, sharded_qp_.PrimalSharder());
1660 ProjectToPrimalVariableBounds(sharded_qp_, primal_copy,
1661 /*use_feasibility_bounds=*/true);
1662 if (candidate_type == POINT_TYPE_ITERATE_DIFFERENCE) {
1663 // Iterate differences might not satisfy the dual variable bounds.
1664 VectorXd dual_copy =
1665 CloneVector(working_dual, sharded_qp_.DualSharder());
1666 ProjectToDualVariableBounds(sharded_qp_, dual_copy);
1667 *infeasibility_information = ComputeInfeasibilityInformation(
1668 params, sharded_qp_, col_scaling_vec_, row_scaling_vec_,
1669 primal_copy, dual_copy, working_primal, candidate_type);
1670 } else {
1671 *infeasibility_information = ComputeInfeasibilityInformation(
1672 params, sharded_qp_, col_scaling_vec_, row_scaling_vec_,
1673 primal_copy, working_dual, working_primal, candidate_type);
1674 }
1675 }
1676 }
1677}
1678
1679// `result` is used both as the input and as the temporary that will be
1680// returned.
1681SolverResult PreprocessSolver::ConstructOriginalSolverResult(
1682 const PrimalDualHybridGradientParams& params, SolverResult result,
1683 SolverLogger& logger) const {
1684 const bool use_zero_primal_objective =
1685 result.solve_log.termination_reason() ==
1686 TERMINATION_REASON_PRIMAL_INFEASIBLE;
1687 if (presolve_info_.has_value()) {
1688 // Transform the solutions so they match the original unscaled problem.
1689 PrimalAndDualSolution original_solution = RecoverOriginalSolution(
1690 {.primal_solution = std::move(result.primal_solution),
1691 .dual_solution = std::move(result.dual_solution)});
1692 result.primal_solution = std::move(original_solution.primal_solution);
1693 if (result.solve_log.termination_reason() ==
1694 TERMINATION_REASON_DUAL_INFEASIBLE) {
1695 ProjectToPrimalVariableBounds(presolve_info_->sharded_original_qp,
1696 result.primal_solution,
1697 /*use_feasibility_bounds=*/true);
1698 }
1699 // If presolve is enabled, no projection of the dual is performed when
1700 // checking for infeasibility.
1701 result.dual_solution = std::move(original_solution.dual_solution);
1702 // `RecoverOriginalSolution` doesn't recover reduced costs so we need to
1703 // compute them with respect to the original problem.
1704 result.reduced_costs = ReducedCosts(
1705 params, presolve_info_->sharded_original_qp, result.primal_solution,
1706 result.dual_solution, use_zero_primal_objective);
1707 } else {
1708 if (result.solve_log.termination_reason() ==
1709 TERMINATION_REASON_DUAL_INFEASIBLE) {
1710 ProjectToPrimalVariableBounds(sharded_qp_, result.primal_solution,
1711 /*use_feasibility_bounds=*/true);
1712 }
1713 if (result.solve_log.termination_reason() ==
1714 TERMINATION_REASON_PRIMAL_INFEASIBLE) {
1715 ProjectToDualVariableBounds(sharded_qp_, result.dual_solution);
1716 }
1717 result.reduced_costs =
1718 ReducedCosts(params, sharded_qp_, result.primal_solution,
1719 result.dual_solution, use_zero_primal_objective);
1720 // Transform the solutions so they match the original unscaled problem.
1721 CoefficientWiseProductInPlace(col_scaling_vec_, sharded_qp_.PrimalSharder(),
1722 result.primal_solution);
1723 CoefficientWiseProductInPlace(row_scaling_vec_, sharded_qp_.DualSharder(),
1724 result.dual_solution);
1725 CoefficientWiseProductInPlace(
1726 col_scaling_vec_, sharded_qp_.PrimalSharder(), result.reduced_costs);
1727 }
1728 IterationType iteration_type;
1729 switch (result.solve_log.solution_type()) {
1730 case POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION:
1731 iteration_type = IterationType::kFeasibilityPolishingTermination;
1732 break;
1733 case POINT_TYPE_PRESOLVER_SOLUTION:
1734 iteration_type = IterationType::kPresolveTermination;
1735 break;
1736 default:
1737 iteration_type = IterationType::kNormalTermination;
1738 break;
1739 }
1740 if (iteration_stats_callback_ != nullptr) {
1741 iteration_stats_callback_(
1742 {.iteration_type = iteration_type,
1743 .termination_criteria = params.termination_criteria(),
1744 .iteration_stats = result.solve_log.solution_stats(),
1745 .bound_norms = original_bound_norms_});
1746 }
1747
1748 if (params.verbosity_level() >= 1) {
1749 SOLVER_LOG(&logger, "Termination reason: ",
1750 TerminationReason_Name(result.solve_log.termination_reason()));
1751 SOLVER_LOG(&logger, "Solution point type: ",
1752 PointType_Name(result.solve_log.solution_type()));
1753 SOLVER_LOG(&logger, "Final solution stats:");
1754 LogIterationStatsHeader(params.verbosity_level(),
1755 params.use_feasibility_polishing(), logger);
1756 LogIterationStats(params.verbosity_level(),
1757 params.use_feasibility_polishing(), iteration_type,
1758 result.solve_log.solution_stats(),
1759 params.termination_criteria(), original_bound_norms_,
1760 result.solve_log.solution_type(), logger);
1761 const auto& convergence_info = GetConvergenceInformation(
1762 result.solve_log.solution_stats(), result.solve_log.solution_type());
1763 if (convergence_info.has_value()) {
1764 if (std::isfinite(convergence_info->corrected_dual_objective())) {
1765 SOLVER_LOG(&logger, "Dual objective after infeasibility correction: ",
1766 convergence_info->corrected_dual_objective());
1767 }
1768 }
1769 }
1770 return result;
1771}
1772
1773Solver::Solver(const PrimalDualHybridGradientParams& params,
1774 VectorXd starting_primal_solution,
1775 VectorXd starting_dual_solution, const double initial_step_size,
1776 const double initial_primal_weight,
1777 PreprocessSolver* preprocess_solver)
1778 : params_(params),
1779 current_primal_solution_(std::move(starting_primal_solution)),
1780 current_dual_solution_(std::move(starting_dual_solution)),
1781 primal_average_(&preprocess_solver->ShardedWorkingQp().PrimalSharder()),
1782 dual_average_(&preprocess_solver->ShardedWorkingQp().DualSharder()),
1783 step_size_(initial_step_size),
1784 primal_weight_(initial_primal_weight),
1785 preprocess_solver_(preprocess_solver) {}
1786
1787Solver::NextSolutionAndDelta Solver::ComputeNextPrimalSolution(
1788 double primal_step_size) const {
1789 const int64_t primal_size = ShardedWorkingQp().PrimalSize();
1790 NextSolutionAndDelta result = {
1791 .value = VectorXd(primal_size),
1792 .delta = VectorXd(primal_size),
1793 };
1794 const QuadraticProgram& qp = WorkingQp();
1795 // This computes the primal portion of the PDHG algorithm:
1796 // argmin_x[gradient(f)(`current_primal_solution_`)^T x + g(x)
1797 // + `current_dual_solution_`^T K x
1798 // + (0.5 / `primal_step_size`) * norm(x - `current_primal_solution_`)^2]
1799 // See Sections 2 - 3 of Chambolle and Pock and the comment in the header.
1800 // We omitted the constant terms from Chambolle and Pock's (7).
1801 // This minimization is easy to do in closed form since it can be separated
1802 // into independent problems for each of the primal variables.
1803 ShardedWorkingQp().PrimalSharder().ParallelForEachShard(
1804 [&](const Sharder::Shard& shard) {
1805 if (!IsLinearProgram(qp)) {
1806 // TODO(user): Does changing this to auto (so it becomes an
1807 // Eigen deferred result), or inlining it below, change performance?
1808 const VectorXd diagonal_scaling =
1809 primal_step_size *
1810 shard(qp.objective_matrix->diagonal()).array() +
1811 1.0;
1812 shard(result.value) =
1813 (shard(current_primal_solution_) -
1814 primal_step_size *
1815 (shard(qp.objective_vector) - shard(current_dual_product_)))
1816 // Scale i-th element by 1 / (1 + `primal_step_size` * Q_{ii})
1817 .cwiseQuotient(diagonal_scaling)
1818 .cwiseMin(shard(qp.variable_upper_bounds))
1819 .cwiseMax(shard(qp.variable_lower_bounds));
1820 } else {
1821 // The formula in the LP case is simplified for better performance.
1822 shard(result.value) =
1823 (shard(current_primal_solution_) -
1824 primal_step_size *
1825 (shard(qp.objective_vector) - shard(current_dual_product_)))
1826 .cwiseMin(shard(qp.variable_upper_bounds))
1827 .cwiseMax(shard(qp.variable_lower_bounds));
1828 }
1829 shard(result.delta) =
1830 shard(result.value) - shard(current_primal_solution_);
1831 });
1832 return result;
1833}
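
// The closed-form minimization above acts independently on each coordinate.
// A single-coordinate sketch of the QP case (illustrative only; hypothetical
// helper; the real code applies the same formula shard-by-shard with Eigen
// expressions):
static double PrimalUpdateOneCoordinateSketch(
    double current_value, double objective_coeff, double objective_diagonal,
    double dual_product,  // The coordinate of K^T y.
    double lower_bound, double upper_bound, double primal_step_size) {
  // Unconstrained minimizer of the separable 1-D subproblem.
  const double unprojected =
      (current_value - primal_step_size * (objective_coeff - dual_product)) /
      (1.0 + primal_step_size * objective_diagonal);
  // Projection onto the variable bounds gives the constrained minimizer.
  return std::max(std::min(unprojected, upper_bound), lower_bound);
}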
1834
1835Solver::NextSolutionAndDelta Solver::ComputeNextDualSolution(
1836 double dual_step_size, double extrapolation_factor,
1837 const NextSolutionAndDelta& next_primal_solution) const {
1838 const int64_t dual_size = ShardedWorkingQp().DualSize();
1839 NextSolutionAndDelta result = {
1840 .value = VectorXd(dual_size),
1841 .delta = VectorXd(dual_size),
1842 };
1843 const QuadraticProgram& qp = WorkingQp();
1844 VectorXd extrapolated_primal(ShardedWorkingQp().PrimalSize());
1845 ShardedWorkingQp().PrimalSharder().ParallelForEachShard(
1846 [&](const Sharder::Shard& shard) {
1847 shard(extrapolated_primal) =
1848 (shard(next_primal_solution.value) +
1849 extrapolation_factor * shard(next_primal_solution.delta));
1850 });
1851 // TODO(user): Refactor this multiplication so that we only do one matrix
1852 // vector multiply for the primal variable. This only applies to Malitsky and
1853 // Pock and not to the adaptive step size rule.
1854 ShardedWorkingQp().TransposedConstraintMatrixSharder().ParallelForEachShard(
1855 [&](const Sharder::Shard& shard) {
1856 VectorXd temp =
1857 shard(current_dual_solution_) -
1858 dual_step_size *
1859 shard(ShardedWorkingQp().TransposedConstraintMatrix())
1860 .transpose() *
1861 extrapolated_primal;
1862 // Each element of the argument of `.cwiseMin()` is the critical point
1863 // of the respective 1D minimization problem if it's negative.
1864 // Likewise the argument to the `.cwiseMax()` is the critical point if
1865 // positive.
1866 shard(result.value) =
1867 VectorXd::Zero(temp.size())
1868 .cwiseMin(temp +
1869 dual_step_size * shard(qp.constraint_upper_bounds))
1870 .cwiseMax(temp +
1871 dual_step_size * shard(qp.constraint_lower_bounds));
1872 shard(result.delta) =
1873 (shard(result.value) - shard(current_dual_solution_));
1874 });
1875 return result;
1876}
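
// The dual update above also decomposes per coordinate. A single-coordinate
// sketch (illustrative only; hypothetical helper) of the min/zero/max logic
// described in the comments: keep the negative critical point induced by the
// upper bound, the positive critical point induced by the lower bound, and
// zero otherwise.
static double DualUpdateOneCoordinateSketch(double current_dual,
                                            double constraint_product,
                                            double constraint_lower_bound,
                                            double constraint_upper_bound,
                                            double dual_step_size) {
  // `constraint_product` is the coordinate of K * (extrapolated primal).
  const double temp = current_dual - dual_step_size * constraint_product;
  const double from_upper = temp + dual_step_size * constraint_upper_bound;
  const double from_lower = temp + dual_step_size * constraint_lower_bound;
  return std::max(std::min(0.0, from_upper), from_lower);
}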
1877
1878std::pair<double, double> Solver::ComputeMovementTerms(
1879 const VectorXd& delta_primal, const VectorXd& delta_dual) const {
1880 return {SquaredNorm(delta_primal, ShardedWorkingQp().PrimalSharder()),
1881 SquaredNorm(delta_dual, ShardedWorkingQp().DualSharder())};
1882}
1883
1884double Solver::ComputeMovement(const VectorXd& delta_primal,
1885 const VectorXd& delta_dual) const {
1886 const auto [primal_squared_norm, dual_squared_norm] =
1887 ComputeMovementTerms(delta_primal, delta_dual);
1888 return (0.5 * primal_weight_ * primal_squared_norm) +
1889 (0.5 / primal_weight_) * dual_squared_norm;
1890}
1891
1892double Solver::ComputeNonlinearity(const VectorXd& delta_primal,
1893 const VectorXd& next_dual_product) const {
1894 // Lemma 1 in Chambolle and Pock includes a term with L_f, the Lipschitz
1895 // constant of f. This is zero in our formulation.
1896 return ShardedWorkingQp().PrimalSharder().ParallelSumOverShards(
1897 [&](const Sharder::Shard& shard) {
1898 return -shard(delta_primal)
1899 .dot(shard(next_dual_product) -
1900 shard(current_dual_product_));
1901 });
1902}
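
// An unsharded sketch of the nonlinearity term above (illustrative only;
// hypothetical helper): the negated inner product of the primal step with the
// change in K^T y.
static double NonlinearitySketch(const Eigen::VectorXd& delta_primal,
                                 const Eigen::VectorXd& next_dual_product,
                                 const Eigen::VectorXd& current_dual_product) {
  return -delta_primal.dot(next_dual_product - current_dual_product);
}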
1903
1904IterationStats Solver::CreateSimpleIterationStats(
1905 RestartChoice restart_used) const {
1906 IterationStats stats;
1907 double num_kkt_passes_per_rejected_step = 1.0;
1908 if (params_.linesearch_rule() ==
1909 PrimalDualHybridGradientParams::MALITSKY_POCK_LINESEARCH_RULE) {
1910 num_kkt_passes_per_rejected_step = 0.5;
1911 }
1912 stats.set_iteration_number(iterations_completed_);
1913 stats.set_cumulative_rejected_steps(num_rejected_steps_);
1914 // TODO(user): This formula doesn't account for KKT matrix passes in major
1915 // iterations.
1916 stats.set_cumulative_kkt_matrix_passes(iterations_completed_ +
1917 num_kkt_passes_per_rejected_step *
1918 num_rejected_steps_);
1919 stats.set_cumulative_time_sec(preprocessing_time_sec_ + timer_.Get());
1920 stats.set_restart_used(restart_used);
1921 stats.set_step_size(step_size_);
1922 stats.set_primal_weight(primal_weight_);
1923 return stats;
1924}
1925
1926double Solver::DistanceTraveledFromLastStart(
1927 const VectorXd& primal_solution, const VectorXd& dual_solution) const {
1928 return std::sqrt((0.5 * primal_weight_) *
1929 SquaredDistance(primal_solution,
1930 last_primal_start_point_,
1931 ShardedWorkingQp().PrimalSharder()) +
1932 (0.5 / primal_weight_) *
1933 SquaredDistance(dual_solution, last_dual_start_point_,
1934 ShardedWorkingQp().DualSharder()));
1935}
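
// An unsharded sketch of the weighted distance above (illustrative only;
// hypothetical helper): the primal part is weighted by the primal weight and
// the dual part by its inverse, matching the norm used for restart decisions.
static double WeightedDistanceSketch(const Eigen::VectorXd& primal,
                                     const Eigen::VectorXd& primal_start,
                                     const Eigen::VectorXd& dual,
                                     const Eigen::VectorXd& dual_start,
                                     double primal_weight) {
  return std::sqrt(
      0.5 * primal_weight * (primal - primal_start).squaredNorm() +
      (0.5 / primal_weight) * (dual - dual_start).squaredNorm());
}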
1936
1937LocalizedLagrangianBounds Solver::ComputeLocalizedBoundsAtCurrent() const {
1938 const double distance_traveled_by_current = DistanceTraveledFromLastStart(
1939 current_primal_solution_, current_dual_solution_);
1940 return ComputeLocalizedLagrangianBounds(
1941 ShardedWorkingQp(), current_primal_solution_, current_dual_solution_,
1942 PrimalDualNorm::kEuclideanNorm, primal_weight_,
1943 distance_traveled_by_current,
1944 /*primal_product=*/nullptr, &current_dual_product_,
1945 params_.use_diagonal_qp_trust_region_solver(),
1946 params_.diagonal_qp_trust_region_solver_tolerance());
1947}
1948
1949LocalizedLagrangianBounds Solver::ComputeLocalizedBoundsAtAverage() const {
1950 // TODO(user): These vectors are recomputed again for termination checks
1951 // and again if we eventually restart to the average.
1952 VectorXd average_primal = PrimalAverage();
1953 VectorXd average_dual = DualAverage();
1954
1955 const double distance_traveled_by_average =
1956 DistanceTraveledFromLastStart(average_primal, average_dual);
1957
1958 return ComputeLocalizedLagrangianBounds(
1959 ShardedWorkingQp(), average_primal, average_dual,
1960 PrimalDualNorm::kEuclideanNorm, primal_weight_,
1961 distance_traveled_by_average,
1962 /*primal_product=*/nullptr, /*dual_product=*/nullptr,
1963 params_.use_diagonal_qp_trust_region_solver(),
1964 params_.diagonal_qp_trust_region_solver_tolerance());
1965}
1966
1967bool AverageHasBetterPotential(
1968 const LocalizedLagrangianBounds& local_bounds_at_average,
1969 const LocalizedLagrangianBounds& local_bounds_at_current) {
1970 return BoundGap(local_bounds_at_average) /
1971 MathUtil::Square(local_bounds_at_average.radius) <
1972 BoundGap(local_bounds_at_current) /
1973 MathUtil::Square(local_bounds_at_current.radius);
1974}
1975
1976double NormalizedGap(
1977 const LocalizedLagrangianBounds& local_bounds_at_candidate) {
1978 const double distance_traveled_by_candidate =
1979 local_bounds_at_candidate.radius;
1980 return BoundGap(local_bounds_at_candidate) / distance_traveled_by_candidate;
1981}
1982
1983// TODO(user): Review / cleanup adaptive heuristic.
1984bool Solver::ShouldDoAdaptiveRestartHeuristic(
1985 double candidate_normalized_gap) const {
1986 const double gap_reduction_ratio =
1987 candidate_normalized_gap / normalized_gap_at_last_restart_;
1988 if (gap_reduction_ratio < params_.sufficient_reduction_for_restart()) {
1989 return true;
1990 }
1991 if (gap_reduction_ratio < params_.necessary_reduction_for_restart() &&
1992 candidate_normalized_gap > normalized_gap_at_last_trial_) {
1993 // We've made the "necessary" amount of progress, and iterates appear to
1994 // be getting worse, so restart.
1995 return true;
1996 }
1997 return false;
1998}
1999
2000RestartChoice Solver::DetermineDistanceBasedRestartChoice() const {
2001 // The following checks are safeguards that normally should not be triggered.
2002 if (primal_average_.NumTerms() == 0) {
2003 return RESTART_CHOICE_NO_RESTART;
2004 } else if (distance_based_restart_info_.length_of_last_restart_period == 0) {
2005 return RESTART_CHOICE_RESTART_TO_AVERAGE;
2006 }
2007 const int restart_period_length = primal_average_.NumTerms();
2008 const double distance_moved_this_restart_period_by_average =
2009 DistanceTraveledFromLastStart(primal_average_.ComputeAverage(),
2010 dual_average_.ComputeAverage());
2011 const double distance_moved_last_restart_period =
2012 distance_based_restart_info_.distance_moved_last_restart_period;
2013
2014 // A restart should be triggered when the normalized distance traveled by
2015 // the average is at least a constant factor smaller than it was in the previous restart period.
2016 // TODO(user): Experiment with using `.necessary_reduction_for_restart()`
2017 // as a heuristic when deciding if a restart should be triggered.
2018 if ((distance_moved_this_restart_period_by_average / restart_period_length) <
2019 params_.sufficient_reduction_for_restart() *
2020 (distance_moved_last_restart_period /
2021 distance_based_restart_info_.length_of_last_restart_period)) {
2022 // Restart at current solution when it yields a smaller normalized potential
2023 // function value than the average (heuristic suggested by ohinder@).
2024 if (AverageHasBetterPotential(ComputeLocalizedBoundsAtAverage(),
2025 ComputeLocalizedBoundsAtCurrent())) {
2026 return RESTART_CHOICE_RESTART_TO_AVERAGE;
2027 } else {
2028 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
2029 }
2030 } else {
2031 return RESTART_CHOICE_NO_RESTART;
2032 }
2033}
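
// The trigger condition above compares per-iteration movement across restart
// periods. A standalone sketch of just that test (illustrative only;
// hypothetical helper; `sufficient_reduction` stands in for
// params_.sufficient_reduction_for_restart()):
static bool DistanceBasedRestartTriggeredSketch(
    double distance_moved_this_period, int this_period_length,
    double distance_moved_last_period, int last_period_length,
    double sufficient_reduction) {
  return distance_moved_this_period / this_period_length <
         sufficient_reduction *
             (distance_moved_last_period / last_period_length);
}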
2034
2035RestartChoice Solver::ChooseRestartToApply(const bool is_major_iteration) {
2036 if (!primal_average_.HasNonzeroWeight() &&
2037 !dual_average_.HasNonzeroWeight()) {
2038 return RESTART_CHOICE_NO_RESTART;
2039 }
2040 // TODO(user): This forced restart is very important for the performance of
2041 // `ADAPTIVE_HEURISTIC`. Test if the impact comes primarily from the first
2042 // forced restart (which would unseat a good initial starting point that could
2043 // prevent restarts early in the solve) or if it's really needed for the full
2044 // duration of the solve. If it is really needed, should we then trigger major
2045 // iterations on powers of two?
2046 const int restart_length = primal_average_.NumTerms();
2047 if (restart_length >= iterations_completed_ / 2 &&
2048 params_.restart_strategy() ==
2049 PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC) {
2050 if (AverageHasBetterPotential(ComputeLocalizedBoundsAtAverage(),
2051 ComputeLocalizedBoundsAtCurrent())) {
2052 return RESTART_CHOICE_RESTART_TO_AVERAGE;
2053 } else {
2054 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
2055 }
2056 }
2057 if (is_major_iteration) {
2058 switch (params_.restart_strategy()) {
2059 case PrimalDualHybridGradientParams::NO_RESTARTS:
2060 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
2061 case PrimalDualHybridGradientParams::EVERY_MAJOR_ITERATION:
2062 return RESTART_CHOICE_RESTART_TO_AVERAGE;
2063 case PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC: {
2064 const LocalizedLagrangianBounds local_bounds_at_average =
2065 ComputeLocalizedBoundsAtAverage();
2066 const LocalizedLagrangianBounds local_bounds_at_current =
2067 ComputeLocalizedBoundsAtCurrent();
2068 double normalized_gap;
2069 RestartChoice choice;
2070 if (AverageHasBetterPotential(local_bounds_at_average,
2071 local_bounds_at_current)) {
2072 normalized_gap = NormalizedGap(local_bounds_at_average);
2073 choice = RESTART_CHOICE_RESTART_TO_AVERAGE;
2074 } else {
2075 normalized_gap = NormalizedGap(local_bounds_at_current);
2076 choice = RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
2077 }
2078 if (ShouldDoAdaptiveRestartHeuristic(normalized_gap)) {
2079 return choice;
2080 } else {
2081 normalized_gap_at_last_trial_ = normalized_gap;
2082 return RESTART_CHOICE_NO_RESTART;
2083 }
2084 }
2085 case PrimalDualHybridGradientParams::ADAPTIVE_DISTANCE_BASED: {
2086 return DetermineDistanceBasedRestartChoice();
2087 }
2088 default:
2089 LOG(FATAL) << "Unrecognized restart_strategy "
2090 << params_.restart_strategy();
2091 return RESTART_CHOICE_UNSPECIFIED;
2092 }
2093 } else {
2094 return RESTART_CHOICE_NO_RESTART;
2095 }
2096}
2097
2098VectorXd Solver::PrimalAverage() const {
2099 if (primal_average_.HasNonzeroWeight()) {
2100 return primal_average_.ComputeAverage();
2101 } else {
2102 return current_primal_solution_;
2103 }
2104}
2105
2106VectorXd Solver::DualAverage() const {
2107 if (dual_average_.HasNonzeroWeight()) {
2108 return dual_average_.ComputeAverage();
2109 } else {
2110 return current_dual_solution_;
2111 }
2112}
2113
2114double Solver::ComputeNewPrimalWeight() const {
2115 const double primal_distance =
2116 Distance(current_primal_solution_, last_primal_start_point_,
2117 ShardedWorkingQp().PrimalSharder());
2118 const double dual_distance =
2119 Distance(current_dual_solution_, last_dual_start_point_,
2120 ShardedWorkingQp().DualSharder());
2121 // This choice of a nonzero tolerance balances performance and numerical
2122 // issues caused by extremely large or extremely small weights. It was picked as the best
2123 // among {0.0, 1.0e-20, 2.0e-16, 1.0e-10, 1.0e-5} on the preprocessed MIPLIB
2124 // dataset. The effect of changing this value is relatively minor overall.
2125 constexpr double kNonzeroTol = 1.0e-10;
2126 if (primal_distance <= kNonzeroTol || primal_distance >= 1.0 / kNonzeroTol ||
2127 dual_distance <= kNonzeroTol || dual_distance >= 1.0 / kNonzeroTol) {
2128 return primal_weight_;
2129 }
2130 const double smoothing_param = params_.primal_weight_update_smoothing();
2131 const double unsmoothed_new_primal_weight = dual_distance / primal_distance;
2132 const double new_primal_weight =
2133 std::exp(smoothing_param * std::log(unsmoothed_new_primal_weight) +
2134 (1.0 - smoothing_param) * std::log(primal_weight_));
2135 if (params_.verbosity_level() >= 4) {
2136 SOLVER_LOG(&preprocess_solver_->Logger(), "New computed primal weight is ",
2137 new_primal_weight, " at iteration ", iterations_completed_);
2138 }
2139 return new_primal_weight;
2140}
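
// The smoothed update above is a geometric interpolation, in log space,
// between the distance ratio and the previous weight. A standalone sketch
// (illustrative only; hypothetical helper), including the same guard against
// extreme distances:
static double SmoothedPrimalWeightSketch(double primal_distance,
                                         double dual_distance,
                                         double current_primal_weight,
                                         double smoothing_param) {
  constexpr double kNonzeroTol = 1.0e-10;
  if (primal_distance <= kNonzeroTol || primal_distance >= 1.0 / kNonzeroTol ||
      dual_distance <= kNonzeroTol || dual_distance >= 1.0 / kNonzeroTol) {
    return current_primal_weight;
  }
  return std::exp(smoothing_param * std::log(dual_distance / primal_distance) +
                  (1.0 - smoothing_param) * std::log(current_primal_weight));
}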
2141
2142SolverResult Solver::PickSolutionAndConstructSolverResult(
2143 VectorXd primal_solution, VectorXd dual_solution,
2144 const IterationStats& stats, TerminationReason termination_reason,
2145 PointType output_type, SolveLog solve_log) const {
2146 switch (output_type) {
2147 case POINT_TYPE_CURRENT_ITERATE:
2148 AssignVector(current_primal_solution_, ShardedWorkingQp().PrimalSharder(),
2149 primal_solution);
2150 AssignVector(current_dual_solution_, ShardedWorkingQp().DualSharder(),
2151 dual_solution);
2152 break;
2153 case POINT_TYPE_ITERATE_DIFFERENCE:
2154 AssignVector(current_primal_delta_, ShardedWorkingQp().PrimalSharder(),
2155 primal_solution);
2156 AssignVector(current_dual_delta_, ShardedWorkingQp().DualSharder(),
2157 dual_solution);
2158 break;
2159 case POINT_TYPE_AVERAGE_ITERATE:
2160 case POINT_TYPE_PRESOLVER_SOLUTION:
2161 break;
2162 default:
2163 // Default to average whenever `output_type` is `POINT_TYPE_NONE`.
2164 output_type = POINT_TYPE_AVERAGE_ITERATE;
2165 break;
2166 }
2167 return ConstructSolverResult(
2168 std::move(primal_solution), std::move(dual_solution), stats,
2169 termination_reason, output_type, std::move(solve_log));
2170}
2171
2172void Solver::ApplyRestartChoice(const RestartChoice restart_to_apply) {
2173 switch (restart_to_apply) {
2174 case RESTART_CHOICE_UNSPECIFIED:
2175 case RESTART_CHOICE_NO_RESTART:
2176 return;
2177 case RESTART_CHOICE_WEIGHTED_AVERAGE_RESET:
2178 if (params_.verbosity_level() >= 4) {
2179 SOLVER_LOG(&preprocess_solver_->Logger(),
2180 "Restarted to current on iteration ", iterations_completed_,
2181 " after ", primal_average_.NumTerms(), " iterations");
2182 }
2183 break;
2184 case RESTART_CHOICE_RESTART_TO_AVERAGE:
2185 if (params_.verbosity_level() >= 4) {
2186 SOLVER_LOG(&preprocess_solver_->Logger(),
2187 "Restarted to average on iteration ", iterations_completed_,
2188 " after ", primal_average_.NumTerms(), " iterations");
2189 }
2190 current_primal_solution_ = primal_average_.ComputeAverage();
2191 current_dual_solution_ = dual_average_.ComputeAverage();
2192 current_dual_product_ = TransposedMatrixVectorProduct(
2193 WorkingQp().constraint_matrix, current_dual_solution_,
2194 ShardedWorkingQp().ConstraintMatrixSharder());
2195 break;
2196 }
2197 primal_weight_ = ComputeNewPrimalWeight();
2198 ratio_last_two_step_sizes_ = 1;
2199 if (params_.restart_strategy() ==
2200 PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC) {
2201 // It's important for the theory that the distances here are calculated
2202 // given the new primal weight.
2203 const LocalizedLagrangianBounds local_bounds_at_last_restart =
2204 ComputeLocalizedBoundsAtCurrent();
2205 const double distance_traveled_since_last_restart =
2206 local_bounds_at_last_restart.radius;
2207 normalized_gap_at_last_restart_ = BoundGap(local_bounds_at_last_restart) /
2208 distance_traveled_since_last_restart;
2209 normalized_gap_at_last_trial_ = std::numeric_limits<double>::infinity();
2210 } else if (params_.restart_strategy() ==
2211 PrimalDualHybridGradientParams::ADAPTIVE_DISTANCE_BASED) {
2212 // Update parameters for distance-based restarts.
2213 distance_based_restart_info_ = {
2214 .distance_moved_last_restart_period = DistanceTraveledFromLastStart(
2215 current_primal_solution_, current_dual_solution_),
2216 .length_of_last_restart_period = primal_average_.NumTerms()};
2217 }
2218 primal_average_.Clear();
2219 dual_average_.Clear();
2220 AssignVector(current_primal_solution_, ShardedWorkingQp().PrimalSharder(),
2221 /*dest=*/last_primal_start_point_);
2222 AssignVector(current_dual_solution_, ShardedWorkingQp().DualSharder(),
2223 /*dest=*/last_dual_start_point_);
2224}
2225
2226// Adds the work (`iteration_number`, `cumulative_kkt_matrix_passes`,
2227// `cumulative_rejected_steps`, and `cumulative_time_sec`) from
2228// `additional_work_stats` to `stats` and returns the result.
2229// `stats` is intentionally passed by value.
2230IterationStats AddWorkStats(IterationStats stats,
2231 const IterationStats& additional_work_stats) {
2232 stats.set_iteration_number(stats.iteration_number() +
2233 additional_work_stats.iteration_number());
2234 stats.set_cumulative_kkt_matrix_passes(
2235 stats.cumulative_kkt_matrix_passes() +
2236 additional_work_stats.cumulative_kkt_matrix_passes());
2237 stats.set_cumulative_rejected_steps(
2238 stats.cumulative_rejected_steps() +
2239 additional_work_stats.cumulative_rejected_steps());
2240 stats.set_cumulative_time_sec(stats.cumulative_time_sec() +
2241 additional_work_stats.cumulative_time_sec());
2242 return stats;
2243}
2244
2245// Accumulates and returns the work (`iteration_number`,
2246// `cumulative_kkt_matrix_passes`, `cumulative_rejected_steps`, and
2247// `cumulative_time_sec`) from `solve_log.feasibility_polishing_details`.
2248IterationStats WorkFromFeasibilityPolishing(const SolveLog& solve_log) {
2249 IterationStats result;
2250 for (const FeasibilityPolishingDetails& feasibility_polishing_detail :
2251 solve_log.feasibility_polishing_details()) {
2252 result = AddWorkStats(std::move(result),
2253 feasibility_polishing_detail.solution_stats());
2254 }
2255 return result;
2256}
2257
2258std::optional<SolverResult> Solver::MajorIterationAndTerminationCheck(
2259 const IterationType iteration_type, const bool force_numerical_termination,
2260 const std::atomic<bool>* interrupt_solve,
2261 const IterationStats& work_from_feasibility_polishing,
2262 SolveLog& solve_log) {
2263 const int major_iteration_cycle =
2264 iterations_completed_ % params_.major_iteration_frequency();
2265 const bool is_major_iteration =
2266 major_iteration_cycle == 0 && iterations_completed_ > 0;
2267 // Just decide what to do for now. The actual restart, if any, is
2268 // performed after the termination check.
2269 const RestartChoice restart = force_numerical_termination
2270 ? RESTART_CHOICE_NO_RESTART
2271 : ChooseRestartToApply(is_major_iteration);
2272 IterationStats stats = CreateSimpleIterationStats(restart);
2273 IterationStats full_work_stats =
2274 AddWorkStats(stats, work_from_feasibility_polishing);
2275 const bool check_termination =
2276 major_iteration_cycle % params_.termination_check_frequency() == 0 ||
2277 CheckSimpleTerminationCriteria(params_.termination_criteria(),
2278 full_work_stats, interrupt_solve)
2279 .has_value() ||
2280 force_numerical_termination;
2281 // We check termination on every major iteration.
2282 DCHECK(!is_major_iteration || check_termination);
2283 if (check_termination) {
2284 // Check for termination and update iteration stats with both simple and
2285 // solution statistics. The latter are more expensive to compute and are
2286 // hence computed only here.
2287 VectorXd primal_average = PrimalAverage();
2288 VectorXd dual_average = DualAverage();
2289
2290 const std::optional<TerminationReasonAndPointType>
2291 maybe_termination_reason =
2292 preprocess_solver_->UpdateIterationStatsAndCheckTermination(
2293 params_, force_numerical_termination, current_primal_solution_,
2294 current_dual_solution_,
2295 primal_average_.HasNonzeroWeight() ? &primal_average : nullptr,
2296 dual_average_.HasNonzeroWeight() ? &dual_average : nullptr,
2297 current_primal_delta_.size() > 0 ? &current_primal_delta_
2298 : nullptr,
2299 current_dual_delta_.size() > 0 ? &current_dual_delta_ : nullptr,
2300 last_primal_start_point_, last_dual_start_point_,
2301 interrupt_solve, iteration_type, full_work_stats, stats);
2302 if (params_.record_iteration_stats()) {
2303 *solve_log.add_iteration_stats() = stats;
2304 }
2305 // We've terminated.
2306 if (maybe_termination_reason.has_value()) {
2307 IterationStats terminating_full_stats =
2308 AddWorkStats(stats, work_from_feasibility_polishing);
2309 return PickSolutionAndConstructSolverResult(
2310 std::move(primal_average), std::move(dual_average),
2311 terminating_full_stats, maybe_termination_reason->reason,
2312 maybe_termination_reason->type, std::move(solve_log));
2313 }
2314 } else if (params_.record_iteration_stats()) {
2315 // Record simple iteration stats only.
2316 *solve_log.add_iteration_stats() = stats;
2317 }
2318 ApplyRestartChoice(restart);
2319 return std::nullopt;
2320}
2321
2322void Solver::ResetAverageToCurrent() {
2323 primal_average_.Clear();
2324 dual_average_.Clear();
2325 primal_average_.Add(current_primal_solution_, /*weight=*/1.0);
2326 dual_average_.Add(current_dual_solution_, /*weight=*/1.0);
2327}
2328
2329void Solver::LogNumericalTermination(const Eigen::VectorXd& primal_delta,
2330 const Eigen::VectorXd& dual_delta) const {
2331 if (params_.verbosity_level() >= 2) {
2332 auto [primal_squared_norm, dual_squared_norm] =
2333 ComputeMovementTerms(primal_delta, dual_delta);
2334 SOLVER_LOG(&preprocess_solver_->Logger(),
2335 "Forced numerical termination at iteration ",
2336 iterations_completed_, " with primal delta squared norm ",
2337 primal_squared_norm, " dual delta squared norm ",
2338 dual_squared_norm, " primal weight ", primal_weight_);
2339 }
2340}
2341
2342void Solver::LogInnerIterationLimitHit() const {
2343 SOLVER_LOG(&preprocess_solver_->Logger(),
2344 "WARNING: Inner iteration limit reached at iteration ",
2345 iterations_completed_);
2346}
2347
2348InnerStepOutcome Solver::TakeMalitskyPockStep() {
2349 InnerStepOutcome outcome = InnerStepOutcome::kSuccessful;
2350 const double primal_step_size = step_size_ / primal_weight_;
2351 NextSolutionAndDelta next_primal_solution =
2352 ComputeNextPrimalSolution(primal_step_size);
2353 // The theory by Malitsky and Pock holds for any new_step_size in the interval
2354 // [`step_size`, `step_size` * sqrt(1 + `ratio_last_two_step_sizes_`)].
2355 // `dilating_coeff` determines where in this interval the new step size lands.
2356 // NOTE: Malitsky and Pock use theta for `ratio_last_two_step_sizes`.
2357 double dilating_coeff =
2358 1 + (params_.malitsky_pock_parameters().step_size_interpolation() *
2359 (sqrt(1 + ratio_last_two_step_sizes_) - 1));
2360 double new_primal_step_size = primal_step_size * dilating_coeff;
2361 double step_size_downscaling =
2362 params_.malitsky_pock_parameters().step_size_downscaling_factor();
2363 double contraction_factor =
2364 params_.malitsky_pock_parameters().linesearch_contraction_factor();
2365 const double dual_weight = primal_weight_ * primal_weight_;
2366 int inner_iterations = 0;
2367 for (bool accepted_step = false; !accepted_step; ++inner_iterations) {
2368 if (inner_iterations >= 60) {
2369 LogInnerIterationLimitHit();
2370 ResetAverageToCurrent();
2371 outcome = InnerStepOutcome::kForceNumericalTermination;
2372 break;
2373 }
2374 const double new_last_two_step_sizes_ratio =
2375 new_primal_step_size / primal_step_size;
2376 NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
2377 dual_weight * new_primal_step_size, new_last_two_step_sizes_ratio,
2378 next_primal_solution);
2379
2380 VectorXd next_dual_product = TransposedMatrixVectorProduct(
2381 WorkingQp().constraint_matrix, next_dual_solution.value,
2382 ShardedWorkingQp().ConstraintMatrixSharder());
2383 double delta_dual_norm =
2384 Norm(next_dual_solution.delta, ShardedWorkingQp().DualSharder());
2385 double delta_dual_prod_norm =
2386 Distance(current_dual_product_, next_dual_product,
2387 ShardedWorkingQp().PrimalSharder());
2388 if (primal_weight_ * new_primal_step_size * delta_dual_prod_norm <=
2389 contraction_factor * delta_dual_norm) {
2390 // Accept new_step_size as a good step.
2391 step_size_ = new_primal_step_size * primal_weight_;
2392 ratio_last_two_step_sizes_ = new_last_two_step_sizes_ratio;
2393 // The Malitsky and Pock guarantee uses a nonsymmetric weighted average:
2394 // the primal variable average involves the initial point, while the dual
2395 // average doesn't. See Theorem 2 in https://arxiv.org/pdf/1608.08883.pdf for
2396 // details.
2397 if (!primal_average_.HasNonzeroWeight()) {
2398 primal_average_.Add(
2399 current_primal_solution_,
2400 /*weight=*/new_primal_step_size * new_last_two_step_sizes_ratio);
2401 }
2402
2403 current_primal_solution_ = std::move(next_primal_solution.value);
2404 current_dual_solution_ = std::move(next_dual_solution.value);
2405 current_dual_product_ = std::move(next_dual_product);
2406 primal_average_.Add(current_primal_solution_,
2407 /*weight=*/new_primal_step_size);
2408 dual_average_.Add(current_dual_solution_,
2409 /*weight=*/new_primal_step_size);
2410 const double movement =
2411 ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
2412 if (movement == 0.0) {
2413 LogNumericalTermination(next_primal_solution.delta,
2414 next_dual_solution.delta);
2415 ResetAverageToCurrent();
2416 outcome = InnerStepOutcome::kForceNumericalTermination;
2417 } else if (movement > kDivergentMovement) {
2418 LogNumericalTermination(next_primal_solution.delta,
2419 next_dual_solution.delta);
2420 outcome = InnerStepOutcome::kForceNumericalTermination;
2421 }
2422 current_primal_delta_ = std::move(next_primal_solution.delta);
2423 current_dual_delta_ = std::move(next_dual_solution.delta);
2424 break;
2425 } else {
2426 new_primal_step_size = step_size_downscaling * new_primal_step_size;
2427 }
2428 }
2429 // `inner_iterations` isn't incremented for the accepted step.
2430 num_rejected_steps_ += inner_iterations;
2431 return outcome;
2432}
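
// A standalone sketch of the Malitsky-Pock acceptance test used in the loop
// above (illustrative only; hypothetical helper): a trial primal step size is
// accepted when
//   primal_weight * step_size * ||K^T(y_next - y)|| <=
//       contraction_factor * ||y_next - y||,
// and otherwise the step size is scaled down and the test repeats.
static bool MalitskyPockStepAcceptedSketch(double primal_weight,
                                           double trial_primal_step_size,
                                           double delta_dual_product_norm,
                                           double delta_dual_norm,
                                           double contraction_factor) {
  return primal_weight * trial_primal_step_size * delta_dual_product_norm <=
         contraction_factor * delta_dual_norm;
}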
2433
2434InnerStepOutcome Solver::TakeAdaptiveStep() {
2435 InnerStepOutcome outcome = InnerStepOutcome::kSuccessful;
2436 int inner_iterations = 0;
2437 for (bool accepted_step = false; !accepted_step; ++inner_iterations) {
2438 if (inner_iterations >= 60) {
2439 LogInnerIterationLimitHit();
2440 ResetAverageToCurrent();
2441 outcome = InnerStepOutcome::kForceNumericalTermination;
2442 break;
2443 }
2444 const double primal_step_size = step_size_ / primal_weight_;
2445 const double dual_step_size = step_size_ * primal_weight_;
2446 NextSolutionAndDelta next_primal_solution =
2447 ComputeNextPrimalSolution(primal_step_size);
2448 NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
2449 dual_step_size, /*extrapolation_factor=*/1.0, next_primal_solution);
2450 const double movement =
2451 ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
2452 if (movement == 0.0) {
2453 LogNumericalTermination(next_primal_solution.delta,
2454 next_dual_solution.delta);
2455 ResetAverageToCurrent();
2456 outcome = InnerStepOutcome::kForceNumericalTermination;
2457 break;
2458 } else if (movement > kDivergentMovement) {
2459 LogNumericalTermination(next_primal_solution.delta,
2460 next_dual_solution.delta);
2461 outcome = InnerStepOutcome::kForceNumericalTermination;
2462 break;
2463 }
2464 VectorXd next_dual_product = TransposedMatrixVectorProduct(
2465 WorkingQp().constraint_matrix, next_dual_solution.value,
2466 ShardedWorkingQp().ConstraintMatrixSharder());
2467 const double nonlinearity =
2468 ComputeNonlinearity(next_primal_solution.delta, next_dual_product);
2469
2470 // See equation (5) in https://arxiv.org/pdf/2106.04756.pdf.
2471 const double step_size_limit =
2472 nonlinearity > 0 ? movement / nonlinearity
2473 : std::numeric_limits<double>::infinity();
2474
2475 if (step_size_ <= step_size_limit) {
2476 current_primal_solution_ = std::move(next_primal_solution.value);
2477 current_dual_solution_ = std::move(next_dual_solution.value);
2478 current_dual_product_ = std::move(next_dual_product);
2479 current_primal_delta_ = std::move(next_primal_solution.delta);
2480 current_dual_delta_ = std::move(next_dual_solution.delta);
2481 primal_average_.Add(current_primal_solution_, /*weight=*/step_size_);
2482 dual_average_.Add(current_dual_solution_, /*weight=*/step_size_);
2483 accepted_step = true;
2484 }
2485 const double total_steps_attempted =
2486 num_rejected_steps_ + inner_iterations + iterations_completed_ + 1;
2487 // Our step sizes are a factor 1 - (`total_steps_attempted` + 1)^(-
2488 // `step_size_reduction_exponent`) smaller than they could be as a margin to
2489 // reduce rejected steps.
2490 // The std::isinf() test protects against 0 * infinity = NAN when std::pow() == 1.0.
2491 const double first_term =
2492 std::isinf(step_size_limit)
2493 ? step_size_limit
2494 : (1 - std::pow(total_steps_attempted + 1.0,
2495 -params_.adaptive_linesearch_parameters()
2496 .step_size_reduction_exponent())) *
2497 step_size_limit;
2498 const double second_term =
2499 (1 + std::pow(total_steps_attempted + 1.0,
2500 -params_.adaptive_linesearch_parameters()
2501 .step_size_growth_exponent())) *
2502 step_size_;
2503 // From the first term when we have to reject a step, `step_size_`
2504 // decreases by a factor of at least 1 - (`total_steps_attempted` + 1)^(-
2505 // `step_size_reduction_exponent`). From the second term we increase
2506 // `step_size_` by a factor of at most 1 + (`total_steps_attempted` +
2507 // 1)^(-`step_size_growth_exponent`) Therefore if more than order
2508 // (`total_steps_attempted` + 1)^(`step_size_reduction_exponent`
2509 // - `step_size_growth_exponent`) fraction of the time we have a rejected
2510 // step, we overall decrease `step_size_`. When `step_size_` is
2511 // sufficiently small we stop having rejected steps.
2512 step_size_ = std::min(first_term, second_term);
2513 }
2514 // `inner_iterations` is incremented for the accepted step.
2515 num_rejected_steps_ += inner_iterations - 1;
2516 return outcome;
2517}
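
// A standalone sketch of the step size update performed at the end of each
// inner iteration above (illustrative only; hypothetical helper whose
// exponent arguments stand in for the adaptive_linesearch_parameters fields).
// It combines the limit from equation (5) of
// https://arxiv.org/pdf/2106.04756.pdf with the reduction/growth margins
// described in the comments.
static double NextAdaptiveStepSizeSketch(double movement, double nonlinearity,
                                         double current_step_size,
                                         double total_steps_attempted,
                                         double reduction_exponent,
                                         double growth_exponent) {
  const double step_size_limit =
      nonlinearity > 0 ? movement / nonlinearity
                       : std::numeric_limits<double>::infinity();
  const double first_term =
      std::isinf(step_size_limit)
          ? step_size_limit
          : (1 - std::pow(total_steps_attempted + 1.0, -reduction_exponent)) *
                step_size_limit;
  const double second_term =
      (1 + std::pow(total_steps_attempted + 1.0, -growth_exponent)) *
      current_step_size;
  return std::min(first_term, second_term);
}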
2518
2519InnerStepOutcome Solver::TakeConstantSizeStep() {
2520 const double primal_step_size = step_size_ / primal_weight_;
2521 const double dual_step_size = step_size_ * primal_weight_;
2522 NextSolutionAndDelta next_primal_solution =
2523 ComputeNextPrimalSolution(primal_step_size);
2524 NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
2525 dual_step_size, /*extrapolation_factor=*/1.0, next_primal_solution);
2526 const double movement =
2527 ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
2528 if (movement == 0.0) {
2529 LogNumericalTermination(next_primal_solution.delta,
2530 next_dual_solution.delta);
2531 ResetAverageToCurrent();
2532 return InnerStepOutcome::kForceNumericalTermination;
2533 } else if (movement > kDivergentMovement) {
2534 LogNumericalTermination(next_primal_solution.delta,
2535 next_dual_solution.delta);
2536 return InnerStepOutcome::kForceNumericalTermination;
2537 }
2538 VectorXd next_dual_product = TransposedMatrixVectorProduct(
2539 WorkingQp().constraint_matrix, next_dual_solution.value,
2540 ShardedWorkingQp().ConstraintMatrixSharder());
2541 current_primal_solution_ = std::move(next_primal_solution.value);
2542 current_dual_solution_ = std::move(next_dual_solution.value);
2543 current_dual_product_ = std::move(next_dual_product);
2544 current_primal_delta_ = std::move(next_primal_solution.delta);
2545 current_dual_delta_ = std::move(next_dual_solution.delta);
2546 primal_average_.Add(current_primal_solution_, /*weight=*/step_size_);
2547 dual_average_.Add(current_dual_solution_, /*weight=*/step_size_);
2548 return InnerStepOutcome::kSuccessful;
2549}
2550
2551IterationStats Solver::TotalWorkSoFar(const SolveLog& solve_log) const {
2552 IterationStats stats = CreateSimpleIterationStats(RESTART_CHOICE_NO_RESTART);
2553 IterationStats full_stats =
2554 AddWorkStats(stats, WorkFromFeasibilityPolishing(solve_log));
2555 return full_stats;
2556}
2557
2558FeasibilityPolishingDetails BuildFeasibilityPolishingDetails(
2559 PolishingPhaseType phase_type, int iteration_count,
2560 const PrimalDualHybridGradientParams& params, const SolveLog& solve_log) {
2561 FeasibilityPolishingDetails details;
2562 details.set_polishing_phase_type(phase_type);
2563 details.set_main_iteration_count(iteration_count);
2564 *details.mutable_params() = params;
2565 details.set_termination_reason(solve_log.termination_reason());
2566 details.set_iteration_count(solve_log.iteration_count());
2567 details.set_solve_time_sec(solve_log.solve_time_sec());
2568 *details.mutable_solution_stats() = solve_log.solution_stats();
2569 details.set_solution_type(solve_log.solution_type());
2570 absl::c_copy(solve_log.iteration_stats(),
2571 google::protobuf::RepeatedPtrFieldBackInserter(
2572 details.mutable_iteration_stats()));
2573 return details;
2574}
2575
2576// Note: `TERMINATION_REASON_INTERRUPTED_BY_USER` is treated as a work limit
2577// (that was determined in real-time by the user).
2578bool TerminationReasonIsWorkLimit(const TerminationReason reason) {
2579 return reason == TERMINATION_REASON_ITERATION_LIMIT ||
2580 reason == TERMINATION_REASON_TIME_LIMIT ||
2581 reason == TERMINATION_REASON_KKT_MATRIX_PASS_LIMIT ||
2582 reason == TERMINATION_REASON_INTERRUPTED_BY_USER;
2583}
2584
2585std::optional<SolverResult> Solver::TryFeasibilityPolishing(
2586 const int iteration_limit, const std::atomic<bool>* interrupt_solve,
2587 SolveLog& solve_log) {
2588 TerminationCriteria::DetailedOptimalityCriteria optimality_criteria =
2589 EffectiveOptimalityCriteria(params_.termination_criteria());
2590
2591 VectorXd average_primal = PrimalAverage();
2592 VectorXd average_dual = DualAverage();
2593
2594 ConvergenceInformation first_convergence_info;
2595 preprocess_solver_->ComputeConvergenceAndInfeasibilityFromWorkingSolution(
2596 params_, average_primal, average_dual, POINT_TYPE_AVERAGE_ITERATE,
2597 &first_convergence_info, nullptr);
2598
2599  // NOTE: Even though the objective gap is sometimes reduced by feasibility
2600  // polishing, it is usually increased; an experiment (on MIPLIB2017)
2601  // shows that this test reduces the iteration count by 3-4% on average.
2602 if (!ObjectiveGapMet(optimality_criteria, first_convergence_info)) {
2603 if (params_.verbosity_level() >= 2) {
2604 SOLVER_LOG(&preprocess_solver_->Logger(),
2605 "Skipping feasibility polishing because the objective gap "
2606 "is too large.");
2607 }
2608 return std::nullopt;
2609 }
2610
2611 if (params_.verbosity_level() >= 2) {
2612 SOLVER_LOG(&preprocess_solver_->Logger(),
2613 "Starting primal feasibility polishing");
2614 }
2615 SolverResult primal_result = TryPrimalPolishing(
2616 std::move(average_primal), iteration_limit, interrupt_solve, solve_log);
2617
2618 if (params_.verbosity_level() >= 2) {
2619 SOLVER_LOG(
2620 &preprocess_solver_->Logger(),
2621 "Primal feasibility polishing termination reason: ",
2622 TerminationReason_Name(primal_result.solve_log.termination_reason()));
2623 }
2624 if (TerminationReasonIsWorkLimit(
2625 primal_result.solve_log.termination_reason())) {
2626 return std::nullopt;
2627 } else if (primal_result.solve_log.termination_reason() !=
2628 TERMINATION_REASON_OPTIMAL) {
2629 // Note: `TERMINATION_REASON_PRIMAL_INFEASIBLE` could happen normally, but
2630 // we haven't ensured that the correct solution is returned in that case, so
2631 // we ignore the polishing result indicating infeasibility.
2632 // `TERMINATION_REASON_NUMERICAL_ERROR` can occur, but would be surprising
2633 // and interesting. Other termination reasons are probably bugs.
2634 SOLVER_LOG(&preprocess_solver_->Logger(),
2635 "WARNING: Primal feasibility polishing terminated with error ",
2636 primal_result.solve_log.termination_reason());
2637 return std::nullopt;
2638 }
2639
2640 if (params_.verbosity_level() >= 2) {
2641 SOLVER_LOG(&preprocess_solver_->Logger(),
2642 "Starting dual feasibility polishing");
2643 }
2644 SolverResult dual_result = TryDualPolishing(
2645 std::move(average_dual), iteration_limit, interrupt_solve, solve_log);
2646
2647 if (params_.verbosity_level() >= 2) {
2648 SOLVER_LOG(
2649 &preprocess_solver_->Logger(),
2650 "Dual feasibility polishing termination reason: ",
2651 TerminationReason_Name(dual_result.solve_log.termination_reason()));
2652 }
2653
2654 if (TerminationReasonIsWorkLimit(
2655 dual_result.solve_log.termination_reason())) {
2656 return std::nullopt;
2657 } else if (dual_result.solve_log.termination_reason() !=
2658 TERMINATION_REASON_OPTIMAL) {
2659 // Note: The comment in the corresponding location when checking the
2660 // termination reason for primal feasibility polishing applies here
2661 // too.
2662 SOLVER_LOG(&preprocess_solver_->Logger(),
2663 "WARNING: Dual feasibility polishing terminated with error ",
2664 dual_result.solve_log.termination_reason());
2665 return std::nullopt;
2666 }
2667
2668 IterationStats full_stats = TotalWorkSoFar(solve_log);
2669 preprocess_solver_->ComputeConvergenceAndInfeasibilityFromWorkingSolution(
2670 params_, primal_result.primal_solution, dual_result.dual_solution,
2671 POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION,
2672 full_stats.add_convergence_information(), nullptr);
2673 if (params_.verbosity_level() >= 2) {
2674 SOLVER_LOG(&preprocess_solver_->Logger(),
2675 "solution stats for polished solution:");
2676 LogIterationStatsHeader(params_.verbosity_level(),
2677 /*use_feasibility_polishing=*/true,
2678 preprocess_solver_->Logger());
2679 LogIterationStats(params_.verbosity_level(),
2680 /*use_feasibility_polishing=*/true,
2681 IterationType::kFeasibilityPolishingTermination,
2682 full_stats, params_.termination_criteria(),
2683 preprocess_solver_->OriginalBoundNorms(),
2684 POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION,
2685 preprocess_solver_->Logger());
2686 }
2687 std::optional<TerminationReasonAndPointType> earned_termination =
2688 CheckIterateTerminationCriteria(params_.termination_criteria(),
2689 full_stats,
2690 preprocess_solver_->OriginalBoundNorms(),
2691 /*force_numerical_termination=*/false);
2692 if (earned_termination.has_value()) {
2693 return ConstructSolverResult(std::move(primal_result.primal_solution),
2694 std::move(dual_result.dual_solution),
2695 full_stats, earned_termination->reason,
2696 POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION,
2697 solve_log);
2698 }
2699 // Note: A typical termination check would now call
2700 // `CheckSimpleTerminationCriteria`. However, there is no obvious iterate to
2701 // use for a `SolverResult`. Instead, allow the main iteration loop to notice
2702 // if the termination criteria checking has caused us to meet the simple
2703 // termination criteria. This would be a rare case anyway, since dual
2704 // feasibility polishing didn't terminate because of a work limit, and
2705 // `full_stats` is created shortly after `TryDualPolishing` returned.
2706 return std::nullopt;
2707}
2708
2709TerminationCriteria ReduceWorkLimitsByPreviousWork(
2710 TerminationCriteria criteria, const int iteration_limit,
2711 const IterationStats& previous_work) {
2712 criteria.set_iteration_limit(std::max(
2713 0, std::min(iteration_limit, criteria.iteration_limit() -
2714 previous_work.iteration_number())));
2715 criteria.set_kkt_matrix_pass_limit(
2716 std::max(0.0, criteria.kkt_matrix_pass_limit() -
2717 previous_work.cumulative_kkt_matrix_passes()));
2718 criteria.set_time_sec_limit(std::max(
2719 0.0, criteria.time_sec_limit() - previous_work.cumulative_time_sec()));
2720 return criteria;
2721}
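// A worked example of the reduction above (numbers chosen for illustration):
// if the original criteria allow 1000 iterations, 2.0e4 KKT matrix passes,
// and 600 seconds, the work so far is 800 iterations, 1.5e4 passes, and 450
// seconds, and `iteration_limit` is 50, then the polishing phase is left with
// min(50, 1000 - 800) = 50 iterations, 2.0e4 - 1.5e4 = 5.0e3 KKT matrix
// passes, and 600 - 450 = 150 seconds. Each limit is clamped at zero when the
// previous work already exceeds it.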
2722
2723SolverResult Solver::TryPrimalPolishing(
2724 VectorXd starting_primal_solution, const int iteration_limit,
2725 const std::atomic<bool>* interrupt_solve, SolveLog& solve_log) {
2726 PrimalDualHybridGradientParams primal_feasibility_params = params_;
2727 *primal_feasibility_params.mutable_termination_criteria() =
2728 ReduceWorkLimitsByPreviousWork(params_.termination_criteria(),
2729 iteration_limit,
2730 TotalWorkSoFar(solve_log));
2731
2732 // This will save the original objective after the swap.
2733 VectorXd objective;
2734 SetZero(ShardedWorkingQp().PrimalSharder(), objective);
2735 preprocess_solver_->SwapObjectiveVector(objective);
2736
2737 TerminationCriteria::DetailedOptimalityCriteria criteria =
2738 EffectiveOptimalityCriteria(params_.termination_criteria());
2739 const double kInfinity = std::numeric_limits<double>::infinity();
2740 criteria.set_eps_optimal_dual_residual_absolute(kInfinity);
2741 criteria.set_eps_optimal_dual_residual_relative(kInfinity);
2742 criteria.set_eps_optimal_objective_gap_absolute(kInfinity);
2743 criteria.set_eps_optimal_objective_gap_relative(kInfinity);
2744 *primal_feasibility_params.mutable_termination_criteria()
2745 ->mutable_detailed_optimality_criteria() = criteria;
2746 // TODO(user): Evaluate disabling primal weight updates for this primal
2747 // feasibility problem.
2748 VectorXd primal_feasibility_starting_dual;
2749 SetZero(ShardedWorkingQp().DualSharder(), primal_feasibility_starting_dual);
2750 Solver primal_solver(primal_feasibility_params,
2751 std::move(starting_primal_solution),
2752 std::move(primal_feasibility_starting_dual), step_size_,
2753 primal_weight_, preprocess_solver_);
2754 SolveLog primal_solve_log;
2755 // Stop `timer_`, since the time in `primal_solver.Solve()` will be recorded
2756 // by its timer.
2757 timer_.Stop();
2758 SolverResult primal_result = primal_solver.Solve(
2759 IterationType::kPrimalFeasibility, interrupt_solve, primal_solve_log);
2760 timer_.Start();
2761 // Restore the original objective.
2762 preprocess_solver_->SwapObjectiveVector(objective);
2763
2764 *solve_log.add_feasibility_polishing_details() =
2765 BuildFeasibilityPolishingDetails(
2766 POLISHING_PHASE_TYPE_PRIMAL_FEASIBILITY, iterations_completed_,
2767 primal_feasibility_params, primal_result.solve_log);
2768 return primal_result;
2769}
2770
2771VectorXd MapFiniteValuesToZero(const Sharder& sharder, const VectorXd& input) {
2772 VectorXd output(input.size());
2773 const auto make_finite_values_zero = [](const double x) {
2774 return std::isfinite(x) ? 0.0 : x;
2775 };
2776 sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
2777 shard(output) = shard(input).unaryExpr(make_finite_values_zero);
2778 });
2779 return output;
2780}
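// A worked example of the mapping above (input chosen for illustration): an
// `input` of (1.5, -inf, 0.0, +inf) becomes (0.0, -inf, 0.0, +inf), i.e.
// every finite entry is zeroed while infinite entries are preserved. This is
// what produces the homogeneous bounds used by dual feasibility polishing
// below.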
2781
2782SolverResult Solver::TryDualPolishing(VectorXd starting_dual_solution,
2783 const int iteration_limit,
2784 const std::atomic<bool>* interrupt_solve,
2785 SolveLog& solve_log) {
2786 PrimalDualHybridGradientParams dual_feasibility_params = params_;
2787 *dual_feasibility_params.mutable_termination_criteria() =
2788 ReduceWorkLimitsByPreviousWork(params_.termination_criteria(),
2789 iteration_limit,
2790 TotalWorkSoFar(solve_log));
2791
2792  // These will initially contain the homogeneous variable and constraint
2793  // bounds, but will contain the original variable and constraint bounds
2794  // after the swap.
2795 VectorXd constraint_lower_bounds = MapFiniteValuesToZero(
2796 ShardedWorkingQp().DualSharder(), WorkingQp().constraint_lower_bounds);
2797 VectorXd constraint_upper_bounds = MapFiniteValuesToZero(
2798 ShardedWorkingQp().DualSharder(), WorkingQp().constraint_upper_bounds);
2799 VectorXd variable_lower_bounds = MapFiniteValuesToZero(
2800 ShardedWorkingQp().PrimalSharder(), WorkingQp().variable_lower_bounds);
2801 VectorXd variable_upper_bounds = MapFiniteValuesToZero(
2802 ShardedWorkingQp().PrimalSharder(), WorkingQp().variable_upper_bounds);
2803 preprocess_solver_->SwapConstraintBounds(constraint_lower_bounds,
2804 constraint_upper_bounds);
2805 preprocess_solver_->SwapVariableBounds(variable_lower_bounds,
2806 variable_upper_bounds);
2807
2808 TerminationCriteria::DetailedOptimalityCriteria criteria =
2809 EffectiveOptimalityCriteria(params_.termination_criteria());
2810 const double kInfinity = std::numeric_limits<double>::infinity();
2811 criteria.set_eps_optimal_primal_residual_absolute(kInfinity);
2812 criteria.set_eps_optimal_primal_residual_relative(kInfinity);
2813 criteria.set_eps_optimal_objective_gap_absolute(kInfinity);
2814 criteria.set_eps_optimal_objective_gap_relative(kInfinity);
2815 *dual_feasibility_params.mutable_termination_criteria()
2816 ->mutable_detailed_optimality_criteria() = criteria;
2817 // TODO(user): Evaluate disabling primal weight updates for this dual
2818 // feasibility problem.
2819 VectorXd dual_feasibility_starting_primal;
2820 SetZero(ShardedWorkingQp().PrimalSharder(), dual_feasibility_starting_primal);
2821 Solver dual_solver(dual_feasibility_params,
2822 std::move(dual_feasibility_starting_primal),
2823 std::move(starting_dual_solution), step_size_,
2824 primal_weight_, preprocess_solver_);
2825 SolveLog dual_solve_log;
2826 // Stop `timer_`, since the time in `dual_solver.Solve()` will be recorded
2827 // by its timer.
2828 timer_.Stop();
2829 SolverResult dual_result = dual_solver.Solve(IterationType::kDualFeasibility,
2830 interrupt_solve, dual_solve_log);
2831 timer_.Start();
2832 // Restore the original bounds.
2833 preprocess_solver_->SwapConstraintBounds(constraint_lower_bounds,
2834 constraint_upper_bounds);
2835 preprocess_solver_->SwapVariableBounds(variable_lower_bounds,
2836 variable_upper_bounds);
2837 *solve_log.add_feasibility_polishing_details() =
2838 BuildFeasibilityPolishingDetails(
2839 POLISHING_PHASE_TYPE_DUAL_FEASIBILITY, iterations_completed_,
2840 dual_feasibility_params, dual_result.solve_log);
2841 return dual_result;
2842}
2843
2844SolverResult Solver::Solve(const IterationType iteration_type,
2845 const std::atomic<bool>* interrupt_solve,
2846 SolveLog solve_log) {
2847 preprocessing_time_sec_ = solve_log.preprocessing_time_sec();
2848 timer_.Start();
2849 last_primal_start_point_ =
2850 CloneVector(current_primal_solution_, ShardedWorkingQp().PrimalSharder());
2851 last_dual_start_point_ =
2852 CloneVector(current_dual_solution_, ShardedWorkingQp().DualSharder());
2853 // Note: Any cached values computed here also need to be recomputed after a
2854 // restart.
2855
2856 ratio_last_two_step_sizes_ = 1;
2857 current_dual_product_ = TransposedMatrixVectorProduct(
2858 WorkingQp().constraint_matrix, current_dual_solution_,
2859 ShardedWorkingQp().ConstraintMatrixSharder());
2860
2861 // This is set to true if we can't proceed any more because of numerical
2862 // issues. We may or may not have found the optimal solution.
2863 bool force_numerical_termination = false;
2864
2865 int next_feasibility_polishing_iteration = 100;
2866
2867 num_rejected_steps_ = 0;
2868
2869 IterationStats work_from_feasibility_polishing =
2870 WorkFromFeasibilityPolishing(solve_log);
2871 for (iterations_completed_ = 0;; ++iterations_completed_) {
2872 // This code performs the logic of the major iterations and termination
2873 // checks. It may modify the current solution and primal weight (e.g., when
2874 // performing a restart).
2875 const std::optional<SolverResult> maybe_result =
2876 MajorIterationAndTerminationCheck(
2877 iteration_type, force_numerical_termination, interrupt_solve,
2878 work_from_feasibility_polishing, solve_log);
2879 if (maybe_result.has_value()) {
2880 return maybe_result.value();
2881 }
2882
2883 if (params_.use_feasibility_polishing() &&
2884 iteration_type == IterationType::kNormal &&
2885 iterations_completed_ >= next_feasibility_polishing_iteration) {
2886 // The total number of iterations in feasibility polishing is at most
2887 // `4 * iterations_completed_ / kFeasibilityIterationFraction`.
2888 // One factor of two is because there are both primal and dual feasibility
2889 // polishing phases, and the other factor of two is because
2890 // `next_feasibility_polishing_iteration` increases by a factor of 2 each
2891 // feasibility polishing phase, so the sum of iteration limits is at most
2892 // twice the last value.
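      // As a worked example (iteration counts chosen for illustration): if
      // polishing is triggered at iterations 100, 200, 400, and 800, the
      // per-call limits below are 12, 25, 50, and 100 (integer division by
      // 8). Each call runs a primal and a dual phase, so the polishing work
      // is at most 2 * (12 + 25 + 50 + 100) = 374 <= 4 * 800 / 8 = 400.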
2893 const int kFeasibilityIterationFraction = 8;
2894 const std::optional<SolverResult> feasibility_result =
2895 TryFeasibilityPolishing(
2896 iterations_completed_ / kFeasibilityIterationFraction,
2897 interrupt_solve, solve_log);
2898 if (feasibility_result.has_value()) {
2899 return *feasibility_result;
2900 }
2901 next_feasibility_polishing_iteration *= 2;
2902 // Update work to include new feasibility phases.
2903 work_from_feasibility_polishing = WorkFromFeasibilityPolishing(solve_log);
2904 }
2905
2906 // TODO(user): If we use a step rule that could reject many steps in a
2907 // row, we should add a termination check within this loop also. For the
2908 // Malitsky and Pock rule, we perform a termination check and declare
2909 // NUMERICAL_ERROR whenever we hit 60 inner iterations.
2910 InnerStepOutcome outcome;
2911 switch (params_.linesearch_rule()) {
2912 case PrimalDualHybridGradientParams::MALITSKY_POCK_LINESEARCH_RULE:
2913 outcome = TakeMalitskyPockStep();
2914 break;
2915 case PrimalDualHybridGradientParams::ADAPTIVE_LINESEARCH_RULE:
2916 outcome = TakeAdaptiveStep();
2917 break;
2918 case PrimalDualHybridGradientParams::CONSTANT_STEP_SIZE_RULE:
2919 outcome = TakeConstantSizeStep();
2920 break;
2921 default:
2922 LOG(FATAL) << "Unrecognized linesearch rule "
2923 << params_.linesearch_rule();
2924 }
2925 if (outcome == InnerStepOutcome::kForceNumericalTermination) {
2926 force_numerical_termination = true;
2927 }
2928 } // loop over iterations
2929}
2930
2931} // namespace
2932
2933SolverResult PrimalDualHybridGradient(
2934 QuadraticProgram qp, const PrimalDualHybridGradientParams& params,
2935 const std::atomic<bool>* interrupt_solve,
2936 std::function<void(const std::string&)> message_callback,
2937 IterationStatsCallback iteration_stats_callback) {
2938 return PrimalDualHybridGradient(std::move(qp), params, std::nullopt,
2939 interrupt_solve, std::move(message_callback),
2940 std::move(iteration_stats_callback));
2941}
2942
2943SolverResult PrimalDualHybridGradient(
2944 QuadraticProgram qp, const PrimalDualHybridGradientParams& params,
2945 std::optional<PrimalAndDualSolution> initial_solution,
2946 const std::atomic<bool>* interrupt_solve,
2947 std::function<void(const std::string&)> message_callback,
2948 IterationStatsCallback iteration_stats_callback) {
2949 SolverLogger logger;
2950 logger.EnableLogging(true);
2951 if (message_callback) {
2952 logger.AddInfoLoggingCallback(std::move(message_callback));
2953 } else {
2954 logger.SetLogToStdOut(true);
2955 }
2956  const absl::Status params_status =
2957      ValidatePrimalDualHybridGradientParams(params);
2958 if (!params_status.ok()) {
2959 return ErrorSolverResult(TERMINATION_REASON_INVALID_PARAMETER,
2960 params_status.ToString(), logger);
2961 }
2962 if (!qp.constraint_matrix.isCompressed()) {
2963 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
2964 "constraint_matrix must be in compressed format. "
2965 "Call constraint_matrix.makeCompressed()",
2966 logger);
2967 }
2968 const absl::Status dimensions_status = ValidateQuadraticProgramDimensions(qp);
2969 if (!dimensions_status.ok()) {
2970 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
2971 dimensions_status.ToString(), logger);
2972 }
2973 if (qp.objective_scaling_factor == 0) {
2974 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
2975 "The objective scaling factor cannot be zero.",
2976 logger);
2977 }
2978 if (params.use_feasibility_polishing() && !IsLinearProgram(qp)) {
2979 return ErrorSolverResult(
2980 TERMINATION_REASON_INVALID_PARAMETER,
2981 "use_feasibility_polishing is only implemented for linear programs.",
2982 logger);
2983 }
2984 PreprocessSolver solver(std::move(qp), params, &logger);
2985 return solver.PreprocessAndSolve(params, std::move(initial_solution),
2986 interrupt_solve,
2987 std::move(iteration_stats_callback));
2988}
2989
2990namespace internal {
2991
2992glop::ProblemSolution ComputeStatuses(const QuadraticProgram& qp,
2993                                      const PrimalAndDualSolution& solution) {
2994 glop::ProblemSolution glop_solution(
2995 glop::RowIndex(solution.dual_solution.size()),
2996 glop::ColIndex(solution.primal_solution.size()));
2997  // This doesn't matter much, as glop's preprocessor makes little use of it.
2998 // We pick IMPRECISE since we are often calling this code early in the solve.
2999 glop_solution.status = glop::ProblemStatus::IMPRECISE;
3000 for (glop::RowIndex i{0}; i.value() < solution.dual_solution.size(); ++i) {
3001 if (qp.constraint_lower_bounds[i.value()] ==
3002 qp.constraint_upper_bounds[i.value()]) {
3003      glop_solution.constraint_statuses[i] =
3004          glop::ConstraintStatus::FIXED_VALUE;
3005    } else if (solution.dual_solution[i.value()] > 0) {
3006      glop_solution.constraint_statuses[i] =
3007          glop::ConstraintStatus::AT_LOWER_BOUND;
3008    } else if (solution.dual_solution[i.value()] < 0) {
3009      glop_solution.constraint_statuses[i] =
3010          glop::ConstraintStatus::AT_UPPER_BOUND;
3011    } else {
3012      glop_solution.constraint_statuses[i] = glop::ConstraintStatus::FREE;
3013    }
3014 }
3015
3016 for (glop::ColIndex i{0}; i.value() < solution.primal_solution.size(); ++i) {
3017 const bool at_lb = solution.primal_solution[i.value()] <=
3018 qp.variable_lower_bounds[i.value()];
3019 const bool at_ub = solution.primal_solution[i.value()] >=
3020 qp.variable_upper_bounds[i.value()];
3021 // Note that `ShardedWeightedAverage` is designed so that variables at their
3022 // bounds will be exactly at their bounds even with floating-point roundoff.
3023 if (at_lb) {
3024      if (at_ub) {
3025        glop_solution.variable_statuses[i] = glop::VariableStatus::FIXED_VALUE;
3026      } else {
3027        glop_solution.variable_statuses[i] =
3028            glop::VariableStatus::AT_LOWER_BOUND;
3029      }
3030    } else {
3031      if (at_ub) {
3032        glop_solution.variable_statuses[i] =
3033            glop::VariableStatus::AT_UPPER_BOUND;
3034      } else {
3035        glop_solution.variable_statuses[i] = glop::VariableStatus::BASIC;
3036      }
3037 }
3038 }
3039 return glop_solution;
3040}
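// A worked example of the mapping above (values chosen for illustration): a
// constraint with bounds [3.0, 3.0] maps to ConstraintStatus::FIXED_VALUE
// regardless of its dual value, a constraint with bounds [0.0, +inf) and a
// positive dual maps to ConstraintStatus::AT_LOWER_BOUND, and a variable with
// bounds [0.0, 5.0] whose primal value is 0.0 maps to
// VariableStatus::AT_LOWER_BOUND.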
3041
3042} // namespace internal
3043
3044} // namespace operations_research::pdlp
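// A minimal usage sketch of the entry point defined above, assuming a
// `QuadraticProgram qp` has already been populated and that the public header
// is "ortools/pdlp/primal_dual_hybrid_gradient.h" (the parameter values are
// assumptions chosen for illustration, not recommendations):
//
//   #include <utility>
//
//   #include "ortools/pdlp/primal_dual_hybrid_gradient.h"
//   #include "ortools/pdlp/solvers.pb.h"
//
//   using ::operations_research::pdlp::PrimalDualHybridGradient;
//   using ::operations_research::pdlp::PrimalDualHybridGradientParams;
//   using ::operations_research::pdlp::SolverResult;
//
//   PrimalDualHybridGradientParams params;
//   params.mutable_termination_criteria()->set_time_sec_limit(60.0);
//   params.set_verbosity_level(2);
//   qp.constraint_matrix.makeCompressed();
//   SolverResult result = PrimalDualHybridGradient(
//       std::move(qp), params, /*interrupt_solve=*/nullptr,
//       /*message_callback=*/nullptr, /*iteration_stats_callback=*/nullptr);
//   // result.solve_log.termination_reason() reports why the solve stopped;
//   // result.primal_solution and result.dual_solution hold the returned
//   // iterates.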