Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
primal_dual_hybrid_gradient.cc
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <atomic>
18#include <cmath>
19#include <cstdint>
20#include <functional>
21#include <limits>
22#include <optional>
23#include <random>
24#include <string>
25#include <utility>
26#include <vector>
27
28#include "Eigen/Core"
29#include "Eigen/SparseCore"
30#include "absl/algorithm/container.h"
31#include "absl/log/check.h"
32#include "absl/status/status.h"
33#include "absl/status/statusor.h"
34#include "absl/strings/str_cat.h"
35#include "absl/strings/str_format.h"
36#include "absl/strings/string_view.h"
37#include "absl/time/clock.h"
38#include "absl/time/time.h"
39#include "google/protobuf/repeated_ptr_field.h"
42#include "ortools/base/timer.h"
43#include "ortools/glop/parameters.pb.h"
45#include "ortools/linear_solver/linear_solver.pb.h"
54#include "ortools/pdlp/solve_log.pb.h"
55#include "ortools/pdlp/solvers.pb.h"
60
61namespace operations_research::pdlp {
62
63namespace {
64
65using ::Eigen::VectorXd;
66using ::operations_research::SolverLogger;
67
68using IterationStatsCallback =
69 std::function<void(const IterationCallbackInfo&)>;
70
71// Computes a `num_threads` that is capped by the problem size and `num_shards`,
72// if specified, to avoid creating unusable threads.
73int NumThreads(const int num_threads, const int num_shards,
74 const QuadraticProgram& qp, SolverLogger& logger) {
75 int capped_num_threads = num_threads;
76 if (num_shards > 0) {
77 capped_num_threads = std::min(capped_num_threads, num_shards);
78 }
79 const int64_t problem_limit = std::max(qp.variable_lower_bounds.size(),
80 qp.constraint_lower_bounds.size());
81 capped_num_threads =
82 static_cast<int>(std::min(int64_t{capped_num_threads}, problem_limit));
83 capped_num_threads = std::max(capped_num_threads, 1);
84 if (capped_num_threads != num_threads) {
85 SOLVER_LOG(&logger, "WARNING: Reducing num_threads from ", num_threads,
86 " to ", capped_num_threads,
87 " because additional threads would be useless.");
88 }
89 return capped_num_threads;
90}
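// For example, requesting `num_threads` == 16 with `num_shards` == 4 on a
// problem with at least four variables or constraints yields 4 threads, and
// the reduction is reported by the warning above.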
91
92// If `num_shards` is positive, returns it. Otherwise returns a reasonable
93// number of shards to use with `ShardedQuadraticProgram` for the given
94// `num_threads`.
95int NumShards(const int num_threads, const int num_shards) {
96 if (num_shards > 0) return num_shards;
97 return num_threads == 1 ? 1 : 4 * num_threads;
98}
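// Hence a solve that leaves `num_shards` unset (zero) and uses 8 threads gets
// 32 shards; keeping several shards per thread gives the work scheduler room
// to balance shards of uneven cost.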
99
100std::string ConvergenceInformationString(
101 const ConvergenceInformation& convergence_information,
102 const RelativeConvergenceInformation& relative_information,
103 const OptimalityNorm residual_norm) {
104 constexpr absl::string_view kFormatStr =
105 "%#12.6g %#12.6g %#12.6g | %#12.6g %#12.6g %#12.6g | %#12.6g %#12.6g | "
106 "%#12.6g %#12.6g";
107 switch (residual_norm) {
108 case OPTIMALITY_NORM_L_INF:
109 return absl::StrFormat(
110 kFormatStr, relative_information.relative_l_inf_primal_residual,
111 relative_information.relative_l_inf_dual_residual,
112 relative_information.relative_optimality_gap,
113 convergence_information.l_inf_primal_residual(),
114 convergence_information.l_inf_dual_residual(),
115 convergence_information.primal_objective() -
116 convergence_information.dual_objective(),
117 convergence_information.primal_objective(),
118 convergence_information.dual_objective(),
119 convergence_information.l2_primal_variable(),
120 convergence_information.l2_dual_variable());
121 case OPTIMALITY_NORM_L2:
122 return absl::StrFormat(kFormatStr,
123 relative_information.relative_l2_primal_residual,
124 relative_information.relative_l2_dual_residual,
125 relative_information.relative_optimality_gap,
126 convergence_information.l2_primal_residual(),
127 convergence_information.l2_dual_residual(),
128 convergence_information.primal_objective() -
129 convergence_information.dual_objective(),
130 convergence_information.primal_objective(),
131 convergence_information.dual_objective(),
132 convergence_information.l2_primal_variable(),
133 convergence_information.l2_dual_variable());
134 case OPTIMALITY_NORM_L_INF_COMPONENTWISE:
135 return absl::StrFormat(
136 kFormatStr,
137 convergence_information.l_inf_componentwise_primal_residual(),
138 convergence_information.l_inf_componentwise_dual_residual(),
139 relative_information.relative_optimality_gap,
140 convergence_information.l_inf_primal_residual(),
141 convergence_information.l_inf_dual_residual(),
142 convergence_information.primal_objective() -
143 convergence_information.dual_objective(),
144 convergence_information.primal_objective(),
145 convergence_information.dual_objective(),
146 convergence_information.l2_primal_variable(),
147 convergence_information.l2_dual_variable());
148 case OPTIMALITY_NORM_UNSPECIFIED:
149 LOG(FATAL) << "Residual norm not specified.";
150 }
151 LOG(FATAL) << "Invalid residual norm " << residual_norm << ".";
152}
153
154std::string ConvergenceInformationShortString(
155 const ConvergenceInformation& convergence_information,
156 const RelativeConvergenceInformation& relative_information,
157 const OptimalityNorm residual_norm) {
158 constexpr absl::string_view kFormatStr =
159 "%#10.4g %#10.4g %#10.4g | %#10.4g %#10.4g";
160 switch (residual_norm) {
161 case OPTIMALITY_NORM_L_INF:
162 return absl::StrFormat(
163 kFormatStr, relative_information.relative_l_inf_primal_residual,
164 relative_information.relative_l_inf_dual_residual,
165 relative_information.relative_optimality_gap,
166 convergence_information.primal_objective(),
167 convergence_information.dual_objective());
168 case OPTIMALITY_NORM_L2:
169 return absl::StrFormat(kFormatStr,
170 relative_information.relative_l2_primal_residual,
171 relative_information.relative_l2_dual_residual,
172 relative_information.relative_optimality_gap,
173 convergence_information.primal_objective(),
174 convergence_information.dual_objective());
175 case OPTIMALITY_NORM_L_INF_COMPONENTWISE:
176 return absl::StrFormat(
177 kFormatStr,
178 convergence_information.l_inf_componentwise_primal_residual(),
179 convergence_information.l_inf_componentwise_dual_residual(),
180 relative_information.relative_optimality_gap,
181 convergence_information.primal_objective(),
182 convergence_information.dual_objective());
183 case OPTIMALITY_NORM_UNSPECIFIED:
184 LOG(FATAL) << "Residual norm not specified.";
185 }
186 LOG(FATAL) << "Invalid residual norm " << residual_norm << ".";
187}
188
189// Logs a description of `iter_stats`, based on the
190// `iter_stats.convergence_information` entry with
191// `.candidate_type()==preferred_candidate` if one exists, otherwise based on
192// the first value, if any. `termination_criteria.optimality_norm` determines
193// which residual norms from `iter_stats.convergence_information` are used.
194void LogIterationStats(int verbosity_level, bool use_feasibility_polishing,
195 IterationType iteration_type,
196 const IterationStats& iter_stats,
197 const TerminationCriteria& termination_criteria,
198 const QuadraticProgramBoundNorms& bound_norms,
199 PointType preferred_candidate, SolverLogger& logger) {
200 std::string iteration_string =
201 verbosity_level >= 3
202 ? absl::StrFormat("%6d %8.1f %6.1f", iter_stats.iteration_number(),
203 iter_stats.cumulative_kkt_matrix_passes(),
204 iter_stats.cumulative_time_sec())
205 : absl::StrFormat("%6d %6.1f", iter_stats.iteration_number(),
206 iter_stats.cumulative_time_sec());
207 auto convergence_information =
208 GetConvergenceInformation(iter_stats, preferred_candidate);
209 if (!convergence_information.has_value() &&
210 iter_stats.convergence_information_size() > 0) {
211 convergence_information = iter_stats.convergence_information(0);
212 }
213 const char* phase_string = [&]() {
214 if (use_feasibility_polishing) {
215 switch (iteration_type) {
217 return "n ";
219 return "p ";
221 return "d ";
225 return "t ";
226 }
227 } else {
228 return "";
229 }
230 }();
231 if (convergence_information.has_value()) {
232 const char* iterate_string = [&]() {
233 if (verbosity_level >= 4) {
234 switch (convergence_information->candidate_type()) {
235 case POINT_TYPE_CURRENT_ITERATE:
236 return "C ";
237 case POINT_TYPE_AVERAGE_ITERATE:
238 return "A ";
239 case POINT_TYPE_ITERATE_DIFFERENCE:
240 return "D ";
241 case POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION:
242 return "F ";
243 default:
244 return "? ";
245 }
246 } else {
247 return "";
248 }
249 }();
250 const RelativeConvergenceInformation relative_information =
252 EffectiveOptimalityCriteria(termination_criteria),
253 *convergence_information, bound_norms);
254 std::string convergence_string =
255 verbosity_level >= 3
256 ? ConvergenceInformationString(
257 *convergence_information, relative_information,
258 termination_criteria.optimality_norm())
259 : ConvergenceInformationShortString(
260 *convergence_information, relative_information,
261 termination_criteria.optimality_norm());
262 SOLVER_LOG(&logger, phase_string, iterate_string, iteration_string, " | ",
263 convergence_string);
264 } else {
265 // No convergence information, just log the basic work stats.
266 SOLVER_LOG(&logger, phase_string, verbosity_level >= 4 ? "? " : "",
267 iteration_string);
268 }
269}
270
271void LogIterationStatsHeader(int verbosity_level,
272 bool use_feasibility_polishing,
273 SolverLogger& logger) {
274 std::string work_labels =
275 verbosity_level >= 3
276 ? absl::StrFormat("%6s %8s %6s", "iter#", "kkt_pass", "time")
277 : absl::StrFormat("%6s %6s", "iter#", "time");
278 std::string convergence_labels =
279 verbosity_level >= 3
280 ? absl::StrFormat(
281 "%12s %12s %12s | %12s %12s %12s | %12s %12s | %12s %12s",
282 "rel_prim_res", "rel_dual_res", "rel_gap", "prim_resid",
283 "dual_resid", "obj_gap", "prim_obj", "dual_obj", "prim_var_l2",
284 "dual_var_l2")
285 : absl::StrFormat("%10s %10s %10s | %10s %10s", "rel_p_res",
286 "rel_d_res", "rel_gap", "prim_obj", "dual_obj");
287 SOLVER_LOG(&logger, use_feasibility_polishing ? "f " : "",
288 verbosity_level >= 4 ? "I " : "", work_labels, " | ",
289 convergence_labels);
290}
291
292enum class InnerStepOutcome {
293 kSuccessful,
294 kForceNumericalTermination,
295};
296
297// Makes the closing changes to `solve_log` and builds a `SolverResult`.
298// NOTE: `primal_solution`, `dual_solution`, and `solve_log` are passed by
299// value. To avoid unnecessary copying, move these objects (i.e. use
300// `std::move()`).
301SolverResult ConstructSolverResult(VectorXd primal_solution,
302 VectorXd dual_solution,
303 const IterationStats& stats,
304 TerminationReason termination_reason,
305 PointType output_type, SolveLog solve_log) {
306 solve_log.set_iteration_count(stats.iteration_number());
307 solve_log.set_termination_reason(termination_reason);
308 solve_log.set_solution_type(output_type);
309 solve_log.set_solve_time_sec(stats.cumulative_time_sec());
310 *solve_log.mutable_solution_stats() = stats;
311 return SolverResult{.primal_solution = std::move(primal_solution),
312 .dual_solution = std::move(dual_solution),
313 .solve_log = std::move(solve_log)};
314}
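// Illustrative call (a sketch; the real call sites below construct their own
// arguments):
//
//   SolverResult result = ConstructSolverResult(
//       std::move(primal), std::move(dual), stats,
//       TERMINATION_REASON_OPTIMAL, POINT_TYPE_AVERAGE_ITERATE,
//       std::move(solve_log));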
315
316class PreprocessSolver {
317 public:
318 // Assumes that `qp` and `params` are valid.
319 // Note that the `qp` is intentionally passed by value.
320 // `logger` is captured, and must outlive the class.
321 // NOTE: Many `PreprocessSolver` methods accept a `params` argument. This is
322 // passed as an argument instead of stored as a member variable to support
323 // using different `params` in different contexts with the same
324 // `PreprocessSolver` object.
325 explicit PreprocessSolver(QuadraticProgram qp,
326 const PrimalDualHybridGradientParams& params,
327 SolverLogger* logger);
328
329 // Not copyable or movable (because `glop::MainLpPreprocessor` isn't).
330 PreprocessSolver(const PreprocessSolver&) = delete;
331 PreprocessSolver& operator=(const PreprocessSolver&) = delete;
332 PreprocessSolver(PreprocessSolver&&) = delete;
333 PreprocessSolver& operator=(PreprocessSolver&&) = delete;
334
335 // Zero is used if `initial_solution` is nullopt. If `interrupt_solve` is not
336 // nullptr, then the solver will periodically check if
337 // `interrupt_solve->load()` is true, in which case the solve will terminate
338 // with `TERMINATION_REASON_INTERRUPTED_BY_USER`. Ownership is not
339 // transferred. If `iteration_stats_callback` is not nullptr, then at each
340 // termination step (when iteration stats are logged),
341 // `iteration_stats_callback` will also be called with those iteration stats.
342 SolverResult PreprocessAndSolve(
343 const PrimalDualHybridGradientParams& params,
344 std::optional<PrimalAndDualSolution> initial_solution,
345 const std::atomic<bool>* interrupt_solve,
346 IterationStatsCallback iteration_stats_callback);
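// For orientation, an external caller typically reaches this method through
// the public `PrimalDualHybridGradient()` entry point declared in the header,
// which forwards the same `interrupt_solve` and callback arguments. A minimal
// caller sketch (local names are illustrative; the argument order is assumed
// from that header):
//
//   std::atomic<bool> interrupt(false);
//   PrimalDualHybridGradientParams params;
//   params.mutable_termination_criteria()->set_time_sec_limit(60.0);
//   SolverResult result = PrimalDualHybridGradient(
//       std::move(qp), params, &interrupt,
//       [](const IterationCallbackInfo& info) {
//         // e.g. record info.iteration_stats to report progress.
//       });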
347
348 // Returns a `TerminationReasonAndPointType` when the termination criteria are
349 // satisfied, otherwise returns nothing. The pointers to working_* can be
350 // nullptr if an iterate of that type is not available. For the iterate types
351 // that are available, uses the primal and dual vectors to compute solution
352 // statistics and adds them to the stats proto.
353 // `full_stats` is used for checking work-based termination criteria,
354 // but `stats` is updated with convergence information.
355 // NOTE: The primal and dual input pairs should be scaled solutions.
356 // TODO(user): Move this long argument list into a struct.
357 std::optional<TerminationReasonAndPointType>
358 UpdateIterationStatsAndCheckTermination(
359 const PrimalDualHybridGradientParams& params,
360 bool force_numerical_termination, const VectorXd& working_primal_current,
361 const VectorXd& working_dual_current,
362 const VectorXd* working_primal_average,
363 const VectorXd* working_dual_average,
364 const VectorXd* working_primal_delta, const VectorXd* working_dual_delta,
365 const VectorXd& last_primal_start_point,
366 const VectorXd& last_dual_start_point,
367 const std::atomic<bool>* interrupt_solve, IterationType iteration_type,
368 const IterationStats& full_stats, IterationStats& stats);
369
370 // Computes solution statistics for the primal and dual input pair, which
371 // should be a scaled solution. If `convergence_information != nullptr`,
372 // populates it with convergence information, and similarly if
373 // `infeasibility_information != nullptr`, populates it with infeasibility
374 // information.
375 void ComputeConvergenceAndInfeasibilityFromWorkingSolution(
376 const PrimalDualHybridGradientParams& params,
377 const VectorXd& working_primal, const VectorXd& working_dual,
378 PointType candidate_type, ConvergenceInformation* convergence_information,
379 InfeasibilityInformation* infeasibility_information) const;
380
381 // Returns a `SolverResult` for the original problem, given a `SolverResult`
382 // from the scaled or preprocessed problem. Also computes the reduced costs.
383 // NOTE: `result` is passed by value. To avoid unnecessary copying, move this
384 // object (i.e. use `std::move()`).
385 SolverResult ConstructOriginalSolverResult(
386 const PrimalDualHybridGradientParams& params, SolverResult result,
387 SolverLogger& logger) const;
388
389 const ShardedQuadraticProgram& ShardedWorkingQp() const {
390 return sharded_qp_;
391 }
392
393 // Swaps `variable_lower_bounds` and `variable_upper_bounds` with the ones on
394 // the sharded quadratic program.
395 void SwapVariableBounds(VectorXd& variable_lower_bounds,
396 VectorXd& variable_upper_bounds) {
397 sharded_qp_.SwapVariableBounds(variable_lower_bounds,
398 variable_upper_bounds);
399 }
400
401 // Swaps `constraint_lower_bounds` and `constraint_upper_bounds` with the ones
402 // on the sharded quadratic program.
403 void SwapConstraintBounds(VectorXd& constraint_lower_bounds,
404 VectorXd& constraint_upper_bounds) {
405 sharded_qp_.SwapConstraintBounds(constraint_lower_bounds,
406 constraint_upper_bounds);
407 }
408
409 // Swaps `objective` with the objective vector on the quadratic program.
410 // Swapping `objective_matrix` is not yet supported because it hasn't been
411 // needed.
412 void SwapObjectiveVector(VectorXd& objective) {
413 sharded_qp_.SwapObjectiveVector(objective);
414 }
415
416 const QuadraticProgramBoundNorms& OriginalBoundNorms() const {
417 return original_bound_norms_;
418 }
419
420 SolverLogger& Logger() { return logger_; }
421
422 private:
423 struct PresolveInfo {
424 explicit PresolveInfo(ShardedQuadraticProgram original_qp,
425 const PrimalDualHybridGradientParams& params)
426 : preprocessor_parameters(PreprocessorParameters(params)),
428 sharded_original_qp(std::move(original_qp)),
430 OnesVector(sharded_original_qp.PrimalSharder())),
432 OnesVector(sharded_original_qp.DualSharder())) {}
433
434 glop::GlopParameters preprocessor_parameters;
435 glop::MainLpPreprocessor preprocessor;
436 ShardedQuadraticProgram sharded_original_qp;
438 bool presolved_problem_was_maximization = false;
439 };
440
441 // TODO(user): experiment with different preprocessor types.
442 static glop::GlopParameters PreprocessorParameters(
443 const PrimalDualHybridGradientParams& params);
444
445 // If presolve is enabled, moves `sharded_qp_` to
446 // `presolve_info_.sharded_original_qp`, computes the presolved linear
447 // program, and installs it in `sharded_qp_`. Clears `initial_solution` if
448 // presolve is enabled. If presolve solves the problem completely, returns
449 // the appropriate `TerminationReason`; otherwise returns nullopt. If presolve
450 // is disabled or an error occurs, modifies nothing and returns nullopt.
451 std::optional<TerminationReason> ApplyPresolveIfEnabled(
452 const PrimalDualHybridGradientParams& params,
453 std::optional<PrimalAndDualSolution>* initial_solution);
454
455 void ComputeAndApplyRescaling(const PrimalDualHybridGradientParams& params,
456 VectorXd& starting_primal_solution,
457 VectorXd& starting_dual_solution);
458
459 void LogQuadraticProgramStats(const QuadraticProgramStats& stats) const;
460
461 double InitialPrimalWeight(const PrimalDualHybridGradientParams& params,
462 double l2_norm_primal_linear_objective,
463 double l2_norm_constraint_bounds) const;
464
465 PrimalAndDualSolution RecoverOriginalSolution(
466 PrimalAndDualSolution working_solution) const;
467
468 // Adds one entry of `PointMetadata` to `stats` using the input solutions.
469 void AddPointMetadata(const PrimalDualHybridGradientParams& params,
470 const VectorXd& primal_solution,
471 const VectorXd& dual_solution, PointType point_type,
472 const VectorXd& last_primal_start_point,
473 const VectorXd& last_dual_start_point,
474 IterationStats& stats) const;
475
476 const QuadraticProgram& Qp() const { return sharded_qp_.Qp(); }
477
478 const int num_threads_;
479 const int num_shards_;
480
481 // The bound norms of the original problem.
482 QuadraticProgramBoundNorms original_bound_norms_;
483
484 // This is the QP that PDHG is run on. It is modified by presolve and
485 // rescaling, if those are enabled, and then serves as the
486 // `ShardedWorkingQp()` when calling `Solver::Solve()`. The original problem
487 // is available in `presolve_info_->sharded_original_qp` if
488 // `presolve_info_.has_value()`, and otherwise can be obtained by undoing the
489 // scaling of `sharded_qp_` by `col_scaling_vec_` and `row_scaling_vec_`.
490 ShardedQuadraticProgram sharded_qp_;
491
492 // Set iff presolve is enabled.
493 std::optional<PresolveInfo> presolve_info_;
494
495 // The scaling vectors that map the original (or presolved) quadratic program
496 // to the working version. See
497 // `ShardedQuadraticProgram::RescaleQuadraticProgram()` for details.
498 VectorXd col_scaling_vec_;
499 VectorXd row_scaling_vec_;
500
501 // A counter used to trigger writing iteration headers.
502 int log_counter_ = 0;
503 absl::Time time_of_last_log_ = absl::InfinitePast();
504 SolverLogger& logger_;
505 IterationStatsCallback iteration_stats_callback_;
506};
507
508class Solver {
509 public:
510 // `preprocess_solver` should not be nullptr, and the `PreprocessSolver`
511 // object must outlive this `Solver` object. Ownership is not transferred.
512 explicit Solver(const PrimalDualHybridGradientParams& params,
513 VectorXd starting_primal_solution,
514 VectorXd starting_dual_solution, double initial_step_size,
515 double initial_primal_weight,
516 PreprocessSolver* preprocess_solver);
517
518 // Not copyable or movable (because there are const members).
519 Solver(const Solver&) = delete;
520 Solver& operator=(const Solver&) = delete;
521 Solver(Solver&&) = delete;
522 Solver& operator=(Solver&&) = delete;
523
524 const QuadraticProgram& WorkingQp() const { return ShardedWorkingQp().Qp(); }
525
526 const ShardedQuadraticProgram& ShardedWorkingQp() const {
527 return preprocess_solver_->ShardedWorkingQp();
528 }
529
530 // Runs PDHG iterations on the instance that has been initialized in `Solver`.
531 // If `interrupt_solve` is not nullptr, then the solver will periodically
532 // check if `interrupt_solve->load()` is true, in which case the solve will
533 // terminate with `TERMINATION_REASON_INTERRUPTED_BY_USER`. Ownership is not
534 // transferred.
535 // `solve_log` should contain initial problem statistics.
536 // On return, `SolverResult.reduced_costs` will be empty, and the solution will
537 // be to the preprocessed/scaled problem.
538 SolverResult Solve(IterationType iteration_type,
539 const std::atomic<bool>* interrupt_solve,
540 SolveLog solve_log);
541
542 private:
543 struct NextSolutionAndDelta {
544 VectorXd value;
545 // `delta` is `value` - current_solution.
546 VectorXd delta;
547 };
548
549 struct DistanceBasedRestartInfo {
550 double distance_moved_last_restart_period;
551 int length_of_last_restart_period;
552 };
553
554 // Movement terms (weighted squared norms of primal and dual deltas) larger
555 // than this cause termination because the iterates are diverging and are
556 // likely to produce infinite or NaN values.
557 constexpr static double kDivergentMovement = 1.0e100;
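// For intuition: the quantity compared against this threshold is the value of
// `ComputeMovement()`, which (up to the exact constants used there) has the
// weighted form
//   0.5 * primal_weight * ||delta_primal||^2
//       + (0.5 / primal_weight) * ||delta_dual||^2,
// so a divergently large step in either the primal or the dual triggers
// numerical termination.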
558
559 // Attempts to solve primal and dual feasibility subproblems starting at the
560 // average iterate, for at most `iteration_limit` iterations each. If
561 // successful, returns a `SolverResult`, otherwise nullopt. Appends
562 // information about the polishing phases to
563 // `solve_log.feasibility_polishing_details`.
564 std::optional<SolverResult> TryFeasibilityPolishing(
565 int iteration_limit, const std::atomic<bool>* interrupt_solve,
566 SolveLog& solve_log);
567
568 // Tries to find primal feasibility, adds the solve log details to
569 // `solve_log.feasibility_polishing_details`, and returns the result.
570 SolverResult TryPrimalPolishing(VectorXd starting_primal_solution,
571 int iteration_limit,
572 const std::atomic<bool>* interrupt_solve,
573 SolveLog& solve_log);
574
575 // Tries to find dual feasibility, adds the solve log details to
576 // `solve_log.feasibility_polishing_details`, and returns the result.
577 SolverResult TryDualPolishing(VectorXd starting_dual_solution,
578 int iteration_limit,
579 const std::atomic<bool>* interrupt_solve,
580 SolveLog& solve_log);
581
582 NextSolutionAndDelta ComputeNextPrimalSolution(double primal_step_size) const;
583
584 NextSolutionAndDelta ComputeNextDualSolution(
585 double dual_step_size, double extrapolation_factor,
586 const NextSolutionAndDelta& next_primal) const;
587
588 double ComputeMovement(const VectorXd& delta_primal,
589 const VectorXd& delta_dual) const;
590
591 double ComputeNonlinearity(const VectorXd& delta_primal,
592 const VectorXd& next_dual_product) const;
593
594 // Creates all the simple-to-compute statistics in stats.
595 IterationStats CreateSimpleIterationStats(RestartChoice restart_used) const;
596
597 // Returns the total work so far from the main iterations and feasibility
598 // polishing phases.
599 IterationStats TotalWorkSoFar(const SolveLog& solve_log) const;
600
601 RestartChoice ChooseRestartToApply(bool is_major_iteration);
602
603 VectorXd PrimalAverage() const;
604
605 VectorXd DualAverage() const;
606
607 double ComputeNewPrimalWeight() const;
608
609 // Picks the primal and dual solutions according to `output_type`, and makes
610 // the closing changes to `solve_log`. This function should only be called
611 // once when the solver is finishing its execution.
612 // NOTE: `primal_solution` and `dual_solution` are used as the output except
613 // when `output_type` is `POINT_TYPE_CURRENT_ITERATE` or
614 // `POINT_TYPE_ITERATE_DIFFERENCE`, in which case the values are computed from
615 // `Solver` data.
616 // NOTE: `primal_solution`, `dual_solution`, and `solve_log` are passed by
617 // value. To avoid unnecessary copying, move these objects (i.e. use
618 // `std::move()`).
619 SolverResult PickSolutionAndConstructSolverResult(
620 VectorXd primal_solution, VectorXd dual_solution,
621 const IterationStats& stats, TerminationReason termination_reason,
622 PointType output_type, SolveLog solve_log) const;
623
624 double DistanceTraveledFromLastStart(const VectorXd& primal_solution,
625 const VectorXd& dual_solution) const;
626
627 LocalizedLagrangianBounds ComputeLocalizedBoundsAtCurrent() const;
628
629 LocalizedLagrangianBounds ComputeLocalizedBoundsAtAverage() const;
630
631 // Applies the given `RestartChoice`. If a restart is chosen, updates the
632 // state of the algorithm accordingly and computes a new primal weight.
633 void ApplyRestartChoice(RestartChoice restart_to_apply);
634
635 std::optional<SolverResult> MajorIterationAndTerminationCheck(
636 IterationType iteration_type, bool force_numerical_termination,
637 const std::atomic<bool>* interrupt_solve,
638 const IterationStats& work_from_feasibility_polishing,
639 SolveLog& solve_log);
640
641 bool ShouldDoAdaptiveRestartHeuristic(double candidate_normalized_gap) const;
642
643 RestartChoice DetermineDistanceBasedRestartChoice() const;
644
645 void ResetAverageToCurrent();
646
647 void LogNumericalTermination() const;
648
649 void LogInnerIterationLimitHit() const;
650
651 // Takes a step based on the Malitsky and Pock linesearch algorithm.
652 // (https://arxiv.org/pdf/1608.08883.pdf)
653 // The current implementation is provably convergent (at an optimal rate)
654 // for LPs (provided we do not change the primal weight at every major
655 // iteration). Further, we have observed that this rule is very sensitive to
656 // the parameter choice whenever we apply the primal weight recomputation
657 // heuristic.
658 InnerStepOutcome TakeMalitskyPockStep();
659
660 // Takes a step based on the adaptive heuristic presented in Section 3.1 of
661 // https://arxiv.org/pdf/2106.04756.pdf (further generalized to QP).
662 InnerStepOutcome TakeAdaptiveStep();
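// In outline (a sketch of that rule; the definition of `TakeAdaptiveStep()` is
// the authoritative implementation), each trial step computes
//   movement     = ComputeMovement(delta_primal, delta_dual);
//   nonlinearity = ComputeNonlinearity(delta_primal, next_dual_product);
//   limit        = movement / nonlinearity;  // largest provably safe step
// accepts the step only if `step_size_` <= limit, and in either case resets
// the step size to roughly
//   min((1 - (k + 1)^-0.3) * limit, (1 + (k + 1)^-0.6) * step_size_),
// where k counts the iterations completed so far.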
663
664 // Takes a constant-size step.
665 InnerStepOutcome TakeConstantSizeStep();
666
667 const PrimalDualHybridGradientParams params_;
668
669 VectorXd current_primal_solution_;
670 VectorXd current_dual_solution_;
671 VectorXd current_primal_delta_;
672 VectorXd current_dual_delta_;
673
674 ShardedWeightedAverage primal_average_;
675 ShardedWeightedAverage dual_average_;
676
677 double step_size_;
678 double primal_weight_;
679
680 PreprocessSolver* preprocess_solver_;
681
682 // For Malitsky-Pock linesearch only: `step_size_` / previous_step_size
683 double ratio_last_two_step_sizes_;
684 // For adaptive restarts only.
685 double normalized_gap_at_last_trial_ =
686 std::numeric_limits<double>::infinity();
687 // For adaptive restarts only.
688 double normalized_gap_at_last_restart_ =
689 std::numeric_limits<double>::infinity();
690
691 // `preprocessing_time_sec_` is added to the current value of `timer_` when
692 // computing `cumulative_time_sec` in iteration stats.
693 double preprocessing_time_sec_;
694 WallTimer timer_;
695 int iterations_completed_;
696 int num_rejected_steps_;
697 // A cache of `constraint_matrix.transpose() * current_dual_solution_`.
698 VectorXd current_dual_product_;
700 // The primal point from which the algorithm was last restarted, or
701 // the initial primal starting point if no restart has occurred.
702 VectorXd last_primal_start_point_;
703 // The dual point from which the algorithm was last restarted, or
704 // the initial dual starting point if no restart has occurred.
705 VectorXd last_dual_start_point_;
705 // Information for deciding whether to trigger a distance-based restart.
706 // The distances are initialized to +inf to force a restart during the first
707 // major iteration check.
708 DistanceBasedRestartInfo distance_based_restart_info_ = {
709 .distance_moved_last_restart_period =
710 std::numeric_limits<double>::infinity(),
711 .length_of_last_restart_period = 1,
712 };
713};
714
715PreprocessSolver::PreprocessSolver(QuadraticProgram qp,
716 const PrimalDualHybridGradientParams& params,
717 SolverLogger* logger)
718 : num_threads_(
719 NumThreads(params.num_threads(), params.num_shards(), qp, *logger)),
720 num_shards_(NumShards(num_threads_, params.num_shards())),
721 sharded_qp_(std::move(qp), num_threads_, num_shards_),
722 logger_(*logger) {}
723
724SolverResult ErrorSolverResult(const TerminationReason reason,
725 const std::string& message,
726 SolverLogger& logger) {
727 SolveLog error_log;
728 error_log.set_termination_reason(reason);
729 error_log.set_termination_string(message);
730 SOLVER_LOG(&logger,
731 "The solver did not run because of invalid input: ", message);
732 return SolverResult{.solve_log = error_log};
733}
734
735 // If `check_excessively_small_values` is true, in addition to checking for
736 // extreme values that PDLP can't handle, also checks for extremely small
737 // constraint bounds, variable bound gaps, and objective vector values that
738 // PDLP can handle but that cause problems for other code such as glop's presolve.
739std::optional<SolverResult> CheckProblemStats(
740 const QuadraticProgramStats& problem_stats, const double objective_offset,
741 bool check_excessively_small_values, SolverLogger& logger) {
742 const double kExcessiveInputValue = 1e50;
743 const double kExcessivelySmallInputValue = 1e-50;
744 const double kMaxDynamicRange = 1e20;
745 if (std::isnan(problem_stats.constraint_matrix_l2_norm())) {
746 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
747 "Constraint matrix has a NAN.", logger);
748 }
749 if (problem_stats.constraint_matrix_abs_max() > kExcessiveInputValue) {
750 return ErrorSolverResult(
751 TERMINATION_REASON_INVALID_PROBLEM,
752 absl::StrCat("Constraint matrix has a non-zero with absolute value ",
753 problem_stats.constraint_matrix_abs_max(),
754 " which exceeds limit of ", kExcessiveInputValue, "."),
755 logger);
756 }
757 if (problem_stats.constraint_matrix_abs_max() >
758 kMaxDynamicRange * problem_stats.constraint_matrix_abs_min()) {
759 SOLVER_LOG(
760 &logger, "WARNING: Constraint matrix has largest absolute value ",
761 problem_stats.constraint_matrix_abs_max(),
762 " and smallest non-zero absolute value ",
763 problem_stats.constraint_matrix_abs_min(), " performance may suffer.");
764 }
765 if (problem_stats.constraint_matrix_col_min_l_inf_norm() > 0 &&
766 problem_stats.constraint_matrix_col_min_l_inf_norm() <
767 kExcessivelySmallInputValue) {
768 return ErrorSolverResult(
769 TERMINATION_REASON_INVALID_PROBLEM,
770 absl::StrCat("Constraint matrix has a column with Linf norm ",
771 problem_stats.constraint_matrix_col_min_l_inf_norm(),
772 " which is less than limit of ",
773 kExcessivelySmallInputValue, "."),
774 logger);
775 }
776 if (problem_stats.constraint_matrix_row_min_l_inf_norm() > 0 &&
777 problem_stats.constraint_matrix_row_min_l_inf_norm() <
778 kExcessivelySmallInputValue) {
779 return ErrorSolverResult(
780 TERMINATION_REASON_INVALID_PROBLEM,
781 absl::StrCat("Constraint matrix has a row with Linf norm ",
782 problem_stats.constraint_matrix_row_min_l_inf_norm(),
783 " which is less than limit of ",
784 kExcessivelySmallInputValue, "."),
785 logger);
786 }
787 if (std::isnan(problem_stats.combined_bounds_l2_norm())) {
788 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
789 "Constraint bounds vector has a NAN.", logger);
790 }
791 if (problem_stats.combined_bounds_max() > kExcessiveInputValue) {
792 return ErrorSolverResult(
793 TERMINATION_REASON_INVALID_PROBLEM,
794 absl::StrCat("Combined constraint bounds vector has a non-zero with "
795 "absolute value ",
796 problem_stats.combined_bounds_max(),
797 " which exceeds limit of ", kExcessiveInputValue, "."),
798 logger);
799 }
800 if (check_excessively_small_values &&
801 problem_stats.combined_bounds_min() > 0 &&
802 problem_stats.combined_bounds_min() < kExcessivelySmallInputValue) {
803 return ErrorSolverResult(
804 TERMINATION_REASON_INVALID_PROBLEM,
805 absl::StrCat("Combined constraint bounds vector has a non-zero with "
806 "absolute value ",
807 problem_stats.combined_bounds_min(),
808 " which is less than the limit of ",
809 kExcessivelySmallInputValue, "."),
810 logger);
811 }
812 if (problem_stats.combined_bounds_max() >
813 kMaxDynamicRange * problem_stats.combined_bounds_min()) {
814 SOLVER_LOG(&logger,
815 "WARNING: Combined constraint bounds vector has largest "
816 "absolute value ",
817 problem_stats.combined_bounds_max(),
818 " and smallest non-zero absolute value ",
819 problem_stats.combined_bounds_min(),
820 "; performance may suffer.");
821 }
822 if (std::isnan(problem_stats.combined_variable_bounds_l2_norm())) {
823 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
824 "Variable bounds vector has a NAN.", logger);
825 }
826 if (problem_stats.combined_variable_bounds_max() > kExcessiveInputValue) {
827 return ErrorSolverResult(
828 TERMINATION_REASON_INVALID_PROBLEM,
829 absl::StrCat("Combined variable bounds vector has a non-zero with "
830 "absolute value ",
831 problem_stats.combined_variable_bounds_max(),
832 " which exceeds limit of ", kExcessiveInputValue, "."),
833 logger);
834 }
835 if (check_excessively_small_values &&
836 problem_stats.combined_variable_bounds_min() > 0 &&
837 problem_stats.combined_variable_bounds_min() <
838 kExcessivelySmallInputValue) {
839 return ErrorSolverResult(
840 TERMINATION_REASON_INVALID_PROBLEM,
841 absl::StrCat("Combined variable bounds vector has a non-zero with "
842 "absolute value ",
843 problem_stats.combined_variable_bounds_min(),
844 " which is less than the limit of ",
845 kExcessivelySmallInputValue, "."),
846 logger);
847 }
848 if (problem_stats.combined_variable_bounds_max() >
849 kMaxDynamicRange * problem_stats.combined_variable_bounds_min()) {
850 SOLVER_LOG(
851 &logger,
852 "WARNING: Combined variable bounds vector has largest absolute value ",
853 problem_stats.combined_variable_bounds_max(),
854 " and smallest non-zero absolute value ",
855 problem_stats.combined_variable_bounds_min(),
856 "; performance may suffer.");
857 }
858 if (problem_stats.variable_bound_gaps_max() >
859 kMaxDynamicRange * problem_stats.variable_bound_gaps_min()) {
860 SOLVER_LOG(&logger,
861 "WARNING: Variable bound gap vector has largest absolute value ",
862 problem_stats.variable_bound_gaps_max(),
863 " and smallest non-zero absolute value ",
864 problem_stats.variable_bound_gaps_min(),
865 "; performance may suffer.");
866 }
867 if (std::isnan(objective_offset)) {
868 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
869 "Objective offset is NAN.", logger);
870 }
871 if (std::abs(objective_offset) > kExcessiveInputValue) {
872 return ErrorSolverResult(
873 TERMINATION_REASON_INVALID_PROBLEM,
874 absl::StrCat("Objective offset ", objective_offset,
875 " has absolute value which exceeds limit of ",
876 kExcessiveInputValue, "."),
877 logger);
878 }
879 if (std::isnan(problem_stats.objective_vector_l2_norm())) {
880 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
881 "Objective vector has a NAN.", logger);
882 }
883 if (problem_stats.objective_vector_abs_max() > kExcessiveInputValue) {
884 return ErrorSolverResult(
885 TERMINATION_REASON_INVALID_PROBLEM,
886 absl::StrCat("Objective vector has a non-zero with absolute value ",
887 problem_stats.objective_vector_abs_max(),
888 " which exceeds limit of ", kExcessiveInputValue, "."),
889 logger);
890 }
891 if (check_excessively_small_values &&
892 problem_stats.objective_vector_abs_min() > 0 &&
893 problem_stats.objective_vector_abs_min() < kExcessivelySmallInputValue) {
894 return ErrorSolverResult(
895 TERMINATION_REASON_INVALID_PROBLEM,
896 absl::StrCat("Objective vector has a non-zero with absolute value ",
897 problem_stats.objective_vector_abs_min(),
898 " which is less than the limit of ",
899 kExcessivelySmallInputValue, "."),
900 logger);
901 }
902 if (problem_stats.objective_vector_abs_max() >
903 kMaxDynamicRange * problem_stats.objective_vector_abs_min()) {
904 SOLVER_LOG(&logger, "WARNING: Objective vector has largest absolute value ",
905 problem_stats.objective_vector_abs_max(),
906 " and smallest non-zero absolute value ",
907 problem_stats.objective_vector_abs_min(),
908 "; performance may suffer.");
909 }
910 if (std::isnan(problem_stats.objective_matrix_l2_norm())) {
911 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
912 "Objective matrix has a NAN.", logger);
913 }
914 if (problem_stats.objective_matrix_abs_max() > kExcessiveInputValue) {
915 return ErrorSolverResult(
916 TERMINATION_REASON_INVALID_PROBLEM,
917 absl::StrCat("Objective matrix has a non-zero with absolute value ",
918 problem_stats.objective_matrix_abs_max(),
919 " which exceeds limit of ", kExcessiveInputValue, "."),
920 logger);
921 }
922 if (problem_stats.objective_matrix_abs_max() >
923 kMaxDynamicRange * problem_stats.objective_matrix_abs_min()) {
924 SOLVER_LOG(&logger, "WARNING: Objective matrix has largest absolute value ",
925 problem_stats.objective_matrix_abs_max(),
926 " and smallest non-zero absolute value ",
927 problem_stats.objective_matrix_abs_min(),
928 "; performance may suffer.");
929 }
930 return std::nullopt;
931}
932
933std::optional<SolverResult> CheckInitialSolution(
934 const ShardedQuadraticProgram& sharded_qp,
935 const PrimalAndDualSolution& initial_solution, SolverLogger& logger) {
936 const double kExcessiveInputValue = 1e50;
937 if (initial_solution.primal_solution.size() != sharded_qp.PrimalSize()) {
938 return ErrorSolverResult(
939 TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
940 absl::StrCat("Initial primal solution has size ",
941 initial_solution.primal_solution.size(),
942 " which differs from problem primal size ",
943 sharded_qp.PrimalSize()),
944 logger);
945 }
946 if (std::isnan(
947 Norm(initial_solution.primal_solution, sharded_qp.PrimalSharder()))) {
948 return ErrorSolverResult(TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
949 "Initial primal solution has a NAN.", logger);
950 }
951 if (const double norm = LInfNorm(initial_solution.primal_solution,
952 sharded_qp.PrimalSharder());
953 norm > kExcessiveInputValue) {
954 return ErrorSolverResult(
955 TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
956 absl::StrCat(
957 "Initial primal solution has an entry with absolute value ", norm,
958 " which exceeds limit of ", kExcessiveInputValue),
959 logger);
960 }
961 if (initial_solution.dual_solution.size() != sharded_qp.DualSize()) {
962 return ErrorSolverResult(
963 TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
964 absl::StrCat("Initial dual solution has size ",
965 initial_solution.dual_solution.size(),
966 " which differs from problem dual size ",
967 sharded_qp.DualSize()),
968 logger);
969 }
970 if (std::isnan(
971 Norm(initial_solution.dual_solution, sharded_qp.DualSharder()))) {
972 return ErrorSolverResult(TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
973 "Initial dual solution has a NAN.", logger);
974 }
975 if (const double norm =
976 LInfNorm(initial_solution.dual_solution, sharded_qp.DualSharder());
977 norm > kExcessiveInputValue) {
978 return ErrorSolverResult(
979 TERMINATION_REASON_INVALID_INITIAL_SOLUTION,
980 absl::StrCat("Initial dual solution has an entry with absolute value ",
981 norm, " which exceeds limit of ", kExcessiveInputValue),
982 logger);
983 }
984 return std::nullopt;
985}
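// Accordingly, a valid warm start has exactly `PrimalSize()` primal entries
// and `DualSize()` dual entries, with no NANs and no entries exceeding 1e50 in
// absolute value. A minimal construction sketch (names are illustrative):
//
//   PrimalAndDualSolution warm_start;
//   warm_start.primal_solution = VectorXd::Zero(sharded_qp.PrimalSize());
//   warm_start.dual_solution = VectorXd::Zero(sharded_qp.DualSize());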
986
987SolverResult PreprocessSolver::PreprocessAndSolve(
988 const PrimalDualHybridGradientParams& params,
989 std::optional<PrimalAndDualSolution> initial_solution,
990 const std::atomic<bool>* interrupt_solve,
991 IterationStatsCallback iteration_stats_callback) {
992 WallTimer timer;
993 timer.Start();
994 SolveLog solve_log;
995 if (params.verbosity_level() >= 1) {
996 SOLVER_LOG(&logger_, "Solving with PDLP parameters: ", params);
997 }
998 if (Qp().problem_name.has_value()) {
999 solve_log.set_instance_name(*Qp().problem_name);
1000 }
1001 *solve_log.mutable_params() = params;
1002 sharded_qp_.ReplaceLargeConstraintBoundsWithInfinity(
1003 params.infinite_constraint_bound_threshold());
1004 if (!HasValidBounds(sharded_qp_)) {
1005 return ErrorSolverResult(
1006 TERMINATION_REASON_INVALID_PROBLEM,
1007 "The input problem has invalid bounds (after replacing large "
1008 "constraint bounds with infinity): some variable or constraint has "
1009 "lower_bound > upper_bound, lower_bound == inf, or upper_bound == "
1010 "-inf.",
1011 logger_);
1012 }
1013 if (Qp().objective_matrix.has_value() &&
1014 !sharded_qp_.PrimalSharder().ParallelTrueForAllShards(
1015 [&](const Sharder::Shard& shard) -> bool {
1016 return (shard(Qp().objective_matrix->diagonal()).array() >= 0.0)
1017 .all();
1018 })) {
1019 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
1020 "The objective is not convex (i.e., the objective "
1021 "matrix contains negative or NAN entries).",
1022 logger_);
1023 }
1024 *solve_log.mutable_original_problem_stats() = ComputeStats(sharded_qp_);
1025 const QuadraticProgramStats& original_problem_stats =
1026 solve_log.original_problem_stats();
1027 if (auto maybe_result =
1028 CheckProblemStats(original_problem_stats, Qp().objective_offset,
1029 params.presolve_options().use_glop(), logger_);
1030 maybe_result.has_value()) {
1031 return *maybe_result;
1032 }
1033 if (initial_solution.has_value()) {
1034 if (auto maybe_result =
1035 CheckInitialSolution(sharded_qp_, *initial_solution, logger_);
1036 maybe_result.has_value()) {
1037 return *maybe_result;
1038 }
1039 }
1040 original_bound_norms_ = BoundNormsFromProblemStats(original_problem_stats);
1041 const std::string preprocessing_string = absl::StrCat(
1042 params.presolve_options().use_glop() ? "presolving and " : "",
1043 "rescaling:");
1044 if (params.verbosity_level() >= 1) {
1045 SOLVER_LOG(&logger_, "Problem stats before ", preprocessing_string);
1046 LogQuadraticProgramStats(solve_log.original_problem_stats());
1047 }
1048 iteration_stats_callback_ = std::move(iteration_stats_callback);
1049 std::optional<TerminationReason> maybe_terminate =
1050 ApplyPresolveIfEnabled(params, &initial_solution);
1051 if (maybe_terminate.has_value()) {
1052 // Glop also feeds zero primal and dual solutions when the preprocessor
1053 // has a non-INIT status. When the preprocessor status is optimal the
1054 // vectors have length 0. When the status is something else the lengths
1055 // may be non-zero, but that's OK since we don't promise to produce a
1056 // meaningful solution in that case.
1057 IterationStats iteration_stats;
1058 iteration_stats.set_cumulative_time_sec(timer.Get());
1059 solve_log.set_preprocessing_time_sec(iteration_stats.cumulative_time_sec());
1060 VectorXd working_primal = ZeroVector(sharded_qp_.PrimalSharder());
1061 VectorXd working_dual = ZeroVector(sharded_qp_.DualSharder());
1062 ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1063 params, working_primal, working_dual, POINT_TYPE_PRESOLVER_SOLUTION,
1064 iteration_stats.add_convergence_information(),
1065 iteration_stats.add_infeasibility_information());
1066 std::optional<TerminationReasonAndPointType> earned_termination =
1067 CheckIterateTerminationCriteria(params.termination_criteria(),
1068 iteration_stats, original_bound_norms_,
1069 /*force_numerical_termination=*/false);
1070 if (!earned_termination.has_value()) {
1071 earned_termination = CheckSimpleTerminationCriteria(
1072 params.termination_criteria(), iteration_stats, interrupt_solve);
1073 }
1074 TerminationReason final_termination_reason;
1075 if (earned_termination.has_value() &&
1076 (earned_termination->reason == TERMINATION_REASON_OPTIMAL ||
1077 earned_termination->reason == TERMINATION_REASON_PRIMAL_INFEASIBLE ||
1078 earned_termination->reason == TERMINATION_REASON_DUAL_INFEASIBLE)) {
1079 final_termination_reason = earned_termination->reason;
1080 } else {
1081 if (*maybe_terminate == TERMINATION_REASON_OPTIMAL) {
1082 final_termination_reason = TERMINATION_REASON_NUMERICAL_ERROR;
1083 SOLVER_LOG(
1084 &logger_,
1085 "WARNING: Presolve claimed to solve the LP optimally but the "
1086 "solution doesn't satisfy the optimality criteria.");
1087 } else {
1088 final_termination_reason = *maybe_terminate;
1089 }
1090 }
1091 return ConstructOriginalSolverResult(
1092 params,
1093 ConstructSolverResult(
1094 std::move(working_primal), std::move(working_dual),
1095 std::move(iteration_stats), final_termination_reason,
1096 POINT_TYPE_PRESOLVER_SOLUTION, std::move(solve_log)),
1097 logger_);
1098 }
1099
1100 VectorXd starting_primal_solution;
1101 VectorXd starting_dual_solution;
1102 // The current solution is updated by `ComputeAndApplyRescaling`.
1103 if (initial_solution.has_value()) {
1104 starting_primal_solution = std::move(initial_solution->primal_solution);
1105 starting_dual_solution = std::move(initial_solution->dual_solution);
1106 } else {
1107 SetZero(sharded_qp_.PrimalSharder(), starting_primal_solution);
1108 SetZero(sharded_qp_.DualSharder(), starting_dual_solution);
1109 }
1110 // The following projections are necessary since all our checks assume that
1111 // the primal and dual variable bounds are satisfied.
1112 ProjectToPrimalVariableBounds(sharded_qp_, starting_primal_solution);
1113 ProjectToDualVariableBounds(sharded_qp_, starting_dual_solution);
1114
1115 ComputeAndApplyRescaling(params, starting_primal_solution,
1116 starting_dual_solution);
1117 *solve_log.mutable_preprocessed_problem_stats() = ComputeStats(sharded_qp_);
1118 if (params.verbosity_level() >= 1) {
1119 SOLVER_LOG(&logger_, "Problem stats after ", preprocessing_string);
1120 LogQuadraticProgramStats(solve_log.preprocessed_problem_stats());
1121 }
1122
1123 double step_size = 0.0;
1124 if (params.linesearch_rule() ==
1125 PrimalDualHybridGradientParams::CONSTANT_STEP_SIZE_RULE) {
1126 std::mt19937 random(1);
1127 double inverse_step_size;
1128 const auto lipschitz_result =
1129 EstimateMaximumSingularValueOfConstraintMatrix(
1130 sharded_qp_, std::nullopt, std::nullopt,
1131 /*desired_relative_error=*/0.2, /*failure_probability=*/0.0005,
1132 random);
1133 // With high probability, `lipschitz_result.singular_value` is within
1134 // +/- `lipschitz_result.estimated_relative_error
1135 // * lipschitz_result.singular_value`
1136 const double lipschitz_term_upper_bound =
1137 lipschitz_result.singular_value /
1138 (1.0 - lipschitz_result.estimated_relative_error);
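// For example, an estimate of 10.0 with `estimated_relative_error` == 0.1
// yields an upper bound of 10.0 / 0.9, i.e. about 11.1, and hence an initial
// step size of roughly 0.09 (before `initial_step_size_scaling` is applied
// below).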
1139 inverse_step_size = lipschitz_term_upper_bound;
1140 step_size = inverse_step_size > 0.0 ? 1.0 / inverse_step_size : 1.0;
1141 } else {
1142 // This initial step size is designed to err on the side of being too big.
1143 // This is because
1144 // (i) too-big steps are rejected and hence don't hurt us other than
1145 // wasting
1146 // an iteration and
1147 // (ii) the step size adjustment algorithm shrinks the step size as far as
1148 // needed in a single iteration but raises it slowly.
1149 // The tiny constant is there to keep the step size finite in the case of a
1150 // trivial LP with no constraints.
1151 step_size =
1152 1.0 /
1153 std::max(
1154 1.0e-20,
1155 solve_log.preprocessed_problem_stats().constraint_matrix_abs_max());
1156 }
1157 step_size *= params.initial_step_size_scaling();
1158
1159 const double primal_weight = InitialPrimalWeight(
1160 params, solve_log.preprocessed_problem_stats().objective_vector_l2_norm(),
1161 solve_log.preprocessed_problem_stats().combined_bounds_l2_norm());
1162
1163 Solver solver(params, starting_primal_solution, starting_dual_solution,
1164 step_size, primal_weight, this);
1165 solve_log.set_preprocessing_time_sec(timer.Get());
1166 SolverResult result = solver.Solve(IterationType::kNormal, interrupt_solve,
1167 std::move(solve_log));
1168 return ConstructOriginalSolverResult(params, std::move(result), logger_);
1169}
1170
1171glop::GlopParameters PreprocessSolver::PreprocessorParameters(
1172 const PrimalDualHybridGradientParams& params) {
1173 glop::GlopParameters glop_params;
1174 // TODO(user): Test if dualization helps or hurts performance.
1175 glop_params.set_solve_dual_problem(glop::GlopParameters::NEVER_DO);
1176 // Experiments show that this preprocessing step can hurt because it relaxes
1177 // variable bounds.
1178 glop_params.set_use_implied_free_preprocessor(false);
1179 // We do our own scaling.
1180 glop_params.set_use_scaling(false);
1181 if (params.presolve_options().has_glop_parameters()) {
1182 glop_params.MergeFrom(params.presolve_options().glop_parameters());
1183 }
1184 return glop_params;
1185}
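// For callers: glop presolve is driven by the `presolve_options` submessage of
// the PDLP parameters, and any user-supplied glop parameters are merged on top
// of the defaults chosen above. A sketch:
//
//   PrimalDualHybridGradientParams params;
//   params.mutable_presolve_options()->set_use_glop(true);
//   params.mutable_presolve_options()->mutable_glop_parameters()
//       ->set_use_implied_free_preprocessor(true);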
1186
1187TerminationReason GlopStatusToTerminationReason(
1188 const glop::ProblemStatus glop_status, SolverLogger& logger) {
1189 switch (glop_status) {
1190 case glop::ProblemStatus::OPTIMAL:
1191 return TERMINATION_REASON_OPTIMAL;
1192 case glop::ProblemStatus::INVALID_PROBLEM:
1193 return TERMINATION_REASON_INVALID_PROBLEM;
1194 case glop::ProblemStatus::ABNORMAL:
1195 case glop::ProblemStatus::IMPRECISE:
1196 return TERMINATION_REASON_NUMERICAL_ERROR;
1197 case glop::ProblemStatus::PRIMAL_INFEASIBLE:
1198 case glop::ProblemStatus::DUAL_INFEASIBLE:
1199 case glop::ProblemStatus::INFEASIBLE_OR_UNBOUNDED:
1200 case glop::ProblemStatus::DUAL_UNBOUNDED:
1201 case glop::ProblemStatus::PRIMAL_UNBOUNDED:
1202 return TERMINATION_REASON_PRIMAL_OR_DUAL_INFEASIBLE;
1203 default:
1204 SOLVER_LOG(&logger, "WARNING: Unexpected preprocessor status ",
1205 glop_status);
1206 return TERMINATION_REASON_OTHER;
1207 }
1208}
1209
1210std::optional<TerminationReason> PreprocessSolver::ApplyPresolveIfEnabled(
1211 const PrimalDualHybridGradientParams& params,
1212 std::optional<PrimalAndDualSolution>* const initial_solution) {
1213 const bool presolve_enabled = params.presolve_options().use_glop();
1214 if (!presolve_enabled) {
1215 return std::nullopt;
1216 }
1217 if (!IsLinearProgram(Qp())) {
1218 SOLVER_LOG(&logger_,
1219 "WARNING: Skipping presolve, which is only supported for linear "
1220 "programs");
1221 return std::nullopt;
1222 }
1223 absl::StatusOr<MPModelProto> model = QpToMpModelProto(Qp());
1224 if (!model.ok()) {
1225 SOLVER_LOG(&logger_,
1226 "WARNING: Skipping presolve because of error converting to "
1227 "MPModelProto: ",
1228 model.status());
1229 return std::nullopt;
1230 }
1231 if (initial_solution->has_value()) {
1232 SOLVER_LOG(&logger_,
1233 "WARNING: Ignoring initial solution. Initial solutions are "
1234 "ignored when presolve is on.");
1235 initial_solution->reset();
1236 }
1237 glop::LinearProgram glop_lp;
1238 glop::MPModelProtoToLinearProgram(*model, &glop_lp);
1239 // Save RAM.
1240 model->Clear();
1241 presolve_info_.emplace(std::move(sharded_qp_), params);
1242 // To simplify our code we ignore the return value indicating whether
1243 // postprocessing is required. We simply call `RecoverSolution()`
1244 // unconditionally, which may do nothing.
1245 presolve_info_->preprocessor.Run(&glop_lp);
1246 presolve_info_->presolved_problem_was_maximization =
1247 glop_lp.IsMaximizationProblem();
1248 MPModelProto output;
1249 glop::LinearProgramToMPModelProto(glop_lp, &output);
1250 // This will only fail if given an invalid LP, which shouldn't happen.
1251 absl::StatusOr<QuadraticProgram> presolved_qp =
1252 QpFromMpModelProto(output, /*relax_integer_variables=*/false);
1253 CHECK_OK(presolved_qp.status());
1254 // `MPModelProto` doesn't support scaling factors, so if `glop_lp` has an
1255 // `objective_scaling_factor` it won't be set in output and `presolved_qp`.
1256 // The scaling factor of `presolved_qp` isn't actually used anywhere, but we
1257 // set it for completeness.
1258 presolved_qp->objective_scaling_factor = glop_lp.objective_scaling_factor();
1259 sharded_qp_ = ShardedQuadraticProgram(std::move(*presolved_qp), num_threads_,
1260 num_shards_);
1261 // A status of `INIT` means the preprocessor created a (usually) smaller
1262 // problem that needs solving. Other statuses mean the preprocessor solved
1263 // the problem completely.
1264 if (presolve_info_->preprocessor.status() != glop::ProblemStatus::INIT) {
1265 col_scaling_vec_ = OnesVector(sharded_qp_.PrimalSharder());
1266 row_scaling_vec_ = OnesVector(sharded_qp_.DualSharder());
1267 return GlopStatusToTerminationReason(presolve_info_->preprocessor.status(),
1268 logger_);
1269 }
1270 return std::nullopt;
1271}
1272
1273void PreprocessSolver::ComputeAndApplyRescaling(
1274 const PrimalDualHybridGradientParams& params,
1275 VectorXd& starting_primal_solution, VectorXd& starting_dual_solution) {
1276 ScalingVectors scaling = ApplyRescaling(
1277 RescalingOptions{.l_inf_ruiz_iterations = params.l_inf_ruiz_iterations(),
1278 .l2_norm_rescaling = params.l2_norm_rescaling()},
1279 sharded_qp_);
1280 row_scaling_vec_ = std::move(scaling.row_scaling_vec);
1281 col_scaling_vec_ = std::move(scaling.col_scaling_vec);
1282
1283 CoefficientWiseQuotientInPlace(col_scaling_vec_, sharded_qp_.PrimalSharder(),
1284 starting_primal_solution);
1285 CoefficientWiseQuotientInPlace(row_scaling_vec_, sharded_qp_.DualSharder(),
1286 starting_dual_solution);
1287}
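// The inverse mapping is applied when a solution is handed back to the caller:
// `RecoverOriginalSolution()` below multiplies the working primal and dual
// solutions component-wise by `col_scaling_vec_` and `row_scaling_vec_`,
// respectively, to express them in the original (or presolved) problem's
// units.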
1288
1289void PreprocessSolver::LogQuadraticProgramStats(
1290 const QuadraticProgramStats& stats) const {
1291 SOLVER_LOG(&logger_,
1292 absl::StrFormat("There are %i variables, %i constraints, and %i "
1293 "constraint matrix nonzeros.",
1294 stats.num_variables(), stats.num_constraints(),
1295 stats.constraint_matrix_num_nonzeros()));
1296 if (Qp().constraint_matrix.nonZeros() > 0) {
1297 SOLVER_LOG(&logger_,
1298 absl::StrFormat("Absolute values of nonzero constraint matrix "
1299 "elements: largest=%f, "
1300 "smallest=%f, avg=%f",
1301 stats.constraint_matrix_abs_max(),
1302 stats.constraint_matrix_abs_min(),
1303 stats.constraint_matrix_abs_avg()));
1304 SOLVER_LOG(
1305 &logger_,
1306 absl::StrFormat("Constraint matrix, infinity norm: max(row & col)=%f, "
1307 "min_col=%f, min_row=%f",
1308 stats.constraint_matrix_abs_max(),
1309 stats.constraint_matrix_col_min_l_inf_norm(),
1310 stats.constraint_matrix_row_min_l_inf_norm()));
1311 SOLVER_LOG(
1312 &logger_,
1313 absl::StrFormat(
1314 "Constraint bounds statistics (max absolute value per row): "
1315 "largest=%f, smallest=%f, avg=%f, l2_norm=%f",
1316 stats.combined_bounds_max(), stats.combined_bounds_min(),
1317 stats.combined_bounds_avg(), stats.combined_bounds_l2_norm()));
1318 }
1319 if (!IsLinearProgram(Qp())) {
1320 SOLVER_LOG(&logger_,
1321 absl::StrFormat("There are %i nonzero diagonal coefficients in "
1322 "the objective matrix.",
1323 stats.objective_matrix_num_nonzeros()));
1324 SOLVER_LOG(
1325 &logger_,
1326 absl::StrFormat(
1327 "Absolute values of nonzero objective matrix elements: largest=%f, "
1328 "smallest=%f, avg=%f",
1329 stats.objective_matrix_abs_max(), stats.objective_matrix_abs_min(),
1330 stats.objective_matrix_abs_avg()));
1331 }
1332 SOLVER_LOG(&logger_, absl::StrFormat("Absolute values of objective vector "
1333 "elements: largest=%f, smallest=%f, "
1334 "avg=%f, l2_norm=%f",
1335 stats.objective_vector_abs_max(),
1336 stats.objective_vector_abs_min(),
1337 stats.objective_vector_abs_avg(),
1338 stats.objective_vector_l2_norm()));
1339 SOLVER_LOG(
1340 &logger_,
1341 absl::StrFormat(
1342 "Gaps between variable upper and lower bounds: #finite=%i of %i, "
1343 "largest=%f, smallest=%f, avg=%f",
1344 stats.variable_bound_gaps_num_finite(), stats.num_variables(),
1345 stats.variable_bound_gaps_max(), stats.variable_bound_gaps_min(),
1346 stats.variable_bound_gaps_avg()));
1347}
1348
1349double PreprocessSolver::InitialPrimalWeight(
1350 const PrimalDualHybridGradientParams& params,
1351 const double l2_norm_primal_linear_objective,
1352 const double l2_norm_constraint_bounds) const {
1353 if (params.has_initial_primal_weight()) {
1354 return params.initial_primal_weight();
1355 }
1356 if (l2_norm_primal_linear_objective > 0.0 &&
1357 l2_norm_constraint_bounds > 0.0) {
1358 // The hand-wavy motivation for this choice is that the objective vector
1359 // has units of (objective units)/(primal units) and the constraint
1360 // bounds vector has units of (objective units)/(dual units),
1361 // therefore this ratio has units (dual units)/(primal units). By
1362 // dimensional analysis, these are the same units as the primal weight.
1363 return l2_norm_primal_linear_objective / l2_norm_constraint_bounds;
1364 } else {
1365 return 1.0;
1366 }
1367}
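
// A minimal standalone sketch of the heuristic above, using only headers
// already included in this file. `VectorL2Norm` and
// `HeuristicInitialPrimalWeight` are illustrative names, not part of the PDLP
// API; the real code receives the two norms precomputed elsewhere.
double VectorL2Norm(const std::vector<double>& v) {
  double sum_of_squares = 0.0;
  for (const double x : v) sum_of_squares += x * x;
  return std::sqrt(sum_of_squares);
}

double HeuristicInitialPrimalWeight(const std::vector<double>& objective,
                                    const std::vector<double>& bounds) {
  const double objective_norm = VectorL2Norm(objective);
  const double bounds_norm = VectorL2Norm(bounds);
  // (objective units)/(primal units) divided by (objective units)/(dual
  // units) yields (dual units)/(primal units), the units of a primal weight.
  return (objective_norm > 0.0 && bounds_norm > 0.0)
             ? objective_norm / bounds_norm
             : 1.0;
}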
1368
1369PrimalAndDualSolution PreprocessSolver::RecoverOriginalSolution(
1370 PrimalAndDualSolution working_solution) const {
1371 glop::ProblemSolution glop_solution(glop::RowIndex{0}, glop::ColIndex{0});
1372 if (presolve_info_.has_value()) {
1373 // We compute statuses relative to the working problem so we can detect when
1374 // variables are at their bounds without floating-point roundoff induced by
1375 // scaling.
1376 glop_solution = internal::ComputeStatuses(Qp(), working_solution);
1377 }
1378 CoefficientWiseProductInPlace(col_scaling_vec_, sharded_qp_.PrimalSharder(),
1379 working_solution.primal_solution);
1380 CoefficientWiseProductInPlace(row_scaling_vec_, sharded_qp_.DualSharder(),
1381 working_solution.dual_solution);
1382 if (presolve_info_.has_value()) {
1383 glop_solution.primal_values =
1384 glop::DenseRow(working_solution.primal_solution.begin(),
1385 working_solution.primal_solution.end());
1386 glop_solution.dual_values =
1387 glop::DenseColumn(working_solution.dual_solution.begin(),
1388 working_solution.dual_solution.end());
1389 // We got the working QP by calling `LinearProgramToMPModelProto()` and
1390 // `QpFromMpModelProto()`. We need to negate the duals if the LP resulting
1391 // from presolve was a max problem.
1392 if (presolve_info_->presolved_problem_was_maximization) {
1393 for (glop::RowIndex i{0}; i < glop_solution.dual_values.size(); ++i) {
1394 glop_solution.dual_values[i] *= -1;
1395 }
1396 }
1397 presolve_info_->preprocessor.RecoverSolution(&glop_solution);
1398 PrimalAndDualSolution solution;
1399 solution.primal_solution =
1400 Eigen::Map<Eigen::VectorXd>(glop_solution.primal_values.data(),
1401 glop_solution.primal_values.size().value());
1402 solution.dual_solution =
1403 Eigen::Map<Eigen::VectorXd>(glop_solution.dual_values.data(),
1404 glop_solution.dual_values.size().value());
1405 // We called `QpToMpModelProto()` and `MPModelProtoToLinearProgram()` to
1406 // convert our original QP into input for glop's preprocessor. The former
1407 // multiplies the objective vector by `objective_scaling_factor`, which
1408 // multiplies the duals by that factor as well. To undo this we divide by
1409 // `objective_scaling_factor`.
1410 solution.dual_solution /=
1411 presolve_info_->sharded_original_qp.Qp().objective_scaling_factor;
1412 // Glop's preprocessor sometimes violates the primal bounds constraints. To
1413 // be safe we project both primal and dual.
1414 ProjectToPrimalVariableBounds(presolve_info_->sharded_original_qp,
1415 solution.primal_solution);
1416 ProjectToDualVariableBounds(presolve_info_->sharded_original_qp,
1417 solution.dual_solution);
1418 return solution;
1419 } else {
1420 return working_solution;
1421 }
1422}
1423
1424void SetActiveSetInformation(const ShardedQuadraticProgram& sharded_qp,
1425 const VectorXd& primal_solution,
1426 const VectorXd& dual_solution,
1427 const VectorXd& primal_start_point,
1428 const VectorXd& dual_start_point,
1429 PointMetadata& metadata) {
1430 CHECK_EQ(primal_solution.size(), sharded_qp.PrimalSize());
1431 CHECK_EQ(dual_solution.size(), sharded_qp.DualSize());
1432 CHECK_EQ(primal_start_point.size(), sharded_qp.PrimalSize());
1433 CHECK_EQ(dual_start_point.size(), sharded_qp.DualSize());
1434
1435 const QuadraticProgram& qp = sharded_qp.Qp();
1436 metadata.set_active_primal_variable_count(
1437 static_cast<int64_t>(sharded_qp.PrimalSharder().ParallelSumOverShards(
1438 [&](const Sharder::Shard& shard) {
1439 const auto primal_shard = shard(primal_solution);
1440 const auto lower_bound_shard = shard(qp.variable_lower_bounds);
1441 const auto upper_bound_shard = shard(qp.variable_upper_bounds);
1442 return (primal_shard.array() > lower_bound_shard.array() &&
1443 primal_shard.array() < upper_bound_shard.array())
1444 .count();
1445 })));
1446
1447 // Most of the computation from the previous `ParallelSumOverShards` is
1448 // duplicated here. However the overhead shouldn't be too large, and using
1449 // `ParallelSumOverShards` is simpler than just using `ParallelForEachShard`.
1450 metadata.set_active_primal_variable_change(
1451 static_cast<int64_t>(sharded_qp.PrimalSharder().ParallelSumOverShards(
1452 [&](const Sharder::Shard& shard) {
1453 const auto primal_shard = shard(primal_solution);
1454 const auto primal_start_shard = shard(primal_start_point);
1455 const auto lower_bound_shard = shard(qp.variable_lower_bounds);
1456 const auto upper_bound_shard = shard(qp.variable_upper_bounds);
1457 return ((primal_shard.array() > lower_bound_shard.array() &&
1458 primal_shard.array() < upper_bound_shard.array()) !=
1459 (primal_start_shard.array() > lower_bound_shard.array() &&
1460 primal_start_shard.array() < upper_bound_shard.array()))
1461 .count();
1462 })));
1463
1464 metadata.set_active_dual_variable_count(
1465 static_cast<int64_t>(sharded_qp.DualSharder().ParallelSumOverShards(
1466 [&](const Sharder::Shard& shard) {
1467 const auto dual_shard = shard(dual_solution);
1468 const auto lower_bound_shard = shard(qp.constraint_lower_bounds);
1469 const auto upper_bound_shard = shard(qp.constraint_upper_bounds);
1470 const double kInfinity = std::numeric_limits<double>::infinity();
1471 return (dual_shard.array() != 0.0 ||
1472 (lower_bound_shard.array() == -kInfinity &&
1473 upper_bound_shard.array() == kInfinity))
1474 .count();
1475 })));
1476
1477 metadata.set_active_dual_variable_change(
1478 static_cast<int64_t>(sharded_qp.DualSharder().ParallelSumOverShards(
1479 [&](const Sharder::Shard& shard) {
1480 const auto dual_shard = shard(dual_solution);
1481 const auto dual_start_shard = shard(dual_start_point);
1482 const auto lower_bound_shard = shard(qp.constraint_lower_bounds);
1483 const auto upper_bound_shard = shard(qp.constraint_upper_bounds);
1484 const double kInfinity = std::numeric_limits<double>::infinity();
1485 return ((dual_shard.array() != 0.0 ||
1486 (lower_bound_shard.array() == -kInfinity &&
1487 upper_bound_shard.array() == kInfinity)) !=
1488 (dual_start_shard.array() != 0.0 ||
1489 (lower_bound_shard.array() == -kInfinity &&
1490 upper_bound_shard.array() == kInfinity)))
1491 .count();
1492 })));
1493}
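
// A serial sketch of the primal-side count computed by the sharded sums
// above, using Eigen types already available in this file; the function name
// is illustrative, not part of the PDLP API. A variable is counted as
// "active" when it lies strictly inside its bounds.
int64_t CountActivePrimalVariables(const Eigen::VectorXd& primal,
                                   const Eigen::VectorXd& lower_bounds,
                                   const Eigen::VectorXd& upper_bounds) {
  int64_t active = 0;
  for (Eigen::Index i = 0; i < primal.size(); ++i) {
    if (primal[i] > lower_bounds[i] && primal[i] < upper_bounds[i]) ++active;
  }
  return active;
}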
1494
1495void PreprocessSolver::AddPointMetadata(
1496 const PrimalDualHybridGradientParams& params,
1497 const VectorXd& primal_solution, const VectorXd& dual_solution,
1498 PointType point_type, const VectorXd& last_primal_start_point,
1499 const VectorXd& last_dual_start_point, IterationStats& stats) const {
1500 PointMetadata metadata;
1501 metadata.set_point_type(point_type);
1502 std::vector<int> random_projection_seeds(
1503 params.random_projection_seeds().begin(),
1504 params.random_projection_seeds().end());
1505 SetRandomProjections(sharded_qp_, primal_solution, dual_solution,
1506 random_projection_seeds, metadata);
1507 if (point_type != POINT_TYPE_ITERATE_DIFFERENCE) {
1508 SetActiveSetInformation(sharded_qp_, primal_solution, dual_solution,
1509 last_primal_start_point, last_dual_start_point,
1510 metadata);
1511 }
1512 *stats.add_point_metadata() = metadata;
1513}
1514
1515std::optional<TerminationReasonAndPointType>
1516PreprocessSolver::UpdateIterationStatsAndCheckTermination(
1517 const PrimalDualHybridGradientParams& params,
1518 bool force_numerical_termination, const VectorXd& working_primal_current,
1519 const VectorXd& working_dual_current,
1520 const VectorXd* working_primal_average,
1521 const VectorXd* working_dual_average, const VectorXd* working_primal_delta,
1522 const VectorXd* working_dual_delta, const VectorXd& last_primal_start_point,
1523 const VectorXd& last_dual_start_point,
1524 const std::atomic<bool>* interrupt_solve,
1525 const IterationType iteration_type, const IterationStats& full_stats,
1526 IterationStats& stats) {
1527 ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1528 params, working_primal_current, working_dual_current,
1529 POINT_TYPE_CURRENT_ITERATE, stats.add_convergence_information(),
1530 stats.add_infeasibility_information());
1531 AddPointMetadata(params, working_primal_current, working_dual_current,
1532 POINT_TYPE_CURRENT_ITERATE, last_primal_start_point,
1533 last_dual_start_point, stats);
1534 if (working_primal_average != nullptr && working_dual_average != nullptr) {
1535 ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1536 params, *working_primal_average, *working_dual_average,
1537 POINT_TYPE_AVERAGE_ITERATE, stats.add_convergence_information(),
1538 stats.add_infeasibility_information());
1539 AddPointMetadata(params, *working_primal_average, *working_dual_average,
1540 POINT_TYPE_AVERAGE_ITERATE, last_primal_start_point,
1541 last_dual_start_point, stats);
1542 }
1543 // Undoing presolve doesn't work for iterate differences.
1544 if (!presolve_info_.has_value() && working_primal_delta != nullptr &&
1545 working_dual_delta != nullptr) {
1546 ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1547 params, *working_primal_delta, *working_dual_delta,
1548 POINT_TYPE_ITERATE_DIFFERENCE, nullptr,
1549 stats.add_infeasibility_information());
1550 AddPointMetadata(params, *working_primal_delta, *working_dual_delta,
1551 POINT_TYPE_ITERATE_DIFFERENCE, last_primal_start_point,
1552 last_dual_start_point, stats);
1553 }
1554 constexpr int kLogEvery = 15;
1555 absl::Time logging_time = absl::Now();
1556 if (params.verbosity_level() >= 2 &&
1557 (params.log_interval_seconds() == 0.0 ||
1558 logging_time - time_of_last_log_ >=
1559 absl::Seconds(params.log_interval_seconds()))) {
1560 if (log_counter_ == 0) {
1561 LogIterationStatsHeader(params.verbosity_level(),
1562 params.use_feasibility_polishing(), logger_);
1563 }
1564 LogIterationStats(params.verbosity_level(),
1565 params.use_feasibility_polishing(), iteration_type, stats,
1566 params.termination_criteria(), original_bound_norms_,
1567 POINT_TYPE_AVERAGE_ITERATE, logger_);
1568 if (params.verbosity_level() >= 4) {
1569 // If the convergence information doesn't contain an average iterate, the
1570 // previous `LogIterationStats()` will report some other iterate (usually
1571 // the current one) so don't repeat logging it.
1572 if (GetConvergenceInformation(stats, POINT_TYPE_AVERAGE_ITERATE) !=
1573 std::nullopt) {
1574 LogIterationStats(
1575 params.verbosity_level(), params.use_feasibility_polishing(),
1576 iteration_type, stats, params.termination_criteria(),
1577 original_bound_norms_, POINT_TYPE_CURRENT_ITERATE, logger_);
1578 }
1579 }
1580 time_of_last_log_ = logging_time;
1581 if (++log_counter_ >= kLogEvery) {
1582 log_counter_ = 0;
1583 }
1584 }
1585 if (iteration_stats_callback_ != nullptr) {
1586 iteration_stats_callback_(
1587 {.iteration_type = iteration_type,
1588 .termination_criteria = params.termination_criteria(),
1589 .iteration_stats = stats,
1590 .bound_norms = original_bound_norms_});
1591 }
1592
1593 if (const auto termination = CheckIterateTerminationCriteria(
1594 params.termination_criteria(), stats, original_bound_norms_,
1595 force_numerical_termination);
1596 termination.has_value()) {
1597 return termination;
1598 }
1599 return CheckSimpleTerminationCriteria(params.termination_criteria(),
1600 full_stats, interrupt_solve);
1601}
1602
1603void PreprocessSolver::ComputeConvergenceAndInfeasibilityFromWorkingSolution(
1604 const PrimalDualHybridGradientParams& params,
1605 const VectorXd& working_primal, const VectorXd& working_dual,
1606 PointType candidate_type, ConvergenceInformation* convergence_information,
1607 InfeasibilityInformation* infeasibility_information) const {
1608 const TerminationCriteria::DetailedOptimalityCriteria criteria =
1609 EffectiveOptimalityCriteria(params.termination_criteria());
1610 const double primal_epsilon_ratio =
1611 EpsilonRatio(criteria.eps_optimal_primal_residual_absolute(),
1612 criteria.eps_optimal_primal_residual_relative());
1613 const double dual_epsilon_ratio =
1614 EpsilonRatio(criteria.eps_optimal_dual_residual_absolute(),
1615 criteria.eps_optimal_dual_residual_relative());
1616 if (presolve_info_.has_value()) {
1617 // Undoing presolve doesn't make sense for iterate differences.
1618 CHECK_NE(candidate_type, POINT_TYPE_ITERATE_DIFFERENCE);
1619
1620 PrimalAndDualSolution original = RecoverOriginalSolution(
1621 {.primal_solution = working_primal, .dual_solution = working_dual});
1622 if (convergence_information != nullptr) {
1623 *convergence_information = ComputeConvergenceInformation(
1624 params, presolve_info_->sharded_original_qp,
1625 presolve_info_->trivial_col_scaling_vec,
1626 presolve_info_->trivial_row_scaling_vec, original.primal_solution,
1627 original.dual_solution, primal_epsilon_ratio, dual_epsilon_ratio,
1628 candidate_type);
1629 }
1630 if (infeasibility_information != nullptr) {
1631 VectorXd primal_copy =
1632 CloneVector(original.primal_solution,
1633 presolve_info_->sharded_original_qp.PrimalSharder());
1634 ProjectToPrimalVariableBounds(presolve_info_->sharded_original_qp,
1635 primal_copy,
1636 /*use_feasibility_bounds=*/true);
1637 // Only iterate differences should be able to violate the dual variable
1638 // bounds, and iterate differences aren't used when presolve is enabled.
1639 *infeasibility_information = ComputeInfeasibilityInformation(
1640 params, presolve_info_->sharded_original_qp,
1641 presolve_info_->trivial_col_scaling_vec,
1642 presolve_info_->trivial_row_scaling_vec, primal_copy,
1643 original.dual_solution, original.primal_solution, candidate_type);
1644 }
1645 } else {
1646 if (convergence_information != nullptr) {
1647 *convergence_information = ComputeConvergenceInformation(
1648 params, sharded_qp_, col_scaling_vec_, row_scaling_vec_,
1649 working_primal, working_dual, primal_epsilon_ratio,
1650 dual_epsilon_ratio, candidate_type);
1651 }
1652 if (infeasibility_information != nullptr) {
1653 VectorXd primal_copy =
1654 CloneVector(working_primal, sharded_qp_.PrimalSharder());
1655 ProjectToPrimalVariableBounds(sharded_qp_, primal_copy,
1656 /*use_feasibility_bounds=*/true);
1657 if (candidate_type == POINT_TYPE_ITERATE_DIFFERENCE) {
1658 // Iterate differences might not satisfy the dual variable bounds.
1659 VectorXd dual_copy =
1660 CloneVector(working_dual, sharded_qp_.DualSharder());
1661 ProjectToDualVariableBounds(sharded_qp_, dual_copy);
1662 *infeasibility_information = ComputeInfeasibilityInformation(
1663 params, sharded_qp_, col_scaling_vec_, row_scaling_vec_,
1664 primal_copy, dual_copy, working_primal, candidate_type);
1665 } else {
1666 *infeasibility_information = ComputeInfeasibilityInformation(
1667 params, sharded_qp_, col_scaling_vec_, row_scaling_vec_,
1668 primal_copy, working_dual, working_primal, candidate_type);
1669 }
1670 }
1671 }
1672}
1673
1674// `result` is used both as the input and as the temporary that will be
1675// returned.
1676SolverResult PreprocessSolver::ConstructOriginalSolverResult(
1677 const PrimalDualHybridGradientParams& params, SolverResult result,
1678 SolverLogger& logger) const {
1679 const bool use_zero_primal_objective =
1680 result.solve_log.termination_reason() ==
1681 TERMINATION_REASON_PRIMAL_INFEASIBLE;
1682 if (presolve_info_.has_value()) {
1683 // Transform the solutions so they match the original unscaled problem.
1684 PrimalAndDualSolution original_solution = RecoverOriginalSolution(
1685 {.primal_solution = std::move(result.primal_solution),
1686 .dual_solution = std::move(result.dual_solution)});
1687 result.primal_solution = std::move(original_solution.primal_solution);
1688 if (result.solve_log.termination_reason() ==
1689 TERMINATION_REASON_DUAL_INFEASIBLE) {
1690 ProjectToPrimalVariableBounds(presolve_info_->sharded_original_qp,
1691 result.primal_solution,
1692 /*use_feasibility_bounds=*/true);
1693 }
1694 // If presolve is enabled, no projection of the dual is performed when
1695 // checking for infeasibility.
1696 result.dual_solution = std::move(original_solution.dual_solution);
1697 // `RecoverOriginalSolution` doesn't recover reduced costs so we need to
1698 // compute them with respect to the original problem.
1699 result.reduced_costs = ReducedCosts(
1700 params, presolve_info_->sharded_original_qp, result.primal_solution,
1701 result.dual_solution, use_zero_primal_objective);
1702 } else {
1703 if (result.solve_log.termination_reason() ==
1704 TERMINATION_REASON_DUAL_INFEASIBLE) {
1705 ProjectToPrimalVariableBounds(sharded_qp_, result.primal_solution,
1706 /*use_feasibility_bounds=*/true);
1707 }
1708 if (result.solve_log.termination_reason() ==
1709 TERMINATION_REASON_PRIMAL_INFEASIBLE) {
1710 ProjectToDualVariableBounds(sharded_qp_, result.dual_solution);
1711 }
1712 result.reduced_costs =
1713 ReducedCosts(params, sharded_qp_, result.primal_solution,
1714 result.dual_solution, use_zero_primal_objective);
1715 // Transform the solutions so they match the original unscaled problem.
1716 CoefficientWiseProductInPlace(col_scaling_vec_, sharded_qp_.PrimalSharder(),
1717 result.primal_solution);
1718 CoefficientWiseProductInPlace(row_scaling_vec_, sharded_qp_.DualSharder(),
1719 result.dual_solution);
1720 CoefficientWiseProductInPlace(
1721 col_scaling_vec_, sharded_qp_.PrimalSharder(), result.reduced_costs);
1722 }
1723 IterationType iteration_type;
1724 switch (result.solve_log.solution_type()) {
1725 case POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION:
1726 iteration_type = IterationType::kFeasibilityPolishingTermination;
1727 break;
1728 case POINT_TYPE_PRESOLVER_SOLUTION:
1729 iteration_type = IterationType::kPresolveTermination;
1730 break;
1731 default:
1732 iteration_type = IterationType::kNormalTermination;
1733 break;
1734 }
1735 if (iteration_stats_callback_ != nullptr) {
1736 iteration_stats_callback_(
1737 {.iteration_type = iteration_type,
1738 .termination_criteria = params.termination_criteria(),
1739 .iteration_stats = result.solve_log.solution_stats(),
1740 .bound_norms = original_bound_norms_});
1741 }
1742
1743 if (params.verbosity_level() >= 1) {
1744 SOLVER_LOG(&logger, "Termination reason: ",
1745 TerminationReason_Name(result.solve_log.termination_reason()));
1746 SOLVER_LOG(&logger, "Solution point type: ",
1747 PointType_Name(result.solve_log.solution_type()));
1748 SOLVER_LOG(&logger, "Final solution stats:");
1749 LogIterationStatsHeader(params.verbosity_level(),
1750 params.use_feasibility_polishing(), logger);
1751 LogIterationStats(params.verbosity_level(),
1752 params.use_feasibility_polishing(), iteration_type,
1753 result.solve_log.solution_stats(),
1754 params.termination_criteria(), original_bound_norms_,
1755 result.solve_log.solution_type(), logger);
1756 const auto& convergence_info = GetConvergenceInformation(
1757 result.solve_log.solution_stats(), result.solve_log.solution_type());
1758 if (convergence_info.has_value()) {
1759 if (std::isfinite(convergence_info->corrected_dual_objective())) {
1760 SOLVER_LOG(&logger, "Dual objective after infeasibility correction: ",
1761 convergence_info->corrected_dual_objective());
1762 }
1763 }
1764 }
1765 return result;
1766}
1767
1768Solver::Solver(const PrimalDualHybridGradientParams& params,
1769 VectorXd starting_primal_solution,
1770 VectorXd starting_dual_solution, const double initial_step_size,
1771 const double initial_primal_weight,
1772 PreprocessSolver* preprocess_solver)
1773 : params_(params),
1774 current_primal_solution_(std::move(starting_primal_solution)),
1775 current_dual_solution_(std::move(starting_dual_solution)),
1776 primal_average_(&preprocess_solver->ShardedWorkingQp().PrimalSharder()),
1777 dual_average_(&preprocess_solver->ShardedWorkingQp().DualSharder()),
1778 step_size_(initial_step_size),
1779 primal_weight_(initial_primal_weight),
1780 preprocess_solver_(preprocess_solver) {}
1781
1782Solver::NextSolutionAndDelta Solver::ComputeNextPrimalSolution(
1783 double primal_step_size) const {
1784 const int64_t primal_size = ShardedWorkingQp().PrimalSize();
1785 NextSolutionAndDelta result = {
1786 .value = VectorXd(primal_size),
1787 .delta = VectorXd(primal_size),
1788 };
1789 const QuadraticProgram& qp = WorkingQp();
1790 // This computes the primal portion of the PDHG algorithm:
1791 // argmin_x[gradient(f)(`current_primal_solution_`)^T x + g(x)
1792 // + `current_dual_solution_`^T K x
1793 // + (0.5 / `primal_step_size`) * norm(x - `current_primal_solution_`)^2]
1794 // See Sections 2 - 3 of Chambolle and Pock and the comment in the header.
1795 // We omitted the constant terms from Chambolle and Pock's (7).
1796 // This minimization is easy to do in closed form since it can be separated
1797 // into independent problems for each of the primal variables.
1798 ShardedWorkingQp().PrimalSharder().ParallelForEachShard(
1799 [&](const Sharder::Shard& shard) {
1800 if (!IsLinearProgram(qp)) {
1801 // TODO(user): Does changing this to auto (so it becomes an
1802 // Eigen deferred result), or inlining it below, change performance?
1803 const VectorXd diagonal_scaling =
1804 primal_step_size *
1805 shard(qp.objective_matrix->diagonal()).array() +
1806 1.0;
1807 shard(result.value) =
1808 (shard(current_primal_solution_) -
1809 primal_step_size *
1810 (shard(qp.objective_vector) - shard(current_dual_product_)))
1811 // Scale i-th element by 1 / (1 + `primal_step_size` * Q_{ii})
1812 .cwiseQuotient(diagonal_scaling)
1813 .cwiseMin(shard(qp.variable_upper_bounds))
1814 .cwiseMax(shard(qp.variable_lower_bounds));
1815 } else {
1816 // The formula in the LP case is simplified for better performance.
1817 shard(result.value) =
1818 (shard(current_primal_solution_) -
1819 primal_step_size *
1820 (shard(qp.objective_vector) - shard(current_dual_product_)))
1821 .cwiseMin(shard(qp.variable_upper_bounds))
1822 .cwiseMax(shard(qp.variable_lower_bounds));
1823 }
1824 shard(result.delta) =
1825 shard(result.value) - shard(current_primal_solution_);
1826 });
1827 return result;
1828}
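
// Scalar sketch of the per-coordinate update above, assuming a diagonal
// objective matrix entry q_ii >= 0 (names are illustrative, not PDLP API).
// With step size tau, current value x, objective coefficient c_i, and
// (K^T y)_i = kty, the unconstrained minimizer of the separable subproblem is
//   (x - tau * (c_i - kty)) / (1 + tau * q_ii),
// which is then projected onto [lower, upper]. For an LP, q_ii == 0 and the
// quotient disappears, matching the simplified else-branch above.
double NextPrimalCoordinate(double x, double c_i, double kty, double q_ii,
                            double tau, double lower, double upper) {
  const double unconstrained = (x - tau * (c_i - kty)) / (1.0 + tau * q_ii);
  return std::min(std::max(unconstrained, lower), upper);
}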
1829
1830Solver::NextSolutionAndDelta Solver::ComputeNextDualSolution(
1831 double dual_step_size, double extrapolation_factor,
1832 const NextSolutionAndDelta& next_primal_solution) const {
1833 const int64_t dual_size = ShardedWorkingQp().DualSize();
1834 NextSolutionAndDelta result = {
1835 .value = VectorXd(dual_size),
1836 .delta = VectorXd(dual_size),
1837 };
1838 const QuadraticProgram& qp = WorkingQp();
1839 VectorXd extrapolated_primal(ShardedWorkingQp().PrimalSize());
1840 ShardedWorkingQp().PrimalSharder().ParallelForEachShard(
1841 [&](const Sharder::Shard& shard) {
1842 shard(extrapolated_primal) =
1843 (shard(next_primal_solution.value) +
1844 extrapolation_factor * shard(next_primal_solution.delta));
1845 });
1846 // TODO(user): Refactor this multiplication so that we only do one matrix
1847 // vector multiply for the primal variable. This only applies to Malitsky and
1848 // Pock and not to the adaptive step size rule.
1849 ShardedWorkingQp().TransposedConstraintMatrixSharder().ParallelForEachShard(
1850 [&](const Sharder::Shard& shard) {
1851 VectorXd temp =
1852 shard(current_dual_solution_) -
1853 dual_step_size *
1854 shard(ShardedWorkingQp().TransposedConstraintMatrix())
1855 .transpose() *
1856 extrapolated_primal;
1857 // Each element of the argument of `.cwiseMin()` is the critical point
1858 // of the respective 1D minimization problem if it's negative.
1859 // Likewise the argument to the `.cwiseMax()` is the critical point if
1860 // positive.
1861 shard(result.value) =
1862 VectorXd::Zero(temp.size())
1863 .cwiseMin(temp +
1864 dual_step_size * shard(qp.constraint_upper_bounds))
1865 .cwiseMax(temp +
1866 dual_step_size * shard(qp.constraint_lower_bounds));
1867 shard(result.delta) =
1868 (shard(result.value) - shard(current_dual_solution_));
1869 });
1870 return result;
1871}
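
// Scalar sketch of the per-constraint dual update above (names are
// illustrative, not PDLP API). With dual step size sigma, current dual value
// y, extrapolated row activity a = (K x~)_j, and constraint bounds [l, u]:
double NextDualCoordinate(double y, double a, double sigma, double l,
                          double u) {
  const double temp = y - sigma * a;
  // min(0, temp + sigma * u) is the critical point when negative, and
  // max(., temp + sigma * l) the critical point when positive, matching the
  // cwiseMin / cwiseMax composition above; infinite bounds pass through.
  return std::max(std::min(0.0, temp + sigma * u), temp + sigma * l);
}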
1872
1873double Solver::ComputeMovement(const VectorXd& delta_primal,
1874 const VectorXd& delta_dual) const {
1875 const double primal_movement =
1876 (0.5 * primal_weight_) *
1877 SquaredNorm(delta_primal, ShardedWorkingQp().PrimalSharder());
1878 const double dual_movement =
1879 (0.5 / primal_weight_) *
1880 SquaredNorm(delta_dual, ShardedWorkingQp().DualSharder());
1881 return primal_movement + dual_movement;
1882}
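
// The movement above is the squared norm in the primal-dual metric
//   Omega(dx, dy) = (w / 2) * ||dx||_2^2 + (1 / (2 w)) * ||dy||_2^2,
// where w is the primal weight. A worked check with hypothetical numbers:
// w = 4, ||dx||^2 = 2, ||dy||^2 = 8 gives
//   Omega = (4/2) * 2 + (1/(2*4)) * 8 = 4 + 1 = 5.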
1883
1884double Solver::ComputeNonlinearity(const VectorXd& delta_primal,
1885 const VectorXd& next_dual_product) const {
1886 // Lemma 1 in Chambolle and Pock includes a term with L_f, the Lipschitz
1887 // constant of f. This is zero in our formulation.
1888 return ShardedWorkingQp().PrimalSharder().ParallelSumOverShards(
1889 [&](const Sharder::Shard& shard) {
1890 return -shard(delta_primal)
1891 .dot(shard(next_dual_product) -
1892 shard(current_dual_product_));
1893 });
1894}
1895
1896IterationStats Solver::CreateSimpleIterationStats(
1897 RestartChoice restart_used) const {
1898 IterationStats stats;
1899 double num_kkt_passes_per_rejected_step = 1.0;
1900 if (params_.linesearch_rule() ==
1901 PrimalDualHybridGradientParams::MALITSKY_POCK_LINESEARCH_RULE) {
1902 num_kkt_passes_per_rejected_step = 0.5;
1903 }
1904 stats.set_iteration_number(iterations_completed_);
1905 stats.set_cumulative_rejected_steps(num_rejected_steps_);
1906 // TODO(user): This formula doesn't account for kkt passes in major
1907 // iterations.
1908 stats.set_cumulative_kkt_matrix_passes(iterations_completed_ +
1909 num_kkt_passes_per_rejected_step *
1910 num_rejected_steps_);
1911 stats.set_cumulative_time_sec(preprocessing_time_sec_ + timer_.Get());
1912 stats.set_restart_used(restart_used);
1913 stats.set_step_size(step_size_);
1914 stats.set_primal_weight(primal_weight_);
1915 return stats;
1916}
1917
1918double Solver::DistanceTraveledFromLastStart(
1919 const VectorXd& primal_solution, const VectorXd& dual_solution) const {
1920 return std::sqrt((0.5 * primal_weight_) *
1921 SquaredDistance(primal_solution,
1922 last_primal_start_point_,
1923 ShardedWorkingQp().PrimalSharder()) +
1924 (0.5 / primal_weight_) *
1925 SquaredDistance(dual_solution, last_dual_start_point_,
1926 ShardedWorkingQp().DualSharder()));
1927}
1928
1929LocalizedLagrangianBounds Solver::ComputeLocalizedBoundsAtCurrent() const {
1930 const double distance_traveled_by_current = DistanceTraveledFromLastStart(
1931 current_primal_solution_, current_dual_solution_);
1932 return ComputeLocalizedLagrangianBounds(
1933 ShardedWorkingQp(), current_primal_solution_, current_dual_solution_,
1934 PrimalDualNorm::kEuclideanNorm, primal_weight_,
1935 distance_traveled_by_current,
1936 /*primal_product=*/nullptr, &current_dual_product_,
1937 params_.use_diagonal_qp_trust_region_solver(),
1938 params_.diagonal_qp_trust_region_solver_tolerance());
1939}
1940
1941LocalizedLagrangianBounds Solver::ComputeLocalizedBoundsAtAverage() const {
1942 // TODO(user): These vectors are recomputed again for termination checks
1943 // and again if we eventually restart to the average.
1944 VectorXd average_primal = PrimalAverage();
1945 VectorXd average_dual = DualAverage();
1946
1947 const double distance_traveled_by_average =
1948 DistanceTraveledFromLastStart(average_primal, average_dual);
1949
1950 return ComputeLocalizedLagrangianBounds(
1951 ShardedWorkingQp(), average_primal, average_dual,
1952 PrimalDualNorm::kEuclideanNorm, primal_weight_,
1953 distance_traveled_by_average,
1954 /*primal_product=*/nullptr, /*dual_product=*/nullptr,
1955 params_.use_diagonal_qp_trust_region_solver(),
1956 params_.diagonal_qp_trust_region_solver_tolerance());
1957}
1958
1959bool AverageHasBetterPotential(
1960 const LocalizedLagrangianBounds& local_bounds_at_average,
1961 const LocalizedLagrangianBounds& local_bounds_at_current) {
1962 return BoundGap(local_bounds_at_average) /
1963 MathUtil::Square(local_bounds_at_average.radius) <
1964 BoundGap(local_bounds_at_current) /
1965 MathUtil::Square(local_bounds_at_current.radius);
1966}
1967
1968double NormalizedGap(
1969 const LocalizedLagrangianBounds& local_bounds_at_candidate) {
1970 const double distance_traveled_by_candidate =
1971 local_bounds_at_candidate.radius;
1972 return BoundGap(local_bounds_at_candidate) / distance_traveled_by_candidate;
1973}
1974
1975// TODO(user): Review / cleanup adaptive heuristic.
1976bool Solver::ShouldDoAdaptiveRestartHeuristic(
1977 double candidate_normalized_gap) const {
1978 const double gap_reduction_ratio =
1979 candidate_normalized_gap / normalized_gap_at_last_restart_;
1980 if (gap_reduction_ratio < params_.sufficient_reduction_for_restart()) {
1981 return true;
1982 }
1983 if (gap_reduction_ratio < params_.necessary_reduction_for_restart() &&
1984 candidate_normalized_gap > normalized_gap_at_last_trial_) {
1985 // We've made the "necessary" amount of progress, and iterates appear to
1986 // be getting worse, so restart.
1987 return true;
1988 }
1989 return false;
1990}
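
// A worked example of the two conditions above with hypothetical values
// (sufficient_reduction_for_restart = 0.1, necessary_reduction_for_restart =
// 0.9, normalized_gap_at_last_restart_ = 10; not necessarily the defaults):
//   candidate gap 0.5 -> ratio 0.05 < 0.1: restart (sufficient progress).
//   candidate gap 5.0 -> ratio 0.5 in [0.1, 0.9): restart only if 5.0 also
//     exceeds normalized_gap_at_last_trial_ (iterates are getting worse).
//   candidate gap 9.5 -> ratio 0.95 >= 0.9: no restart from this heuristic.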
1991
1992RestartChoice Solver::DetermineDistanceBasedRestartChoice() const {
1993 // The following checks are safeguards that normally should not be triggered.
1994 if (primal_average_.NumTerms() == 0) {
1995 return RESTART_CHOICE_NO_RESTART;
1996 } else if (distance_based_restart_info_.length_of_last_restart_period == 0) {
1997 return RESTART_CHOICE_RESTART_TO_AVERAGE;
1998 }
1999 const int restart_period_length = primal_average_.NumTerms();
2000 const double distance_moved_this_restart_period_by_average =
2001 DistanceTraveledFromLastStart(primal_average_.ComputeAverage(),
2002 dual_average_.ComputeAverage());
2003 const double distance_moved_last_restart_period =
2004 distance_based_restart_info_.distance_moved_last_restart_period;
2005
2006 // A restart should be triggered when the normalized distance traveled by
2007 // the average is at least a constant factor smaller than the last.
2008 // TODO(user): Experiment with using `.necessary_reduction_for_restart()`
2009 // as a heuristic when deciding if a restart should be triggered.
2010 if ((distance_moved_this_restart_period_by_average / restart_period_length) <
2011 params_.sufficient_reduction_for_restart() *
2012 (distance_moved_last_restart_period /
2013 distance_based_restart_info_.length_of_last_restart_period)) {
2014 // Restart at current solution when it yields a smaller normalized potential
2015 // function value than the average (heuristic suggested by ohinder@).
2016 if (AverageHasBetterPotential(ComputeLocalizedBoundsAtAverage(),
2017 ComputeLocalizedBoundsAtCurrent())) {
2018 return RESTART_CHOICE_RESTART_TO_AVERAGE;
2019 } else {
2020 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
2021 }
2022 } else {
2023 return RESTART_CHOICE_NO_RESTART;
2024 }
2025}
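
// A minimal sketch of the distance-based trigger above, over plain doubles
// (names are illustrative; the member lookups and the average-vs-current
// choice are omitted). `sufficient_reduction` plays the role of
// params_.sufficient_reduction_for_restart().
bool DistanceBasedRestartTriggered(double distance_moved_this_period,
                                   double this_period_length,
                                   double distance_moved_last_period,
                                   double last_period_length,
                                   double sufficient_reduction) {
  // Restart when the per-iteration distance traveled by the average shrank
  // by at least the required factor relative to the previous restart period.
  return distance_moved_this_period / this_period_length <
         sufficient_reduction *
             (distance_moved_last_period / last_period_length);
}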
2026
2027RestartChoice Solver::ChooseRestartToApply(const bool is_major_iteration) {
2028 if (!primal_average_.HasNonzeroWeight() &&
2029 !dual_average_.HasNonzeroWeight()) {
2030 return RESTART_CHOICE_NO_RESTART;
2031 }
2032 // TODO(user): This forced restart is very important for the performance of
2033 // `ADAPTIVE_HEURISTIC`. Test if the impact comes primarily from the first
2034 // forced restart (which would unseat a good initial starting point that could
2035 // prevent restarts early in the solve) or if it's really needed for the full
2036 // duration of the solve. If it is really needed, should we then trigger major
2037 // iterations on powers of two?
2038 const int restart_length = primal_average_.NumTerms();
2039 if (restart_length >= iterations_completed_ / 2 &&
2040 params_.restart_strategy() ==
2041 PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC) {
2042 if (AverageHasBetterPotential(ComputeLocalizedBoundsAtAverage(),
2043 ComputeLocalizedBoundsAtCurrent())) {
2044 return RESTART_CHOICE_RESTART_TO_AVERAGE;
2045 } else {
2046 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
2047 }
2048 }
2049 if (is_major_iteration) {
2050 switch (params_.restart_strategy()) {
2051 case PrimalDualHybridGradientParams::NO_RESTARTS:
2052 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
2053 case PrimalDualHybridGradientParams::EVERY_MAJOR_ITERATION:
2054 return RESTART_CHOICE_RESTART_TO_AVERAGE;
2055 case PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC: {
2056 const LocalizedLagrangianBounds local_bounds_at_average =
2057 ComputeLocalizedBoundsAtAverage();
2058 const LocalizedLagrangianBounds local_bounds_at_current =
2059 ComputeLocalizedBoundsAtCurrent();
2060 double normalized_gap;
2061 RestartChoice choice;
2062 if (AverageHasBetterPotential(local_bounds_at_average,
2063 local_bounds_at_current)) {
2064 normalized_gap = NormalizedGap(local_bounds_at_average);
2065 choice = RESTART_CHOICE_RESTART_TO_AVERAGE;
2066 } else {
2067 normalized_gap = NormalizedGap(local_bounds_at_current);
2068 choice = RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
2069 }
2070 if (ShouldDoAdaptiveRestartHeuristic(normalized_gap)) {
2071 return choice;
2072 } else {
2073 normalized_gap_at_last_trial_ = normalized_gap;
2074 return RESTART_CHOICE_NO_RESTART;
2075 }
2076 }
2077 case PrimalDualHybridGradientParams::ADAPTIVE_DISTANCE_BASED: {
2078 return DetermineDistanceBasedRestartChoice();
2079 }
2080 default:
2081 LOG(FATAL) << "Unrecognized restart_strategy "
2082 << params_.restart_strategy();
2083 return RESTART_CHOICE_UNSPECIFIED;
2084 }
2085 } else {
2086 return RESTART_CHOICE_NO_RESTART;
2087 }
2088}
2089
2090VectorXd Solver::PrimalAverage() const {
2091 if (primal_average_.HasNonzeroWeight()) {
2092 return primal_average_.ComputeAverage();
2093 } else {
2094 return current_primal_solution_;
2095 }
2096}
2097
2098VectorXd Solver::DualAverage() const {
2099 if (dual_average_.HasNonzeroWeight()) {
2100 return dual_average_.ComputeAverage();
2101 } else {
2102 return current_dual_solution_;
2103 }
2104}
2105
2106double Solver::ComputeNewPrimalWeight() const {
2107 const double primal_distance =
2108 Distance(current_primal_solution_, last_primal_start_point_,
2109 ShardedWorkingQp().PrimalSharder());
2110 const double dual_distance =
2111 Distance(current_dual_solution_, last_dual_start_point_,
2112 ShardedWorkingQp().DualSharder());
2113 // This choice of a nonzero tolerance balances performance and numerical
2114 // issues caused by very huge or very tiny weights. It was picked as the best
2115 // among {0.0, 1.0e-20, 2.0e-16, 1.0e-10, 1.0e-5} on the preprocessed MIPLIB
2116 // dataset. The effect of changing this value is relatively minor overall.
2117 constexpr double kNonzeroTol = 1.0e-10;
2118 if (primal_distance <= kNonzeroTol || primal_distance >= 1.0 / kNonzeroTol ||
2119 dual_distance <= kNonzeroTol || dual_distance >= 1.0 / kNonzeroTol) {
2120 return primal_weight_;
2121 }
2122 const double smoothing_param = params_.primal_weight_update_smoothing();
2123 const double unsmoothed_new_primal_weight = dual_distance / primal_distance;
2124 const double new_primal_weight =
2125 std::exp(smoothing_param * std::log(unsmoothed_new_primal_weight) +
2126 (1.0 - smoothing_param) * std::log(primal_weight_));
2127 if (params_.verbosity_level() >= 4) {
2128 SOLVER_LOG(&preprocess_solver_->Logger(), "New computed primal weight is ",
2129 new_primal_weight, " at iteration ", iterations_completed_);
2130 }
2131 return new_primal_weight;
2132}
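
// The exp/log expression above is a geometric interpolation: with smoothing
// parameter s in [0, 1],
//   new_weight = (dual_distance / primal_distance)^s * primal_weight_^(1 - s),
// so s = 0 keeps the old weight and s = 1 jumps to the distance ratio. A
// standalone sketch (the name is illustrative, not PDLP API):
double SmoothedPrimalWeight(double current_weight, double primal_distance,
                            double dual_distance, double smoothing) {
  return std::exp(smoothing * std::log(dual_distance / primal_distance) +
                  (1.0 - smoothing) * std::log(current_weight));
}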
2133
2134SolverResult Solver::PickSolutionAndConstructSolverResult(
2135 VectorXd primal_solution, VectorXd dual_solution,
2136 const IterationStats& stats, TerminationReason termination_reason,
2137 PointType output_type, SolveLog solve_log) const {
2138 switch (output_type) {
2139 case POINT_TYPE_CURRENT_ITERATE:
2140 AssignVector(current_primal_solution_, ShardedWorkingQp().PrimalSharder(),
2141 primal_solution);
2142 AssignVector(current_dual_solution_, ShardedWorkingQp().DualSharder(),
2143 dual_solution);
2144 break;
2145 case POINT_TYPE_ITERATE_DIFFERENCE:
2146 AssignVector(current_primal_delta_, ShardedWorkingQp().PrimalSharder(),
2147 primal_solution);
2148 AssignVector(current_dual_delta_, ShardedWorkingQp().DualSharder(),
2149 dual_solution);
2150 break;
2151 case POINT_TYPE_AVERAGE_ITERATE:
2152 case POINT_TYPE_PRESOLVER_SOLUTION:
2153 break;
2154 default:
2155 // Default to average whenever `output_type` is `POINT_TYPE_NONE`.
2156 output_type = POINT_TYPE_AVERAGE_ITERATE;
2157 break;
2158 }
2159 return ConstructSolverResult(
2160 std::move(primal_solution), std::move(dual_solution), stats,
2161 termination_reason, output_type, std::move(solve_log));
2162}
2163
2164void Solver::ApplyRestartChoice(const RestartChoice restart_to_apply) {
2165 switch (restart_to_apply) {
2166 case RESTART_CHOICE_UNSPECIFIED:
2167 case RESTART_CHOICE_NO_RESTART:
2168 return;
2169 case RESTART_CHOICE_WEIGHTED_AVERAGE_RESET:
2170 if (params_.verbosity_level() >= 4) {
2171 SOLVER_LOG(&preprocess_solver_->Logger(),
2172 "Restarted to current on iteration ", iterations_completed_,
2173 " after ", primal_average_.NumTerms(), " iterations");
2174 }
2175 break;
2176 case RESTART_CHOICE_RESTART_TO_AVERAGE:
2177 if (params_.verbosity_level() >= 4) {
2178 SOLVER_LOG(&preprocess_solver_->Logger(),
2179 "Restarted to average on iteration ", iterations_completed_,
2180 " after ", primal_average_.NumTerms(), " iterations");
2181 }
2182 current_primal_solution_ = primal_average_.ComputeAverage();
2183 current_dual_solution_ = dual_average_.ComputeAverage();
2184 current_dual_product_ = TransposedMatrixVectorProduct(
2185 WorkingQp().constraint_matrix, current_dual_solution_,
2186 ShardedWorkingQp().ConstraintMatrixSharder());
2187 break;
2188 }
2189 primal_weight_ = ComputeNewPrimalWeight();
2190 ratio_last_two_step_sizes_ = 1;
2191 if (params_.restart_strategy() ==
2192 PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC) {
2193 // It's important for the theory that the distances here are calculated
2194 // given the new primal weight.
2195 const LocalizedLagrangianBounds local_bounds_at_last_restart =
2196 ComputeLocalizedBoundsAtCurrent();
2197 const double distance_traveled_since_last_restart =
2198 local_bounds_at_last_restart.radius;
2199 normalized_gap_at_last_restart_ = BoundGap(local_bounds_at_last_restart) /
2200 distance_traveled_since_last_restart;
2201 normalized_gap_at_last_trial_ = std::numeric_limits<double>::infinity();
2202 } else if (params_.restart_strategy() ==
2203 PrimalDualHybridGradientParams::ADAPTIVE_DISTANCE_BASED) {
2204 // Update parameters for distance-based restarts.
2205 distance_based_restart_info_ = {
2206 .distance_moved_last_restart_period = DistanceTraveledFromLastStart(
2207 current_primal_solution_, current_dual_solution_),
2208 .length_of_last_restart_period = primal_average_.NumTerms()};
2209 }
2210 primal_average_.Clear();
2211 dual_average_.Clear();
2212 AssignVector(current_primal_solution_, ShardedWorkingQp().PrimalSharder(),
2213 /*dest=*/last_primal_start_point_);
2214 AssignVector(current_dual_solution_, ShardedWorkingQp().DualSharder(),
2215 /*dest=*/last_dual_start_point_);
2216}
2217
2218// Adds the work (`iteration_number`, `cumulative_kkt_matrix_passes`,
2219// `cumulative_rejected_steps`, and `cumulative_time_sec`) from
2220// `additional_work_stats` to `stats` and returns the result.
2221// `stats` is intentionally passed by value.
2222IterationStats AddWorkStats(IterationStats stats,
2223 const IterationStats& additional_work_stats) {
2224 stats.set_iteration_number(stats.iteration_number() +
2225 additional_work_stats.iteration_number());
2226 stats.set_cumulative_kkt_matrix_passes(
2227 stats.cumulative_kkt_matrix_passes() +
2228 additional_work_stats.cumulative_kkt_matrix_passes());
2229 stats.set_cumulative_rejected_steps(
2230 stats.cumulative_rejected_steps() +
2231 additional_work_stats.cumulative_rejected_steps());
2232 stats.set_cumulative_time_sec(stats.cumulative_time_sec() +
2233 additional_work_stats.cumulative_time_sec());
2234 return stats;
2235}
2236
2237// Accumulates and returns the work (`iteration_number`,
2238// `cumulative_kkt_matrix_passes`, `cumulative_rejected_steps`, and
2239// `cumulative_time_sec`) from `solve_log.feasibility_polishing_details`.
2240IterationStats WorkFromFeasibilityPolishing(const SolveLog& solve_log) {
2241 IterationStats result;
2242 for (const FeasibilityPolishingDetails& feasibility_polishing_detail :
2243 solve_log.feasibility_polishing_details()) {
2244 result = AddWorkStats(std::move(result),
2245 feasibility_polishing_detail.solution_stats());
2246 }
2247 return result;
2248}
2249
2250std::optional<SolverResult> Solver::MajorIterationAndTerminationCheck(
2251 const IterationType iteration_type, const bool force_numerical_termination,
2252 const std::atomic<bool>* interrupt_solve,
2253 const IterationStats& work_from_feasibility_polishing,
2254 SolveLog& solve_log) {
2255 const int major_iteration_cycle =
2256 iterations_completed_ % params_.major_iteration_frequency();
2257 const bool is_major_iteration =
2258 major_iteration_cycle == 0 && iterations_completed_ > 0;
2259 // Just decide what to do for now. The actual restart, if any, is
2260 // performed after the termination check.
2261 const RestartChoice restart = force_numerical_termination
2262 ? RESTART_CHOICE_NO_RESTART
2263 : ChooseRestartToApply(is_major_iteration);
2264 IterationStats stats = CreateSimpleIterationStats(restart);
2265 IterationStats full_work_stats =
2266 AddWorkStats(stats, work_from_feasibility_polishing);
2267 const bool check_termination =
2268 major_iteration_cycle % params_.termination_check_frequency() == 0 ||
2269 CheckSimpleTerminationCriteria(params_.termination_criteria(),
2270 full_work_stats, interrupt_solve)
2271 .has_value() ||
2272 force_numerical_termination;
2273 // We check termination on every major iteration.
2274 DCHECK(!is_major_iteration || check_termination);
2275 if (check_termination) {
2276 // Check for termination and update iteration stats with both simple and
2277 // solution statistics. The latter are computationally harder to compute and
2278 // hence only computed here.
2279 VectorXd primal_average = PrimalAverage();
2280 VectorXd dual_average = DualAverage();
2281
2282 const std::optional<TerminationReasonAndPointType>
2283 maybe_termination_reason =
2284 preprocess_solver_->UpdateIterationStatsAndCheckTermination(
2285 params_, force_numerical_termination, current_primal_solution_,
2286 current_dual_solution_,
2287 primal_average_.HasNonzeroWeight() ? &primal_average : nullptr,
2288 dual_average_.HasNonzeroWeight() ? &dual_average : nullptr,
2289 current_primal_delta_.size() > 0 ? &current_primal_delta_
2290 : nullptr,
2291 current_dual_delta_.size() > 0 ? &current_dual_delta_ : nullptr,
2292 last_primal_start_point_, last_dual_start_point_,
2293 interrupt_solve, iteration_type, full_work_stats, stats);
2294 if (params_.record_iteration_stats()) {
2295 *solve_log.add_iteration_stats() = stats;
2296 }
2297 // We've terminated.
2298 if (maybe_termination_reason.has_value()) {
2299 IterationStats terminating_full_stats =
2300 AddWorkStats(stats, work_from_feasibility_polishing);
2301 return PickSolutionAndConstructSolverResult(
2302 std::move(primal_average), std::move(dual_average),
2303 terminating_full_stats, maybe_termination_reason->reason,
2304 maybe_termination_reason->type, std::move(solve_log));
2305 }
2306 } else if (params_.record_iteration_stats()) {
2307 // Record simple iteration stats only.
2308 *solve_log.add_iteration_stats() = stats;
2309 }
2310 ApplyRestartChoice(restart);
2311 return std::nullopt;
2312}
2313
2314void Solver::ResetAverageToCurrent() {
2315 primal_average_.Clear();
2316 dual_average_.Clear();
2317 primal_average_.Add(current_primal_solution_, /*weight=*/1.0);
2318 dual_average_.Add(current_dual_solution_, /*weight=*/1.0);
2319}
2320
2321void Solver::LogNumericalTermination() const {
2322 if (params_.verbosity_level() >= 2) {
2323 SOLVER_LOG(&preprocess_solver_->Logger(),
2324 "Forced numerical termination at iteration ",
2325 iterations_completed_);
2326 }
2327}
2328
2329void Solver::LogInnerIterationLimitHit() const {
2330 SOLVER_LOG(&preprocess_solver_->Logger(),
2331 "WARNING: Inner iteration limit reached at iteration ",
2332 iterations_completed_);
2333}
2334
2335InnerStepOutcome Solver::TakeMalitskyPockStep() {
2336 InnerStepOutcome outcome = InnerStepOutcome::kSuccessful;
2337 const double primal_step_size = step_size_ / primal_weight_;
2338 NextSolutionAndDelta next_primal_solution =
2339 ComputeNextPrimalSolution(primal_step_size);
2340 // The theory by Malitsky and Pock holds for any new_step_size in the interval
2341 // [`step_size`, `step_size` * sqrt(1 + `ratio_last_two_step_sizes_`)].
2342 // `dilating_coeff` determines where in this interval the new step size lands.
2343 // NOTE: Malitsky and Pock use theta for `ratio_last_two_step_sizes`.
2344 double dilating_coeff =
2345 1 + (params_.malitsky_pock_parameters().step_size_interpolation() *
2346 (sqrt(1 + ratio_last_two_step_sizes_) - 1));
2347 double new_primal_step_size = primal_step_size * dilating_coeff;
2348 double step_size_downscaling =
2349 params_.malitsky_pock_parameters().step_size_downscaling_factor();
2350 double contraction_factor =
2351 params_.malitsky_pock_parameters().linesearch_contraction_factor();
2352 const double dual_weight = primal_weight_ * primal_weight_;
2353 int inner_iterations = 0;
2354 for (bool accepted_step = false; !accepted_step; ++inner_iterations) {
2355 if (inner_iterations >= 60) {
2356 LogInnerIterationLimitHit();
2357 ResetAverageToCurrent();
2358 outcome = InnerStepOutcome::kForceNumericalTermination;
2359 break;
2360 }
2361 const double new_last_two_step_sizes_ratio =
2362 new_primal_step_size / primal_step_size;
2363 NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
2364 dual_weight * new_primal_step_size, new_last_two_step_sizes_ratio,
2365 next_primal_solution);
2366
2367 VectorXd next_dual_product = TransposedMatrixVectorProduct(
2368 WorkingQp().constraint_matrix, next_dual_solution.value,
2369 ShardedWorkingQp().ConstraintMatrixSharder());
2370 double delta_dual_norm =
2371 Norm(next_dual_solution.delta, ShardedWorkingQp().DualSharder());
2372 double delta_dual_prod_norm =
2373 Distance(current_dual_product_, next_dual_product,
2374 ShardedWorkingQp().PrimalSharder());
2375 if (primal_weight_ * new_primal_step_size * delta_dual_prod_norm <=
2376 contraction_factor * delta_dual_norm) {
2377 // Accept new_step_size as a good step.
2378 step_size_ = new_primal_step_size * primal_weight_;
2379 ratio_last_two_step_sizes_ = new_last_two_step_sizes_ratio;
2380 // The Malitsky and Pock guarantee uses a nonsymmetric weighted average:
2381 // the primal variable average involves the initial point, while the dual
2382 // doesn't. See Theorem 2 in https://arxiv.org/pdf/1608.08883.pdf for
2383 // details.
2384 if (!primal_average_.HasNonzeroWeight()) {
2385 primal_average_.Add(
2386 current_primal_solution_,
2387 /*weight=*/new_primal_step_size * new_last_two_step_sizes_ratio);
2388 }
2389
2390 current_primal_solution_ = std::move(next_primal_solution.value);
2391 current_dual_solution_ = std::move(next_dual_solution.value);
2392 current_dual_product_ = std::move(next_dual_product);
2393 primal_average_.Add(current_primal_solution_,
2394 /*weight=*/new_primal_step_size);
2395 dual_average_.Add(current_dual_solution_,
2396 /*weight=*/new_primal_step_size);
2397 const double movement =
2398 ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
2399 if (movement == 0.0) {
2400 LogNumericalTermination();
2401 ResetAverageToCurrent();
2402 outcome = InnerStepOutcome::kForceNumericalTermination;
2403 } else if (movement > kDivergentMovement) {
2404 LogNumericalTermination();
2405 outcome = InnerStepOutcome::kForceNumericalTermination;
2406 }
2407 current_primal_delta_ = std::move(next_primal_solution.delta);
2408 current_dual_delta_ = std::move(next_dual_solution.delta);
2409 break;
2410 } else {
2411 new_primal_step_size = step_size_downscaling * new_primal_step_size;
2412 }
2413 }
2414 // `inner_iterations` isn't incremented for the accepted step.
2415 num_rejected_steps_ += inner_iterations;
2416 return outcome;
2417}
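
// A standalone sketch of the Malitsky-Pock acceptance test above (plain
// doubles; the norms are sharded computations in the real code, and the name
// is illustrative). A candidate primal step size is accepted when the change
// it induces in K^T y is small relative to the change in y itself.
bool MalitskyPockStepAccepted(double primal_weight,
                              double new_primal_step_size,
                              double delta_dual_product_norm,
                              double delta_dual_norm,
                              double contraction_factor) {
  return primal_weight * new_primal_step_size * delta_dual_product_norm <=
         contraction_factor * delta_dual_norm;
}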
2418
2419InnerStepOutcome Solver::TakeAdaptiveStep() {
2420 InnerStepOutcome outcome = InnerStepOutcome::kSuccessful;
2421 int inner_iterations = 0;
2422 for (bool accepted_step = false; !accepted_step; ++inner_iterations) {
2423 if (inner_iterations >= 60) {
2424 LogInnerIterationLimitHit();
2425 ResetAverageToCurrent();
2426 outcome = InnerStepOutcome::kForceNumericalTermination;
2427 break;
2428 }
2429 const double primal_step_size = step_size_ / primal_weight_;
2430 const double dual_step_size = step_size_ * primal_weight_;
2431 NextSolutionAndDelta next_primal_solution =
2432 ComputeNextPrimalSolution(primal_step_size);
2433 NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
2434 dual_step_size, /*extrapolation_factor=*/1.0, next_primal_solution);
2435 const double movement =
2436 ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
2437 if (movement == 0.0) {
2438 LogNumericalTermination();
2439 ResetAverageToCurrent();
2440 outcome = InnerStepOutcome::kForceNumericalTermination;
2441 break;
2442 } else if (movement > kDivergentMovement) {
2443 LogNumericalTermination();
2444 outcome = InnerStepOutcome::kForceNumericalTermination;
2445 break;
2446 }
2447 VectorXd next_dual_product = TransposedMatrixVectorProduct(
2448 WorkingQp().constraint_matrix, next_dual_solution.value,
2449 ShardedWorkingQp().ConstraintMatrixSharder());
2450 const double nonlinearity =
2451 ComputeNonlinearity(next_primal_solution.delta, next_dual_product);
2452
2453 // See equation (5) in https://arxiv.org/pdf/2106.04756.pdf.
2454 const double step_size_limit =
2455 nonlinearity > 0 ? movement / nonlinearity
2456 : std::numeric_limits<double>::infinity();
2457
2458 if (step_size_ <= step_size_limit) {
2459 current_primal_solution_ = std::move(next_primal_solution.value);
2460 current_dual_solution_ = std::move(next_dual_solution.value);
2461 current_dual_product_ = std::move(next_dual_product);
2462 current_primal_delta_ = std::move(next_primal_solution.delta);
2463 current_dual_delta_ = std::move(next_dual_solution.delta);
2464 primal_average_.Add(current_primal_solution_, /*weight=*/step_size_);
2465 dual_average_.Add(current_dual_solution_, /*weight=*/step_size_);
2466 accepted_step = true;
2467 }
2468 const double total_steps_attempted =
2469 num_rejected_steps_ + inner_iterations + iterations_completed_ + 1;
2470 // Our step sizes are a factor 1 - (`total_steps_attempted` + 1)^(-
2471 // `step_size_reduction_exponent`) smaller than they could be as a margin to
2472 // reduce rejected steps.
2473 // The std::isinf() test protects against NAN if std::pow() == 1.0.
2474 const double first_term =
2475 std::isinf(step_size_limit)
2476 ? step_size_limit
2477 : (1 - std::pow(total_steps_attempted + 1.0,
2478 -params_.adaptive_linesearch_parameters()
2479 .step_size_reduction_exponent())) *
2480 step_size_limit;
2481 const double second_term =
2482 (1 + std::pow(total_steps_attempted + 1.0,
2483 -params_.adaptive_linesearch_parameters()
2484 .step_size_growth_exponent())) *
2485 step_size_;
2486 // From the first term when we have to reject a step, `step_size_`
2487 // decreases by a factor of at least 1 - (`total_steps_attempted` + 1)^(-
2488 // `step_size_reduction_exponent`). From the second term we increase
2489 // `step_size_` by a factor of at most 1 + (`total_steps_attempted` +
2490 // 1)^(-`step_size_growth_exponent`). Therefore if more than order
2491 // (`total_steps_attempted` + 1)^(`step_size_reduction_exponent`
2492 // - `step_size_growth_exponent`) fraction of the time we have a rejected
2493 // step, we overall decrease `step_size_`. When `step_size_` is
2494 // sufficiently small we stop having rejected steps.
2495 step_size_ = std::min(first_term, second_term);
2496 }
2497 // `inner_iterations` is incremented for the accepted step.
2498 num_rejected_steps_ += inner_iterations - 1;
2499 return outcome;
2500}
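
// A standalone sketch of the adaptive step size rule above (plain doubles;
// names are illustrative). `movement` and `nonlinearity` are the quantities
// from ComputeMovement() and ComputeNonlinearity(); the step is accepted when
// step_size <= movement / nonlinearity (equation (5) of the cited paper), and
// the next trial step size is the smaller of a slightly shrunk limit and a
// slightly grown current step.
double NextAdaptiveStepSize(double step_size, double movement,
                            double nonlinearity,
                            double total_steps_attempted,
                            double reduction_exponent,
                            double growth_exponent) {
  const double step_size_limit =
      nonlinearity > 0 ? movement / nonlinearity
                       : std::numeric_limits<double>::infinity();
  const double first_term =
      std::isinf(step_size_limit)
          ? step_size_limit
          : (1 - std::pow(total_steps_attempted + 1.0, -reduction_exponent)) *
                step_size_limit;
  const double second_term =
      (1 + std::pow(total_steps_attempted + 1.0, -growth_exponent)) *
      step_size;
  return std::min(first_term, second_term);
}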
2501
2502InnerStepOutcome Solver::TakeConstantSizeStep() {
2503 const double primal_step_size = step_size_ / primal_weight_;
2504 const double dual_step_size = step_size_ * primal_weight_;
2505 NextSolutionAndDelta next_primal_solution =
2506 ComputeNextPrimalSolution(primal_step_size);
2507 NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
2508 dual_step_size, /*extrapolation_factor=*/1.0, next_primal_solution);
2509 const double movement =
2510 ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
2511 if (movement == 0.0) {
2512 LogNumericalTermination();
2513 ResetAverageToCurrent();
2514 return InnerStepOutcome::kForceNumericalTermination;
2515 } else if (movement > kDivergentMovement) {
2516 LogNumericalTermination();
2517 return InnerStepOutcome::kForceNumericalTermination;
2518 }
2519 VectorXd next_dual_product = TransposedMatrixVectorProduct(
2520 WorkingQp().constraint_matrix, next_dual_solution.value,
2521 ShardedWorkingQp().ConstraintMatrixSharder());
2522 current_primal_solution_ = std::move(next_primal_solution.value);
2523 current_dual_solution_ = std::move(next_dual_solution.value);
2524 current_dual_product_ = std::move(next_dual_product);
2525 current_primal_delta_ = std::move(next_primal_solution.delta);
2526 current_dual_delta_ = std::move(next_dual_solution.delta);
2527 primal_average_.Add(current_primal_solution_, /*weight=*/step_size_);
2528 dual_average_.Add(current_dual_solution_, /*weight=*/step_size_);
2529 return InnerStepOutcome::kSuccessful;
2530}
2531
2532IterationStats Solver::TotalWorkSoFar(const SolveLog& solve_log) const {
2533 IterationStats stats = CreateSimpleIterationStats(RESTART_CHOICE_NO_RESTART);
2534 IterationStats full_stats =
2535 AddWorkStats(stats, WorkFromFeasibilityPolishing(solve_log));
2536 return full_stats;
2537}
2538
2539FeasibilityPolishingDetails BuildFeasibilityPolishingDetails(
2540 PolishingPhaseType phase_type, int iteration_count,
2541 const PrimalDualHybridGradientParams& params, const SolveLog& solve_log) {
2542 FeasibilityPolishingDetails details;
2543 details.set_polishing_phase_type(phase_type);
2544 details.set_main_iteration_count(iteration_count);
2545 *details.mutable_params() = params;
2546 details.set_termination_reason(solve_log.termination_reason());
2547 details.set_iteration_count(solve_log.iteration_count());
2548 details.set_solve_time_sec(solve_log.solve_time_sec());
2549 *details.mutable_solution_stats() = solve_log.solution_stats();
2550 details.set_solution_type(solve_log.solution_type());
2551 absl::c_copy(solve_log.iteration_stats(),
2552 google::protobuf::RepeatedPtrFieldBackInserter(
2553 details.mutable_iteration_stats()));
2554 return details;
2555}
2556
2557// Note: `TERMINATION_REASON_INTERRUPTED_BY_USER` is treated as a work limit
2558// (that was determined in real-time by the user).
2559bool TerminationReasonIsWorkLimit(const TerminationReason reason) {
2560 return reason == TERMINATION_REASON_ITERATION_LIMIT ||
2561 reason == TERMINATION_REASON_TIME_LIMIT ||
2562 reason == TERMINATION_REASON_KKT_MATRIX_PASS_LIMIT ||
2563 reason == TERMINATION_REASON_INTERRUPTED_BY_USER;
2564}
2565
2566std::optional<SolverResult> Solver::TryFeasibilityPolishing(
2567 const int iteration_limit, const std::atomic<bool>* interrupt_solve,
2568 SolveLog& solve_log) {
2569 TerminationCriteria::DetailedOptimalityCriteria optimality_criteria =
2570 EffectiveOptimalityCriteria(params_.termination_criteria());
2571
2572 VectorXd average_primal = PrimalAverage();
2573 VectorXd average_dual = DualAverage();
2574
2575 ConvergenceInformation first_convergence_info;
2576 preprocess_solver_->ComputeConvergenceAndInfeasibilityFromWorkingSolution(
2577 params_, average_primal, average_dual, POINT_TYPE_AVERAGE_ITERATE,
2578 &first_convergence_info, nullptr);
2579
2580 if (!ObjectiveGapMet(optimality_criteria, first_convergence_info)) {
2581 if (params_.verbosity_level() >= 2) {
2582 SOLVER_LOG(&preprocess_solver_->Logger(),
2583 "Skipping feasibility polishing because the objective gap "
2584 "is too large.");
2585 }
2586 return std::nullopt;
2587 }
2588
2589 if (params_.verbosity_level() >= 2) {
2590 SOLVER_LOG(&preprocess_solver_->Logger(),
2591 "Starting primal feasibility polishing");
2592 }
2593 SolverResult primal_result = TryPrimalPolishing(
2594 std::move(average_primal), iteration_limit, interrupt_solve, solve_log);
2595
2596 if (params_.verbosity_level() >= 2) {
2597 SOLVER_LOG(
2598 &preprocess_solver_->Logger(),
2599 "Primal feasibility polishing termination reason: ",
2600 TerminationReason_Name(primal_result.solve_log.termination_reason()));
2601 }
2602 if (TerminationReasonIsWorkLimit(
2603 primal_result.solve_log.termination_reason())) {
2604 return std::nullopt;
2605 } else if (primal_result.solve_log.termination_reason() !=
2606 TERMINATION_REASON_OPTIMAL) {
2607 // Note: `TERMINATION_REASON_PRIMAL_INFEASIBLE` could happen normally, but
2608 // we haven't ensured that the correct solution is returned in that case, so
2609 // we ignore the polishing result indicating infeasibility.
2610 // `TERMINATION_REASON_NUMERICAL_ERROR` can occur, but would be surprising
2611 // and interesting. Other termination reasons are probably bugs.
2612 SOLVER_LOG(&preprocess_solver_->Logger(),
2613 "WARNING: Primal feasibility polishing terminated with error ",
2614 primal_result.solve_log.termination_reason());
2615 return std::nullopt;
2616 }
2617
2618 if (params_.verbosity_level() >= 2) {
2619 SOLVER_LOG(&preprocess_solver_->Logger(),
2620 "Starting dual feasibility polishing");
2621 }
2622 SolverResult dual_result = TryDualPolishing(
2623 std::move(average_dual), iteration_limit, interrupt_solve, solve_log);
2624
2625 if (params_.verbosity_level() >= 2) {
2626 SOLVER_LOG(
2627 &preprocess_solver_->Logger(),
2628 "Dual feasibility polishing termination reason: ",
2629 TerminationReason_Name(dual_result.solve_log.termination_reason()));
2630 }
2631
2632 if (TerminationReasonIsWorkLimit(
2633 dual_result.solve_log.termination_reason())) {
2634 return std::nullopt;
2635 } else if (dual_result.solve_log.termination_reason() !=
2636 TERMINATION_REASON_OPTIMAL) {
2637 // Note: The comment in the corresponding location when checking the
2638 // termination reason for primal feasibility polishing applies here
2639 // too.
2640 SOLVER_LOG(&preprocess_solver_->Logger(),
2641 "WARNING: Dual feasibility polishing terminated with error ",
2642 dual_result.solve_log.termination_reason());
2643 return std::nullopt;
2644 }
2645
2646 IterationStats full_stats = TotalWorkSoFar(solve_log);
2647 preprocess_solver_->ComputeConvergenceAndInfeasibilityFromWorkingSolution(
2648 params_, primal_result.primal_solution, dual_result.dual_solution,
2649 POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION,
2650 full_stats.add_convergence_information(), nullptr);
2651 if (params_.verbosity_level() >= 2) {
2652 SOLVER_LOG(&preprocess_solver_->Logger(),
2653 "solution stats for polished solution:");
2654 LogIterationStatsHeader(params_.verbosity_level(),
2655 /*use_feasibility_polishing=*/true,
2656 preprocess_solver_->Logger());
2657 LogIterationStats(params_.verbosity_level(),
2658 /*use_feasibility_polishing=*/true,
2659 IterationType::kFeasibilityPolishingTermination,
2660 full_stats, params_.termination_criteria(),
2661 preprocess_solver_->OriginalBoundNorms(),
2662 POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION,
2663 preprocess_solver_->Logger());
2664 }
2665 std::optional<TerminationReasonAndPointType> earned_termination =
2666 CheckIterateTerminationCriteria(params_.termination_criteria(),
2667 full_stats,
2668 preprocess_solver_->OriginalBoundNorms(),
2669 /*force_numerical_termination=*/false);
2670 if (earned_termination.has_value()) {
2671 return ConstructSolverResult(std::move(primal_result.primal_solution),
2672 std::move(dual_result.dual_solution),
2673 full_stats, earned_termination->reason,
2674 POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION,
2675 solve_log);
2676 }
2677 // Note: A typical termination check would now call
2678 // `CheckSimpleTerminationCriteria`. However, there is no obvious iterate to
2679 // use for a `SolverResult`. Instead, allow the main iteration loop to notice
2680 // if the termination criteria checking has caused us to meet the simple
2681 // termination criteria. This would be a rare case anyway, since dual
2682 // feasibility polishing didn't terminate because of a work limit, and
2683 // `full_stats` is created shortly after `TryDualPolishing` returned.
2684 return std::nullopt;
2685}
2686
2687TerminationCriteria ReduceWorkLimitsByPreviousWork(
2688 TerminationCriteria criteria, const int iteration_limit,
2689 const IterationStats& previous_work) {
2690 criteria.set_iteration_limit(std::max(
2691 0, std::min(iteration_limit, criteria.iteration_limit() -
2692 previous_work.iteration_number())));
2693 criteria.set_kkt_matrix_pass_limit(
2694 std::max(0.0, criteria.kkt_matrix_pass_limit() -
2695 previous_work.cumulative_kkt_matrix_passes()));
2696 criteria.set_time_sec_limit(std::max(
2697 0.0, criteria.time_sec_limit() - previous_work.cumulative_time_sec()));
2698 return criteria;
2699}
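// [Editorial note, not part of the original file] A worked example of the
// reduction above, with assumed numbers: if the overall criteria allow 1000
// iterations, `previous_work` already used 600, and the caller passes
// `iteration_limit` = 75, the subproblem gets max(0, min(75, 1000 - 600)) =
// 75 iterations; the KKT-matrix-pass and time limits are likewise reduced by
// the passes and seconds already spent.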
2700
2701SolverResult Solver::TryPrimalPolishing(
2702 VectorXd starting_primal_solution, const int iteration_limit,
2703 const std::atomic<bool>* interrupt_solve, SolveLog& solve_log) {
2704 PrimalDualHybridGradientParams primal_feasibility_params = params_;
2705 *primal_feasibility_params.mutable_termination_criteria() =
2706 ReduceWorkLimitsByPreviousWork(params_.termination_criteria(),
2707 iteration_limit,
2708 TotalWorkSoFar(solve_log));
2709
2710  // `objective` starts as all zeros; after the swap below it will hold the
  // original objective vector.
2711 VectorXd objective;
2712 SetZero(ShardedWorkingQp().PrimalSharder(), objective);
2713 preprocess_solver_->SwapObjectiveVector(objective);
2714
2715 TerminationCriteria::DetailedOptimalityCriteria criteria =
2716 EffectiveOptimalityCriteria(params_.termination_criteria());
2717 const double kInfinity = std::numeric_limits<double>::infinity();
2718 criteria.set_eps_optimal_dual_residual_absolute(kInfinity);
2719 criteria.set_eps_optimal_dual_residual_relative(kInfinity);
2720 criteria.set_eps_optimal_objective_gap_absolute(kInfinity);
2721 criteria.set_eps_optimal_objective_gap_relative(kInfinity);
2722 *primal_feasibility_params.mutable_termination_criteria()
2723 ->mutable_detailed_optimality_criteria() = criteria;
2724 // TODO(user): Evaluate disabling primal weight updates for this primal
2725 // feasibility problem.
2726 VectorXd primal_feasibility_starting_dual;
2727 SetZero(ShardedWorkingQp().DualSharder(), primal_feasibility_starting_dual);
2728 Solver primal_solver(primal_feasibility_params,
2729 std::move(starting_primal_solution),
2730 std::move(primal_feasibility_starting_dual), step_size_,
2731 primal_weight_, preprocess_solver_);
2732 SolveLog primal_solve_log;
2733 // Stop `timer_`, since the time in `primal_solver.Solve()` will be recorded
2734 // by its timer.
2735 timer_.Stop();
2736 SolverResult primal_result = primal_solver.Solve(
2737 IterationType::kPrimalFeasibility, interrupt_solve, primal_solve_log);
2738 timer_.Start();
2739 // Restore the original objective.
2740 preprocess_solver_->SwapObjectiveVector(objective);
2741
2742 *solve_log.add_feasibility_polishing_details() =
2743 BuildFeasibilityPolishingDetails(
2744 POLISHING_PHASE_TYPE_PRIMAL_FEASIBILITY, iterations_completed_,
2745 primal_feasibility_params, primal_result.solve_log);
2746 return primal_result;
2747}
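// [Editorial note, not part of the original file] Net effect of the setup
// above: the primal feasibility subproblem is the original problem with a
// zero objective (so every primal-feasible point is optimal) and with the
// dual-residual and objective-gap tolerances set to infinity, so its
// termination is governed by the primal residual criteria alone.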
2748
2749VectorXd MapFiniteValuesToZero(const Sharder& sharder, const VectorXd& input) {
2750 VectorXd output(input.size());
2751 const auto make_finite_values_zero = [](const double x) {
2752 return std::isfinite(x) ? 0.0 : x;
2753 };
2754 sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
2755 shard(output) = shard(input).unaryExpr(make_finite_values_zero);
2756 });
2757 return output;
2758}
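// [Editorial example, not part of the original file] MapFiniteValuesToZero
// keeps infinities and zeroes out finite entries, e.g. bounds
// (-inf, -2.5, 0, 7) map to (-inf, 0, 0, 0); these are the "homogeneous"
// bounds used by the dual feasibility polishing phase below.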
2759
2760SolverResult Solver::TryDualPolishing(VectorXd starting_dual_solution,
2761 const int iteration_limit,
2762 const std::atomic<bool>* interrupt_solve,
2763 SolveLog& solve_log) {
2764 PrimalDualHybridGradientParams dual_feasibility_params = params_;
2765 *dual_feasibility_params.mutable_termination_criteria() =
2766 ReduceWorkLimitsByPreviousWork(params_.termination_criteria(),
2767 iteration_limit,
2768 TotalWorkSoFar(solve_log));
2769
2770  // These will initially contain the homogeneous variable and constraint
2771 // bounds, but will contain the original variable and constraint bounds
2772 // after the swap.
2773 VectorXd constraint_lower_bounds = MapFiniteValuesToZero(
2774 ShardedWorkingQp().DualSharder(), WorkingQp().constraint_lower_bounds);
2775 VectorXd constraint_upper_bounds = MapFiniteValuesToZero(
2776 ShardedWorkingQp().DualSharder(), WorkingQp().constraint_upper_bounds);
2777 VectorXd variable_lower_bounds = MapFiniteValuesToZero(
2778 ShardedWorkingQp().PrimalSharder(), WorkingQp().variable_lower_bounds);
2779 VectorXd variable_upper_bounds = MapFiniteValuesToZero(
2780 ShardedWorkingQp().PrimalSharder(), WorkingQp().variable_upper_bounds);
2781 preprocess_solver_->SwapConstraintBounds(constraint_lower_bounds,
2782 constraint_upper_bounds);
2783 preprocess_solver_->SwapVariableBounds(variable_lower_bounds,
2784 variable_upper_bounds);
2785
2786 TerminationCriteria::DetailedOptimalityCriteria criteria =
2787 EffectiveOptimalityCriteria(params_.termination_criteria());
2788 const double kInfinity = std::numeric_limits<double>::infinity();
2789 criteria.set_eps_optimal_primal_residual_absolute(kInfinity);
2790 criteria.set_eps_optimal_primal_residual_relative(kInfinity);
2791 criteria.set_eps_optimal_objective_gap_absolute(kInfinity);
2792 criteria.set_eps_optimal_objective_gap_relative(kInfinity);
2793 *dual_feasibility_params.mutable_termination_criteria()
2794 ->mutable_detailed_optimality_criteria() = criteria;
2795 // TODO(user): Evaluate disabling primal weight updates for this dual
2796 // feasibility problem.
2797 VectorXd dual_feasibility_starting_primal;
2798 SetZero(ShardedWorkingQp().PrimalSharder(), dual_feasibility_starting_primal);
2799 Solver dual_solver(dual_feasibility_params,
2800 std::move(dual_feasibility_starting_primal),
2801 std::move(starting_dual_solution), step_size_,
2802 primal_weight_, preprocess_solver_);
2803 SolveLog dual_solve_log;
2804 // Stop `timer_`, since the time in `dual_solver.Solve()` will be recorded
2805 // by its timer.
2806 timer_.Stop();
2807 SolverResult dual_result = dual_solver.Solve(IterationType::kDualFeasibility,
2808 interrupt_solve, dual_solve_log);
2809 timer_.Start();
2810 // Restore the original bounds.
2811 preprocess_solver_->SwapConstraintBounds(constraint_lower_bounds,
2812 constraint_upper_bounds);
2813 preprocess_solver_->SwapVariableBounds(variable_lower_bounds,
2814 variable_upper_bounds);
2815 *solve_log.add_feasibility_polishing_details() =
2816 BuildFeasibilityPolishingDetails(
2817 POLISHING_PHASE_TYPE_DUAL_FEASIBILITY, iterations_completed_,
2818 dual_feasibility_params, dual_result.solve_log);
2819 return dual_result;
2820}
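// [Editorial note, not part of the original file] Net effect of the setup
// above: the dual feasibility subproblem keeps the original objective but
// replaces every finite variable and constraint bound by 0 (infinite bounds
// stay infinite), and sets the primal-residual and objective-gap tolerances
// to infinity, so its termination is governed by the dual residual criteria
// alone.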
2821
2822SolverResult Solver::Solve(const IterationType iteration_type,
2823 const std::atomic<bool>* interrupt_solve,
2824 SolveLog solve_log) {
2825 preprocessing_time_sec_ = solve_log.preprocessing_time_sec();
2826 timer_.Start();
2827 last_primal_start_point_ =
2828 CloneVector(current_primal_solution_, ShardedWorkingQp().PrimalSharder());
2829 last_dual_start_point_ =
2830 CloneVector(current_dual_solution_, ShardedWorkingQp().DualSharder());
2831 // Note: Any cached values computed here also need to be recomputed after a
2832 // restart.
2833
2834 ratio_last_two_step_sizes_ = 1;
2835 current_dual_product_ = TransposedMatrixVectorProduct(
2836 WorkingQp().constraint_matrix, current_dual_solution_,
2837 ShardedWorkingQp().ConstraintMatrixSharder());
2838
2839 // This is set to true if we can't proceed any more because of numerical
2840 // issues. We may or may not have found the optimal solution.
2841 bool force_numerical_termination = false;
2842
2843 int next_feasibility_polishing_iteration = 100;
2844
2845 num_rejected_steps_ = 0;
2846
2847 IterationStats work_from_feasibility_polishing =
2848 WorkFromFeasibilityPolishing(solve_log);
2849 for (iterations_completed_ = 0;; ++iterations_completed_) {
2850 // This code performs the logic of the major iterations and termination
2851 // checks. It may modify the current solution and primal weight (e.g., when
2852 // performing a restart).
2853 const std::optional<SolverResult> maybe_result =
2854 MajorIterationAndTerminationCheck(
2855 iteration_type, force_numerical_termination, interrupt_solve,
2856 work_from_feasibility_polishing, solve_log);
2857 if (maybe_result.has_value()) {
2858 return maybe_result.value();
2859 }
2860
2861 if (params_.use_feasibility_polishing() &&
2862 iteration_type == IterationType::kNormal &&
2863 iterations_completed_ >= next_feasibility_polishing_iteration) {
2864 // The total number of iterations in feasibility polishing is at most
2865 // `4 * iterations_completed_ / kFeasibilityIterationFraction`.
2866 // One factor of two is because there are both primal and dual feasibility
2867 // polishing phases, and the other factor of two is because
2868 // `next_feasibility_polishing_iteration` increases by a factor of 2 each
2869 // feasibility polishing phase, so the sum of iteration limits is at most
2870 // twice the last value.
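      // [Editorial worked example, not part of the original file] With
      // kFeasibilityIterationFraction = 8 and polishing attempts triggered at
      // iterations 100, 200, 400, and 800, each phase gets per-attempt limits
      // 12, 25, 50, and 100, so the primal and dual phases together use at
      // most 2 * (12 + 25 + 50 + 100) = 374 <= 4 * 800 / 8 = 400 iterations.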
2871 const int kFeasibilityIterationFraction = 8;
2872 const std::optional<SolverResult> feasibility_result =
2873 TryFeasibilityPolishing(
2874 iterations_completed_ / kFeasibilityIterationFraction,
2875 interrupt_solve, solve_log);
2876 if (feasibility_result.has_value()) {
2877 return *feasibility_result;
2878 }
2879 next_feasibility_polishing_iteration *= 2;
2880 // Update work to include new feasibility phases.
2881 work_from_feasibility_polishing = WorkFromFeasibilityPolishing(solve_log);
2882 }
2883
2884 // TODO(user): If we use a step rule that could reject many steps in a
2885 // row, we should add a termination check within this loop also. For the
2886 // Malitsky and Pock rule, we perform a termination check and declare
2887 // NUMERICAL_ERROR whenever we hit 60 inner iterations.
2888 InnerStepOutcome outcome;
2889 switch (params_.linesearch_rule()) {
2890 case PrimalDualHybridGradientParams::MALITSKY_POCK_LINESEARCH_RULE:
2891 outcome = TakeMalitskyPockStep();
2892 break;
2893 case PrimalDualHybridGradientParams::ADAPTIVE_LINESEARCH_RULE:
2894 outcome = TakeAdaptiveStep();
2895 break;
2896 case PrimalDualHybridGradientParams::CONSTANT_STEP_SIZE_RULE:
2897 outcome = TakeConstantSizeStep();
2898 break;
2899 default:
2900 LOG(FATAL) << "Unrecognized linesearch rule "
2901 << params_.linesearch_rule();
2902 }
2903 if (outcome == InnerStepOutcome::kForceNumericalTermination) {
2904 force_numerical_termination = true;
2905 }
2906 } // loop over iterations
2907}
2908
2909} // namespace
2910
2911SolverResult PrimalDualHybridGradient(
2912    QuadraticProgram qp, const PrimalDualHybridGradientParams& params,
2913 const std::atomic<bool>* interrupt_solve,
2914 std::function<void(const std::string&)> message_callback,
2915 IterationStatsCallback iteration_stats_callback) {
2916 return PrimalDualHybridGradient(std::move(qp), params, std::nullopt,
2917 interrupt_solve, std::move(message_callback),
2918 std::move(iteration_stats_callback));
2919}
2920
2921SolverResult PrimalDualHybridGradient(
2922    QuadraticProgram qp, const PrimalDualHybridGradientParams& params,
2923 std::optional<PrimalAndDualSolution> initial_solution,
2924 const std::atomic<bool>* interrupt_solve,
2925 std::function<void(const std::string&)> message_callback,
2926 IterationStatsCallback iteration_stats_callback) {
2927 SolverLogger logger;
2928 logger.EnableLogging(true);
2929 if (message_callback) {
2930 logger.AddInfoLoggingCallback(std::move(message_callback));
2931 } else {
2932 logger.SetLogToStdOut(true);
2933 }
2934  const absl::Status params_status =
2935      ValidatePrimalDualHybridGradientParams(params);
2936  if (!params_status.ok()) {
2937 return ErrorSolverResult(TERMINATION_REASON_INVALID_PARAMETER,
2938 params_status.ToString(), logger);
2939 }
2940 if (!qp.constraint_matrix.isCompressed()) {
2941 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
2942 "constraint_matrix must be in compressed format. "
2943 "Call constraint_matrix.makeCompressed()",
2944 logger);
2945 }
2946 const absl::Status dimensions_status = ValidateQuadraticProgramDimensions(qp);
2947 if (!dimensions_status.ok()) {
2948 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
2949 dimensions_status.ToString(), logger);
2950 }
2951 if (qp.objective_scaling_factor == 0) {
2952 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
2953 "The objective scaling factor cannot be zero.",
2954 logger);
2955 }
2956 if (params.use_feasibility_polishing() && !IsLinearProgram(qp)) {
2957 return ErrorSolverResult(
2958 TERMINATION_REASON_INVALID_PARAMETER,
2959 "use_feasibility_polishing is only implemented for linear programs.",
2960 logger);
2961 }
2962 PreprocessSolver solver(std::move(qp), params, &logger);
2963 return solver.PreprocessAndSolve(params, std::move(initial_solution),
2964 interrupt_solve,
2965 std::move(iteration_stats_callback));
2966}
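// [Editorial usage sketch, not part of the original file] One way a caller
// might drive this entry point, assuming `qp` is an already-populated
// QuadraticProgram whose constraint matrix has been compressed with
// qp.constraint_matrix.makeCompressed():
//
//   PrimalDualHybridGradientParams params;
//   params.set_verbosity_level(2);
//   SolverResult result = PrimalDualHybridGradient(
//       std::move(qp), params, /*interrupt_solve=*/nullptr,
//       /*message_callback=*/nullptr, /*iteration_stats_callback=*/nullptr);
//   if (result.solve_log.termination_reason() == TERMINATION_REASON_OPTIMAL) {
//     // result.primal_solution and result.dual_solution hold the solution.
//   }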
2967
2968namespace internal {
2969
2970glop::ProblemSolution ComputeStatuses(const QuadraticProgram& qp,
2971                                      const PrimalAndDualSolution& solution) {
2972  glop::ProblemSolution glop_solution(
2973 glop::RowIndex(solution.dual_solution.size()),
2974 glop::ColIndex(solution.primal_solution.size()));
2975  // The choice here doesn't matter much, since glop's preprocessor makes
  // little use of it.
2976 // We pick IMPRECISE since we are often calling this code early in the solve.
2977 glop_solution.status = glop::ProblemStatus::IMPRECISE;
2978 for (glop::RowIndex i{0}; i.value() < solution.dual_solution.size(); ++i) {
2979 if (qp.constraint_lower_bounds[i.value()] ==
2980 qp.constraint_upper_bounds[i.value()]) {
2981 glop_solution.constraint_statuses[i] =
2982 glop::ConstraintStatus::FIXED_VALUE;
2983 } else if (solution.dual_solution[i.value()] > 0) {
2984 glop_solution.constraint_statuses[i] =
2985 glop::ConstraintStatus::AT_LOWER_BOUND;
2986 } else if (solution.dual_solution[i.value()] < 0) {
2987 glop_solution.constraint_statuses[i] =
2988 glop::ConstraintStatus::AT_UPPER_BOUND;
2989 } else {
2990 glop_solution.constraint_statuses[i] = glop::ConstraintStatus::BASIC;
2991 }
2992 }
2993
2994 for (glop::ColIndex i{0}; i.value() < solution.primal_solution.size(); ++i) {
2995 const bool at_lb = solution.primal_solution[i.value()] <=
2996 qp.variable_lower_bounds[i.value()];
2997 const bool at_ub = solution.primal_solution[i.value()] >=
2998 qp.variable_upper_bounds[i.value()];
2999 // Note that `ShardedWeightedAverage` is designed so that variables at their
3000 // bounds will be exactly at their bounds even with floating-point roundoff.
3001 if (at_lb) {
3002 if (at_ub) {
3003 glop_solution.variable_statuses[i] = glop::VariableStatus::FIXED_VALUE;
3004 } else {
3005 glop_solution.variable_statuses[i] =
3006 glop::VariableStatus::AT_LOWER_BOUND;
3007 }
3008 } else {
3009 if (at_ub) {
3010 glop_solution.variable_statuses[i] =
3011 glop::VariableStatus::AT_UPPER_BOUND;
3012 } else {
3013 glop_solution.variable_statuses[i] = glop::VariableStatus::BASIC;
3014 }
3015 }
3016 }
3017 return glop_solution;
3018}
3019
3020} // namespace internal
3021
3022} // namespace operations_research::pdlp