26#include "Eigen/SparseCore"
27#include "absl/log/check.h"
28#include "absl/random/distributions.h"
// File-scope constant and Eigen aliases used throughout this file.
// NOTE(review): the leading "37"/"38"/... tokens fused into these lines are
// line-number artifacts from the extraction of this file, not valid C++.
37constexpr double kInfinity = std::numeric_limits<double>::infinity();
38using ::Eigen::ColMajor;
39using ::Eigen::SparseMatrix;
40using ::Eigen::VectorXd;
41using ::Eigen::VectorXi;
// Fragment of ShardedWeightedAverage::Add(datapoint, weight) — interior lines
// are missing from this extraction (the embedded numbering jumps 56 -> 59 -> 61).
// Folds `datapoint` into the running average via the numerically stable
// incremental update avg += w/(W + w) * (x - avg), applied per shard.
56 CHECK_EQ(datapoint.size(), average_.size());
59 const double weight_ratio =
weight / (sum_weights_ +
weight);
// NOTE(review): sum_weights_ is presumably updated on a line missing from this
// extraction — confirm against the full source.
61 shard(average_) += weight_ratio * (shard(datapoint) - shard(average_));
// Combines two bound values into a single magnitude: the maximum of |v1| and
// |v2| over the finite inputs (infinite bounds are ignored). The function is
// truncated in this extraction — the declaration/initialization of `max`, the
// finite-v1 assignment, the return statement, and the closing braces (original
// lines 84, 86-87, 90+) are missing; confirm against the full source.
83double CombineBounds(
const double v1,
const double v2) {
85 if (std::abs(v1) < kInfinity) {
88 if (std::abs(v2) < kInfinity) {
89 max = std::max(
max, std::abs(v2));
117class VectorInfoAccumulator {
119 VectorInfoAccumulator() {}
122 VectorInfoAccumulator(
const VectorInfoAccumulator&) =
delete;
123 VectorInfoAccumulator& operator=(
const VectorInfoAccumulator&) =
delete;
124 VectorInfoAccumulator(VectorInfoAccumulator&&) =
default;
125 VectorInfoAccumulator& operator=(VectorInfoAccumulator&&) =
default;
126 void Add(
double value);
127 void Add(
const VectorInfoAccumulator& other);
128 explicit operator VectorInfo()
const;
131 int64_t num_infinite_ = 0;
132 int64_t num_zero_ = 0;
133 int64_t num_finite_nonzero_ = 0;
137 double sum_squared_ = 0.0;
140void VectorInfoAccumulator::Add(
const double value) {
141 if (std::isinf(
value)) {
143 }
else if (
value == 0) {
146 ++num_finite_nonzero_;
147 const double abs_value = std::abs(
value);
148 max_ = std::max(max_, abs_value);
149 min_ = std::min(min_, abs_value);
151 sum_squared_ += abs_value * abs_value;
155void VectorInfoAccumulator::Add(
const VectorInfoAccumulator& other) {
156 num_infinite_ += other.num_infinite_;
157 num_zero_ += other.num_zero_;
158 num_finite_nonzero_ += other.num_finite_nonzero_;
159 max_ = std::max(max_, other.max_);
160 min_ = std::min(min_, other.min_);
162 sum_squared_ += other.sum_squared_;
165VectorInfoAccumulator::operator VectorInfo()
const {
167 .num_finite_nonzero = num_finite_nonzero_,
168 .num_infinite = num_infinite_,
169 .num_zero = num_zero_,
170 .largest = num_finite_nonzero_ > 0 ? max_ : 0.0,
171 .smallest = num_finite_nonzero_ > 0 ? min_ : 0.0,
172 .average = num_finite_nonzero_ + num_zero_ > 0
173 ? sum_ / (num_finite_nonzero_ + num_zero_)
174 :
std::numeric_limits<double>::quiet_NaN(),
179VectorInfo CombineAccumulators(
180 const std::vector<VectorInfoAccumulator>& accumulators) {
181 VectorInfoAccumulator result;
182 for (
const VectorInfoAccumulator& accumulator : accumulators) {
183 result.Add(accumulator);
185 return VectorInfo(result);
191VectorInfo ComputeVectorInfo(
const VectorXd& vec,
const Sharder& sharder) {
192 std::vector<VectorInfoAccumulator> local_accumulator(sharder.NumShards());
193 sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
194 VectorInfoAccumulator shard_accumulator;
195 for (
double element : shard(vec)) {
196 shard_accumulator.Add(element);
198 local_accumulator[shard.Index()] = std::move(shard_accumulator);
200 return CombineAccumulators(local_accumulator);
203VectorInfo VariableBoundGapInfo(
const VectorXd&
lower_bounds,
205 const Sharder& sharder) {
206 std::vector<VectorInfoAccumulator> local_accumulator(sharder.NumShards());
207 sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
208 VectorInfoAccumulator shard_accumulator;
210 shard_accumulator.Add(element);
212 local_accumulator[shard.Index()] = std::move(shard_accumulator);
214 return CombineAccumulators(local_accumulator);
217VectorInfo MatrixAbsElementInfo(
218 const SparseMatrix<double, ColMajor, int64_t>& matrix,
219 const Sharder& sharder) {
220 std::vector<VectorInfoAccumulator> local_accumulator(sharder.NumShards());
221 sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
222 VectorInfoAccumulator shard_accumulator;
223 const auto matrix_shard = shard(matrix);
224 for (int64_t col_idx = 0; col_idx < matrix_shard.outerSize(); ++col_idx) {
225 for (
decltype(matrix_shard)::InnerIterator it(matrix_shard, col_idx); it;
227 shard_accumulator.Add(it.value());
230 local_accumulator[shard.Index()] = std::move(shard_accumulator);
232 return CombineAccumulators(local_accumulator);
235VectorInfo CombinedBoundsInfo(
const VectorXd& rhs_upper_bounds,
236 const VectorXd& rhs_lower_bounds,
237 const Sharder& sharder) {
238 std::vector<VectorInfoAccumulator> local_accumulator(sharder.NumShards());
239 sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
240 VectorInfoAccumulator shard_accumulator;
241 const auto lb_shard = shard(rhs_lower_bounds);
242 const auto ub_shard = shard(rhs_upper_bounds);
243 for (int64_t i = 0;
i < lb_shard.size(); ++
i) {
244 shard_accumulator.Add(CombineBounds(ub_shard[i], lb_shard[i]));
246 local_accumulator[shard.Index()] = std::move(shard_accumulator);
248 return CombineAccumulators(local_accumulator);
// Computes VectorInfo summaries of the per-row and per-column L_inf norms of
// the constraint matrix. This extraction is missing the statements that
// compute `row_norms` and `col_norms` (original lines 256-263); presumably
// they call ScaledColLInfNorm on the matrix and its transpose with all-ones
// scaling vectors — confirm against the full source.
251InfNormInfo ConstraintMatrixRowColInfo(
252 const SparseMatrix<double, ColMajor, int64_t>& constraint_matrix,
253 const SparseMatrix<double, ColMajor, int64_t>& constraint_matrix_transpose,
254 const Sharder& matrix_sharder,
const Sharder& matrix_transpose_sharder,
255 const Sharder& primal_sharder,
const Sharder& dual_sharder) {
257 constraint_matrix_transpose,
259 OnesVector(dual_sharder), matrix_transpose_sharder);
// Row norms are indexed by constraints (dual), column norms by variables
// (primal), hence the choice of sharders below.
264 return InfNormInfo{.row_norms = ComputeVectorInfo(
row_norms, dual_sharder),
265 .col_norms = ComputeVectorInfo(
col_norms, primal_sharder)};
// Interior fragment of ComputeStats(qp): fills a QuadraticProgramStats proto
// from VectorInfo summaries of the constraint matrix, bounds, gaps, and
// objective. The function header and the arguments of several computations
// (original lines ~270-289, 325, 332-334, 341) are missing from this
// extraction — confirm details against the full source.
273 InfNormInfo cons_matrix_norm_info = ConstraintMatrixRowColInfo(
277 VectorInfo cons_matrix_info = MatrixAbsElementInfo(
279 VectorInfo combined_bounds_info =
282 VectorInfo combined_variable_bounds_info =
285 VectorInfo obj_vec_info =
287 VectorInfo gaps_info =
290 QuadraticProgramStats program_stats;
291 program_stats.set_num_variables(qp.
PrimalSize());
292 program_stats.set_num_constraints(qp.
DualSize());
// Constraint-matrix statistics.
293 program_stats.set_constraint_matrix_col_min_l_inf_norm(
294 cons_matrix_norm_info.col_norms.smallest);
295 program_stats.set_constraint_matrix_row_min_l_inf_norm(
296 cons_matrix_norm_info.row_norms.smallest);
297 program_stats.set_constraint_matrix_num_nonzeros(
298 cons_matrix_info.num_finite_nonzero);
299 program_stats.set_constraint_matrix_abs_max(cons_matrix_info.largest);
300 program_stats.set_constraint_matrix_abs_min(cons_matrix_info.smallest);
301 program_stats.set_constraint_matrix_abs_avg(cons_matrix_info.average);
302 program_stats.set_constraint_matrix_l2_norm(cons_matrix_info.l2_norm);
// Combined constraint-bound and variable-bound statistics.
303 program_stats.set_combined_bounds_max(combined_bounds_info.largest);
304 program_stats.set_combined_bounds_min(combined_bounds_info.smallest);
305 program_stats.set_combined_bounds_avg(combined_bounds_info.average);
306 program_stats.set_combined_bounds_l2_norm(combined_bounds_info.l2_norm);
307 program_stats.set_combined_variable_bounds_max(
308 combined_variable_bounds_info.largest);
309 program_stats.set_combined_variable_bounds_min(
310 combined_variable_bounds_info.smallest);
311 program_stats.set_combined_variable_bounds_avg(
312 combined_variable_bounds_info.average);
313 program_stats.set_combined_variable_bounds_l2_norm(
314 combined_variable_bounds_info.l2_norm);
// Variable bound-gap statistics; "num_finite" counts zero and non-zero gaps.
315 program_stats.set_variable_bound_gaps_num_finite(
316 gaps_info.num_finite_nonzero + gaps_info.num_zero);
317 program_stats.set_variable_bound_gaps_max(gaps_info.largest);
318 program_stats.set_variable_bound_gaps_min(gaps_info.smallest);
319 program_stats.set_variable_bound_gaps_avg(gaps_info.average);
320 program_stats.set_variable_bound_gaps_l2_norm(gaps_info.l2_norm);
// Objective-vector statistics.
321 program_stats.set_objective_vector_abs_max(obj_vec_info.largest);
322 program_stats.set_objective_vector_abs_min(obj_vec_info.smallest);
323 program_stats.set_objective_vector_abs_avg(obj_vec_info.average);
324 program_stats.set_objective_vector_l2_norm(obj_vec_info.l2_norm);
// Objective-matrix statistics: zeros/NaN defaults for the LP case (no
// objective matrix); overwritten below when a matrix is present — the
// branching lines (original 325, 332) are missing from this extraction.
326 program_stats.set_objective_matrix_num_nonzeros(0);
327 program_stats.set_objective_matrix_abs_max(0);
328 program_stats.set_objective_matrix_abs_min(0);
329 program_stats.set_objective_matrix_abs_avg(
330 std::numeric_limits<double>::quiet_NaN());
331 program_stats.set_objective_matrix_l2_norm(0);
333 VectorInfo obj_matrix_info = ComputeVectorInfo(
335 program_stats.set_objective_matrix_num_nonzeros(
336 obj_matrix_info.num_finite_nonzero);
337 program_stats.set_objective_matrix_abs_max(obj_matrix_info.largest);
338 program_stats.set_objective_matrix_abs_min(obj_matrix_info.smallest);
339 program_stats.set_objective_matrix_abs_avg(obj_matrix_info.average);
340 program_stats.set_objective_matrix_l2_norm(obj_matrix_info.l2_norm);
342 return program_stats;
347enum class ScalingNorm { kL2, kLInf };
354void DivideBySquareRootOfDivisor(
const VectorXd& divisor,
355 const Sharder& sharder, VectorXd& vector) {
356 sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
357 auto vec_shard = shard(vector);
358 const auto divisor_shard = shard(divisor);
360 if (divisor_shard[
index] != 0) {
361 vec_shard[
index] /= std::sqrt(divisor_shard[
index]);
// Runs `num_iterations` passes of Ruiz-style rescaling: each pass computes
// per-column and per-row norms (L2 or L_inf per `norm`) of the scaled
// constraint matrix and divides the scaling vectors by the square roots of
// those norms. This extraction is missing the switch header, the norm
// computations (presumably ScaledColL2Norm / ScaledColLInfNorm), break
// statements, and the trailing arguments/braces — confirm against the full
// source.
367void ApplyScalingIterationsForNorm(
const ShardedQuadraticProgram& sharded_qp,
368 const int num_iterations,
369 const ScalingNorm norm,
370 VectorXd& row_scaling_vec,
371 VectorXd& col_scaling_vec) {
372 const QuadraticProgram& qp = sharded_qp.Qp();
373 const int64_t num_col = qp.constraint_matrix.cols();
374 const int64_t num_row = qp.constraint_matrix.rows();
375 CHECK_EQ(num_col, col_scaling_vec.size());
376 CHECK_EQ(num_row, row_scaling_vec.size());
377 for (
int i = 0;
i < num_iterations; ++
i) {
381 case ScalingNorm::kL2: {
384 sharded_qp.ConstraintMatrixSharder());
// Row norms are computed as column norms of the transposed matrix.
386 sharded_qp.TransposedConstraintMatrix(), col_scaling_vec,
387 row_scaling_vec, sharded_qp.TransposedConstraintMatrixSharder());
390 case ScalingNorm::kLInf: {
393 sharded_qp.ConstraintMatrixSharder());
395 sharded_qp.TransposedConstraintMatrix(), col_scaling_vec,
396 row_scaling_vec, sharded_qp.TransposedConstraintMatrixSharder());
// Update both scaling vectors from the freshly computed norms.
400 DivideBySquareRootOfDivisor(col_norm, sharded_qp.PrimalSharder(),
402 DivideBySquareRootOfDivisor(row_norm, sharded_qp.DualSharder(),
// Fragment of LInfRuizRescaling: the function header (original line ~409) is
// missing from this extraction. Delegates to ApplyScalingIterationsForNorm
// with the L_inf norm.
410 const int num_iterations, VectorXd& row_scaling_vec,
411 VectorXd& col_scaling_vec) {
412 ApplyScalingIterationsForNorm(sharded_qp, num_iterations, ScalingNorm::kLInf,
413 row_scaling_vec, col_scaling_vec);
// Fragment of L2NormRescaling: the function header (original line ~416) is
// missing from this extraction. Runs a single L2-norm scaling iteration.
417 VectorXd& row_scaling_vec, VectorXd& col_scaling_vec) {
418 ApplyScalingIterationsForNorm(sharded_qp, 1,
419 ScalingNorm::kL2, row_scaling_vec,
// Fragment of ApplyRescaling: most of the function (original lines ~425-447)
// is missing from this extraction. Appears to run the configured rescaling
// passes and then apply the resulting scaling vectors to the QP — confirm
// against the full source.
428 bool do_rescale =
false;
432 scaling.row_scaling_vec, scaling.col_scaling_vec);
437 scaling.col_scaling_vec);
441 scaling.row_scaling_vec);
// Fragment of ComputePrimalGradient: the header, the LagrangianPart `result`
// setup, and the per-shard expressions (original lines ~446-471) are partially
// missing from this extraction. Computes the primal gradient and the primal
// part of the Lagrangian value, accumulating per-shard contributions in
// `value_parts` and summing them at the end — confirm against the full source.
448 const VectorXd& dual_product) {
455 shard(result.gradient) =
457 value_parts[shard.
Index()] =
// Quadratic (objective-matrix) case: include the 0.5*x'Qx correction.
463 const VectorXd objective_product =
466 objective_product - shard(dual_product);
467 value_parts[shard.Index()] =
469 .dot(shard(result.gradient) - 0.5 * objective_product);
472 result.value = value_parts.sum();
// Returns the constraint bound (or primal product) selecting the dual
// subgradient coefficient for a single constraint:
//  - dual < 0: the upper bound is active;
//  - dual > 0: the lower bound is active;
//  - dual == 0: clamp the primal product to the bounds when both are finite,
//    otherwise use whichever bound is finite, or 0.0 when neither is.
// NOTE(review): the signature (given by this file's declaration of
// DualSubgradientCoefficient), the `dual < 0.0` branch header, and the final
// else (original lines 476, 480, 497+) were missing from the extraction and
// are reconstructed — confirm against the full source.
double DualSubgradientCoefficient(const double constraint_lower_bound,
                                  const double constraint_upper_bound,
                                  const double dual,
                                  const double primal_product) {
  if (dual < 0.0) {
    return constraint_upper_bound;
  } else if (dual > 0.0) {
    return constraint_lower_bound;
  } else if (std::isfinite(constraint_lower_bound) &&
             std::isfinite(constraint_upper_bound)) {
    if (primal_product < constraint_lower_bound) {
      return constraint_lower_bound;
    } else if (primal_product > constraint_upper_bound) {
      return constraint_upper_bound;
    } else {
      return primal_product;
    }
  } else if (std::isfinite(constraint_lower_bound)) {
    return constraint_lower_bound;
  } else if (std::isfinite(constraint_upper_bound)) {
    return constraint_upper_bound;
  } else {
    return 0.0;
  }
}
// Fragment of ComputeDualGradient: the header, the `result`/`value_parts`
// setup, the ParallelForEachShard wrapper, and the per-element assignment to
// the gradient (original lines ~502-524, incl. 517) are partially missing from
// this extraction. Per shard: fills the dual gradient using
// DualSubgradientCoefficient, accumulates the dual Lagrangian value, then
// subtracts the primal product — confirm against the full source.
503 const VectorXd& dual_solution,
504 const VectorXd& primal_product) {
512 const auto dual_solution_shard = shard(dual_solution);
513 auto dual_gradient_shard = shard(result.gradient);
514 const auto primal_product_shard = shard(primal_product);
515 double value_sum = 0.0;
516 for (int64_t i = 0; i < dual_gradient_shard.size(); ++i) {
518 constraint_lower_bounds[i], constraint_upper_bounds[i],
519 dual_solution_shard[i], primal_product_shard[i]);
520 value_sum += dual_gradient_shard[i] * dual_solution_shard[i];
522 value_parts[shard.
Index()] = value_sum;
523 dual_gradient_shard -= primal_product_shard;
525 result.value = value_parts.sum();
531using ::Eigen::ColMajor;
532using ::Eigen::SparseMatrix;
536double NormalizeVector(
const Sharder& sharder, VectorXd& vector) {
537 const double norm =
Norm(vector, sharder);
539 sharder.ParallelForEachShard(
540 [&](
const Sharder::Shard& shard) { shard(vector) /= norm; });
// Upper bound on the probability that `k` power-method iterations on a
// `dimension`-sized problem fail to reach relative accuracy `epsilon`.
// NOTE(review): the body of the guard branch (original lines 552-554) was
// missing from the extraction; returning 1.0 (the trivial probability bound)
// when the bound's preconditions (k >= 2, epsilon > 0) do not hold is the
// reconstruction — confirm against the full source.
double PowerMethodFailureProbability(int64_t dimension, double epsilon, int k) {
  if (k < 2 || epsilon <= 0.0) {
    // The probability bound below is not valid for these parameters.
    return 1.0;
  }
  return std::min(0.824, 0.354 / std::sqrt(epsilon * (k - 1))) *
         std::sqrt(dimension) * std::pow(1.0 - epsilon, k - 0.5);
}
// Estimates the maximum singular value of `matrix` (optionally restricted to
// active-set rows/columns via the indicator vectors) using the power method on
// A'A, iterating until PowerMethodFailureProbability drops below
// `failure_probability`. This extraction is missing several lines (original
// 570-571, 574, 576-578, 581, 583-586, 590, 593, 595, 597, 599, 601, 605,
// 608, 611) — the indicator multiplications, the matrix-vector products, and
// loop/brace structure; confirm details against the full source.
559SingularValueAndIterations EstimateMaximumSingularValue(
560 const SparseMatrix<double, ColMajor, int64_t>& matrix,
561 const SparseMatrix<double, ColMajor, int64_t>& matrix_transpose,
562 const std::optional<VectorXd>& active_set_indicator,
563 const std::optional<VectorXd>& transpose_active_set_indicator,
564 const Sharder& matrix_sharder,
const Sharder& matrix_transpose_sharder,
565 const Sharder& primal_vector_sharder,
const Sharder& dual_vector_sharder,
566 const double desired_relative_error,
const double failure_probability,
567 std::mt19937& mt_generator) {
568 const int64_t dimension = matrix.cols();
569 VectorXd eigenvector(dimension);
// Random Gaussian start vector (sequential draw for determinism given the
// generator state).
572 for (
double& entry : eigenvector) {
573 entry = absl::Gaussian<double>(mt_generator);
575 if (active_set_indicator.has_value()) {
579 NormalizeVector(primal_vector_sharder, eigenvector);
580 double eigenvalue_estimate = 0.0;
582 int num_iterations = 0;
// epsilon such that (1 - desired_relative_error)^2 = 1 - epsilon.
587 const double epsilon = 1.0 -
MathUtil::Square(1.0 - desired_relative_error);
588 while (PowerMethodFailureProbability(dimension, epsilon, num_iterations) >
589 failure_probability) {
591 matrix_transpose, eigenvector, matrix_transpose_sharder);
592 if (transpose_active_set_indicator.has_value()) {
594 dual_vector_sharder, dual_eigenvector);
596 VectorXd next_eigenvector =
598 if (active_set_indicator.has_value()) {
600 primal_vector_sharder, next_eigenvector);
// Rayleigh-quotient style estimate of the top eigenvalue of A'A.
602 eigenvalue_estimate =
603 Dot(eigenvector, next_eigenvector, primal_vector_sharder);
604 eigenvector = std::move(next_eigenvector);
606 const double primal_norm =
607 NormalizeVector(primal_vector_sharder, eigenvector);
609 VLOG(1) <<
"Iteration " << num_iterations <<
" singular value estimate "
610 << std::sqrt(eigenvalue_estimate) <<
" primal norm " << primal_norm;
// sigma_max(A) = sqrt(lambda_max(A'A)).
612 return SingularValueAndIterations{
613 .singular_value = std::sqrt(eigenvalue_estimate),
614 .num_iterations = num_iterations,
615 .estimated_relative_error = desired_relative_error};
620VectorXd ComputePrimalActiveSetIndicator(
621 const ShardedQuadraticProgram& sharded_qp,
623 VectorXd indicator(sharded_qp.PrimalSize());
624 sharded_qp.PrimalSharder().ParallelForEachShard(
625 [&](
const Sharder::Shard& shard) {
626 const auto lower_bound_shard =
627 shard(sharded_qp.Qp().variable_lower_bounds);
628 const auto upper_bound_shard =
629 shard(sharded_qp.Qp().variable_upper_bounds);
631 auto indicator_shard = shard(indicator);
632 const int64_t shard_size =
633 sharded_qp.PrimalSharder().ShardSize(shard.Index());
634 for (int64_t i = 0;
i < shard_size; ++
i) {
635 if ((primal_solution_shard[i] == lower_bound_shard[i]) ||
636 (primal_solution_shard[i] == upper_bound_shard[i])) {
637 indicator_shard[
i] = 0.0;
639 indicator_shard[
i] = 1.0;
648VectorXd ComputeDualActiveSetIndicator(
649 const ShardedQuadraticProgram& sharded_qp,
const VectorXd& dual_solution) {
650 VectorXd indicator(sharded_qp.DualSize());
651 sharded_qp.DualSharder().ParallelForEachShard(
652 [&](
const Sharder::Shard& shard) {
653 const auto lower_bound_shard =
654 shard(sharded_qp.Qp().constraint_lower_bounds);
655 const auto upper_bound_shard =
656 shard(sharded_qp.Qp().constraint_upper_bounds);
657 const auto dual_solution_shard = shard(dual_solution);
658 auto indicator_shard = shard(indicator);
659 const int64_t shard_size =
660 sharded_qp.DualSharder().ShardSize(shard.Index());
661 for (int64_t i = 0;
i < shard_size; ++
i) {
662 if (dual_solution_shard[i] == 0.0 &&
663 (std::isinf(lower_bound_shard[i]) ||
664 std::isinf(upper_bound_shard[i]))) {
665 indicator_shard[
i] = 0.0;
667 indicator_shard[
i] = 1.0;
// Fragment of EstimateMaximumSingularValueOfConstraintMatrix: the header,
// the primal-indicator computation, and the argument list of the delegated
// call (original lines ~677-678, 684-687, 691, 693-697) are partially missing
// from this extraction. Builds optional active-set indicators from the given
// solutions and delegates to EstimateMaximumSingularValue — confirm against
// the full source.
679 const std::optional<VectorXd>& dual_solution,
680 const double desired_relative_error,
const double failure_probability,
681 std::mt19937& mt_generator) {
682 std::optional<VectorXd> primal_active_set_indicator;
683 std::optional<VectorXd> dual_active_set_indicator;
685 primal_active_set_indicator =
688 if (dual_solution.has_value()) {
689 dual_active_set_indicator =
690 ComputeDualActiveSetIndicator(sharded_qp, *dual_solution);
692 return EstimateMaximumSingularValue(
698 desired_relative_error, failure_probability, mt_generator);
// Fragment of HasValidBounds: the header and the ParallelTrueForAllShards
// predicates checking each constraint/variable bound pair (original lines
// ~702-721) are missing from this extraction — confirm against the full
// source.
703 const bool constraint_bounds_valid =
712 const bool variable_bounds_valid =
722 return constraint_bounds_valid && variable_bounds_valid;
// Fragment of ProjectToPrimalVariableBounds: the header, shard loop, and the
// clamping expressions (original lines ~726, 730-737, 739, 741, 743+) are
// partially missing from this extraction. When `use_feasibility_bounds` is
// set, finite bounds are replaced by zero (via `make_finite_values_zero`)
// before clamping — confirm against the full source.
727 const bool use_feasibility_bounds) {
728 const auto make_finite_values_zero = [](
const double x) {
729 return std::isfinite(
x) ? 0.0 :
x;
734 if (use_feasibility_bounds) {
738 .unaryExpr(make_finite_values_zero))
740 .unaryExpr(make_finite_values_zero));
742 shard(primal) = shard(primal)
// Fragment of ProjectToDualVariableBounds: the header, shard setup, and
// closing braces (original lines ~753-760, 764, 767+) are missing from this
// extraction. Enforces dual sign conventions: duals for constraints with an
// infinite upper bound are clamped to >= 0, and those with an infinite lower
// bound to <= 0 — confirm against the full source.
756 auto dual_shard = shard(dual);
761 for (int64_t i = 0; i < dual_shard.size(); ++i) {
762 if (!std::isfinite(upper_bound_shard[i])) {
763 dual_shard[i] = std::max(dual_shard[i], 0.0);
765 if (!std::isfinite(lower_bound_shard[i])) {
766 dual_shard[i] = std::min(dual_shard[i], 0.0);
static T Square(const T x)
Returns the square of x.
int64_t PrimalSize() const
const Sharder & ConstraintMatrixSharder() const
Returns a Sharder intended for the columns of the QP's constraint matrix.
const QuadraticProgram & Qp() const
const Sharder & TransposedConstraintMatrixSharder() const
Returns a Sharder intended for the rows of the QP's constraint matrix.
const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > & TransposedConstraintMatrix() const
Returns a reference to the transpose of the QP's constraint matrix.
const Sharder & DualSharder() const
Returns a Sharder intended for dual vectors.
const Sharder & PrimalSharder() const
Returns a Sharder intended for primal vectors.
void RescaleQuadraticProgram(const Eigen::VectorXd &col_scaling_vec, const Eigen::VectorXd &row_scaling_vec)
void Add(const Eigen::VectorXd &datapoint, double weight)
ShardedWeightedAverage(const Sharder *sharder)
Eigen::VectorXd ComputeAverage() const
void Clear()
Clears the sum to zero, i.e., just constructed.
bool ParallelTrueForAllShards(const std::function< bool(const Shard &)> &func) const
void ParallelForEachShard(const std::function< void(const Shard &)> &func) const
Runs func on each of the shards.
Validation utilities for solvers.proto.
void SetZero(const Sharder &sharder, VectorXd &dest)
LagrangianPart ComputeDualGradient(const ShardedQuadraticProgram &sharded_qp, const VectorXd &dual_solution, const VectorXd &primal_product)
QuadraticProgramStats ComputeStats(const ShardedQuadraticProgram &qp)
Returns a QuadraticProgramStats for a ShardedQuadraticProgram.
double Dot(const VectorXd &v1, const VectorXd &v2, const Sharder &sharder)
LagrangianPart ComputePrimalGradient(const ShardedQuadraticProgram &sharded_qp, const VectorXd &primal_solution, const VectorXd &dual_product)
VectorXd TransposedMatrixVectorProduct(const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > &matrix, const VectorXd &vector, const Sharder &sharder)
void LInfRuizRescaling(const ShardedQuadraticProgram &sharded_qp, const int num_iterations, VectorXd &row_scaling_vec, VectorXd &col_scaling_vec)
SingularValueAndIterations EstimateMaximumSingularValueOfConstraintMatrix(const ShardedQuadraticProgram &sharded_qp, const std::optional< VectorXd > &primal_solution, const std::optional< VectorXd > &dual_solution, const double desired_relative_error, const double failure_probability, std::mt19937 &mt_generator)
VectorXd ScaledColLInfNorm(const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > &matrix, const VectorXd &row_scaling_vec, const VectorXd &col_scaling_vec, const Sharder &sharder)
void ProjectToPrimalVariableBounds(const ShardedQuadraticProgram &sharded_qp, VectorXd &primal, const bool use_feasibility_bounds)
constexpr double kInfinity
bool IsLinearProgram(const QuadraticProgram &qp)
void ProjectToDualVariableBounds(const ShardedQuadraticProgram &sharded_qp, VectorXd &dual)
double DualSubgradientCoefficient(const double constraint_lower_bound, const double constraint_upper_bound, const double dual, const double primal_product)
void CoefficientWiseProductInPlace(const VectorXd &scale, const Sharder &sharder, VectorXd &dest)
VectorXd ZeroVector(const Sharder &sharder)
Like VectorXd::Zero(sharder.NumElements()).
void L2NormRescaling(const ShardedQuadraticProgram &sharded_qp, VectorXd &row_scaling_vec, VectorXd &col_scaling_vec)
VectorXd ScaledColL2Norm(const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > &matrix, const VectorXd &row_scaling_vec, const VectorXd &col_scaling_vec, const Sharder &sharder)
VectorXd OnesVector(const Sharder &sharder)
Like VectorXd::Ones(sharder.NumElements()).
bool HasValidBounds(const ShardedQuadraticProgram &sharded_qp)
ScalingVectors ApplyRescaling(const RescalingOptions &rescaling_options, ShardedQuadraticProgram &sharded_qp)
double Norm(const VectorXd &vector, const Sharder &sharder)
void AssignVector(const VectorXd &vec, const Sharder &sharder, VectorXd &dest)
std::vector< double > lower_bounds
std::vector< double > upper_bounds
double smallest
The smallest absolute value of the finite non-zero values.
double l2_norm
The L2 norm of the finite values.
double average
The average absolute value of the finite values.
int64_t num_finite_nonzero
double largest
The largest absolute value of the finite non-zero values.
Eigen::VectorXd variable_lower_bounds
Eigen::VectorXd constraint_lower_bounds
Eigen::VectorXd objective_vector
std::optional< Eigen::DiagonalMatrix< double, Eigen::Dynamic > > objective_matrix
Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > constraint_matrix
Eigen::VectorXd variable_upper_bounds
Eigen::VectorXd constraint_upper_bounds
int l_inf_ruiz_iterations