// Google OR-Tools v9.12 — a fast and portable software suite for
// combinatorial optimization.
// File: dual_edge_norms.cc (extracted from a generated documentation page).
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <cstdlib>
17
20
21namespace operations_research {
22namespace glop {
23
25 : basis_factorization_(basis_factorization),
26 recompute_edge_squared_norms_(true) {}
27
29 return recompute_edge_squared_norms_;
30}
31
32void DualEdgeNorms::Clear() { recompute_edge_squared_norms_ = true; }
33
34void DualEdgeNorms::ResizeOnNewRows(RowIndex new_size) {
35 edge_squared_norms_.resize(new_size, 1.0);
36}
37
39 if (recompute_edge_squared_norms_) ComputeEdgeSquaredNorms();
40 return edge_squared_norms_.const_view();
41}
42
44 const ColumnPermutation& col_perm) {
45 if (recompute_edge_squared_norms_) return;
47 col_perm.const_view(), &edge_squared_norms_, &tmp_edge_squared_norms_);
48}
49
50bool DualEdgeNorms::TestPrecision(RowIndex leaving_row,
51 const ScatteredRow& unit_row_left_inverse) {
52 // This should only be true if we do not use steepest edge.
53 if (recompute_edge_squared_norms_) return true;
54
55 // ||unit_row_left_inverse||^2 is the same as
56 // edge_squared_norms_[leaving_row], but with a better precision. If the
57 // difference between the two is too large, we trigger a full recomputation.
58 const Fractional leaving_squared_norm =
59 SquaredNorm(TransposedView(unit_row_left_inverse));
60 const Fractional old_squared_norm = edge_squared_norms_[leaving_row];
61 const Fractional estimated_edge_norms_accuracy =
62 (sqrt(leaving_squared_norm) - sqrt(old_squared_norm)) /
63 sqrt(leaving_squared_norm);
64 stats_.edge_norms_accuracy.Add(estimated_edge_norms_accuracy);
65
66 if (std::abs(estimated_edge_norms_accuracy) >
67 parameters_.recompute_edges_norm_threshold()) {
68 VLOG(1) << "Recomputing edge norms: " << sqrt(leaving_squared_norm)
69 << " vs " << sqrt(old_squared_norm);
70 recompute_edge_squared_norms_ = true;
71 }
72
73 // We just do not want to pivot on a position with an under-estimated norm.
74 edge_squared_norms_[leaving_row] = leaving_squared_norm;
75 const bool result = old_squared_norm > 0.25 * leaving_squared_norm;
76 if (!result) {
77 VLOG(1) << "Recomputing leaving row. Norm was " << sqrt(old_squared_norm)
78 << " vs precise version " << sqrt(leaving_squared_norm);
79 }
80 return result;
81}
82
84 ColIndex entering_col, RowIndex leaving_row,
85 const ScatteredColumn& direction,
86 const ScatteredRow& unit_row_left_inverse) {
87 // No need to update if we will recompute it from scratch later.
88 if (recompute_edge_squared_norms_) return;
89 const DenseColumn& tau = ComputeTau(TransposedView(unit_row_left_inverse));
90 SCOPED_TIME_STAT(&stats_);
91
92 const Fractional pivot = direction[leaving_row];
93 const Fractional new_leaving_squared_norm =
94 edge_squared_norms_[leaving_row] / Square(pivot);
95
96 // Update the norm.
97 int stat_lower_bounded_norms = 0;
98 auto output = edge_squared_norms_.view();
99 for (const auto e : direction) {
100 // Note that the update formula used is important to maximize the precision.
101 // See Koberstein's PhD section 8.2.2.1.
102 output[e.row()] +=
103 e.coefficient() * (e.coefficient() * new_leaving_squared_norm -
104 2.0 / pivot * tau[e.row()]);
105
106 // Avoid 0.0 norms (The 1e-4 is the value used by Koberstein).
107 // TODO(user): use a more precise lower bound depending on the column norm?
108 // We can do that with Cauchy-Schwarz inequality:
109 // (edge . leaving_column)^2 = 1.0 < ||edge||^2 * ||leaving_column||^2
110 const Fractional kLowerBound = 1e-4;
111 if (output[e.row()] < kLowerBound) {
112 if (e.row() == leaving_row) continue;
113 output[e.row()] = kLowerBound;
114 ++stat_lower_bounded_norms;
115 }
116 }
117 output[leaving_row] = new_leaving_squared_norm;
118 IF_STATS_ENABLED(stats_.lower_bounded_norms.Add(stat_lower_bounded_norms));
119}
120
121void DualEdgeNorms::ComputeEdgeSquaredNorms() {
122 SCOPED_TIME_STAT(&stats_);
123
124 // time_limit_->LimitReached() can be costly sometimes, so we only do that
125 // if we feel this will be slow anyway.
126 const bool test_limit = (time_limit_ != nullptr) &&
127 basis_factorization_.NumberOfEntriesInLU() > 10'000;
128
129 // Since we will do a lot of inversions, it is better to be as efficient and
130 // precise as possible by having a refactorized basis.
131 DCHECK(basis_factorization_.IsRefactorized());
132 const RowIndex num_rows = basis_factorization_.GetNumberOfRows();
133 edge_squared_norms_.resize(num_rows, 1.0);
134 for (RowIndex row(0); row < num_rows; ++row) {
135 edge_squared_norms_[row] = basis_factorization_.DualEdgeSquaredNorm(row);
136
137 // This operation can be costly, and we abort if we are stuck here.
138 // Note that we still mark edges as "recomputed" otherwise we can runs into
139 // some DCHECK before we actually abort the solve.
140 if (test_limit && time_limit_->LimitReached()) break;
141 }
142 recompute_edge_squared_norms_ = false;
143}
144
145const DenseColumn& DualEdgeNorms::ComputeTau(
146 const ScatteredColumn& unit_row_left_inverse) {
147 SCOPED_TIME_STAT(&stats_);
148 const DenseColumn& result =
149 basis_factorization_.RightSolveForTau(unit_row_left_inverse);
150 IF_STATS_ENABLED(stats_.tau_density.Add(Density(Transpose(result))));
151 return result;
152}
153
154} // namespace glop
155} // namespace operations_research
// ---------------------------------------------------------------------------
// Doxygen cross-reference residue from the documentation page (kept for
// reference, commented out so the file remains valid C++):
//
// const DenseColumn & RightSolveForTau(const ScatteredColumn &a) const
// bool IsRefactorized() const
//   Returns true if the factorization was just recomputed.
// RowIndex GetNumberOfRows() const
//   Returns the number of rows in the basis.
// DenseColumn::ConstView GetEdgeSquaredNorms()
// DualEdgeNorms(const BasisFactorization &basis_factorization)
//   Takes references to the linear program data we need.
// void UpdateDataOnBasisPermutation(const ColumnPermutation &col_perm)
//   Updates the norms if the columns of the basis were permuted.
// void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row,
//     const ScatteredColumn &direction,
//     const ScatteredRow &unit_row_left_inverse)
// bool TestPrecision(RowIndex leaving_row,
//     const ScatteredRow &unit_row_left_inverse)
// StrictITISpan<IndexType, const IndexType> const_view() const
//   Definition permutation.h:97
// StrictITISpan<RowIndex, const Fractional> ConstView
//   Definition lp_types.h:291
// double Density(const DenseRow &row)
//   Definition lp_utils.cc:176
// Fractional Square(Fractional f)
//   Definition lp_utils.h:41
// Permutation<ColIndex> ColumnPermutation
// const DenseRow & Transpose(const DenseColumn &col)
//   Similar comment as the other Transpose() implementation above.
//   Definition lp_utils.h:199
// Fractional SquaredNorm(const SparseColumn &v)
//   Definition lp_utils.cc:36
// StrictITIVector<RowIndex, Fractional> DenseColumn
//   Column-vector types. Column-vector types are indexed by a row index.
//   Definition lp_types.h:380
// void ApplyColumnPermutationToRowIndexedVector(
//     StrictITISpan<ColIndex, const ColIndex> col_perm,
//     RowIndexedVector *v, RowIndexedVector *tmp)
// const ScatteredRow & TransposedView(const ScatteredColumn &c)
// In SWIG mode, we don't want anything besides these top-level includes.
// #define IF_STATS_ENABLED(instructions)  — Definition stats.h:417
// #define SCOPED_TIME_STAT(stats)         — Definition stats.h:418