Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
set_cover_lagrangian.cc
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/algorithms/set_cover_lagrangian.h"

#include <algorithm>
#include <cstdlib>
#include <limits>
#include <tuple>
#include <vector>

#include "absl/log/check.h"
#include "ortools/algorithms/adjustable_k_ary_heap.h"
#include "ortools/algorithms/set_cover_model.h"
#include "ortools/base/threadpool.h"

namespace operations_research {

// Notes from a discussion with Luca Accorsi (accorsi@) and Francesco Cavaliere
// regarding [1]:
// - the 3-phase algorithm in the paper actually uses pricing (which would be
//   better described as "partial" pricing),
// - the columns that were used in the preceding solutions should be fixed,
//   because otherwise the search diversifies too much and degrades the best
//   solution (under "queue" in the paper),
// - the median algorithm is already in the STL (std::nth_element).

// Denoted as u in [1], the Lagrange multipliers form a dual vector: a column
// vector of nonnegative (zero included) multipliers, one per constraint.
// A deterministic way to compute a feasible (non-optimal) u:
// For all element indices i, u_i = min_{j \in J_i} c_j / |I_j|, where
// J_i denotes the set of indices of subsets covering element i, and |I_j|
// denotes the number of elements covered by subset j.
//
// Concerning the fundamental ideas behind this approach, one may refer to [2].
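// As a worked example of the rule above: if element i is covered only by
// subsets j1 (c_{j1} = 6, |I_{j1}| = 3) and j2 (c_{j2} = 5, |I_{j2}| = 2),
// then u_i = min(6 / 3, 5 / 2) = 2.0.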
ElementCostVector SetCoverLagrangian::InitializeLagrangeMultipliers() const {
  ElementCostVector multipliers(model_.num_elements(),
                                std::numeric_limits<Cost>::infinity());
  SubsetCostVector marginal_costs(model_.num_subsets());
  // TODO(user): Parallelize this.
  for (const SubsetIndex subset : model_.SubsetRange()) {
    marginal_costs[subset] =
        model_.subset_costs()[subset] / model_.columns()[subset].size();
  }
  // TODO(user): Parallelize this.
  for (const ElementIndex element : model_.ElementRange()) {
    // Minimum marginal cost to cover this element.
    Cost min_marginal_cost = std::numeric_limits<Cost>::infinity();
    const SparseRowView& rows = model_.rows();
    // TODO(user): use std::min_element on rows[element] with a custom
    // comparator that gets marginal_costs[subset]. Check performance.
    for (const SubsetIndex subset : rows[element]) {
      min_marginal_cost = std::min(min_marginal_cost, marginal_costs[subset]);
    }
    multipliers[element] = min_marginal_cost;
  }
  return multipliers;
}

namespace {
// Computes the scalar product between a column and a vector of duals.
// Profiling has shown that this is where most of the time is spent.
// TODO(user): make this visible to other algorithms.
// TODO(user): Investigate.
Cost ScalarProduct(const SparseColumn& column, const ElementCostVector& dual) {
  Cost result = 0.0;
  for (ColumnEntryIndex pos(0); pos.value() < column.size(); ++pos) {
    result += dual[column[pos]];
  }
  return result;
}

// Computes the reduced costs for a subset of subsets.
// This is a helper function for ParallelComputeReducedCosts().
// It is called on a slice of subsets, defined by start and end.
// The reduced costs are computed using the multipliers vector.
// The columns of the subsets are given by the columns view.
// The result is stored in reduced_costs.
void FillReducedCostsSlice(SubsetIndex start, SubsetIndex end,
                           const SubsetCostVector& costs,
                           const ElementCostVector& multipliers,
                           const SparseColumnView& columns,
                           SubsetCostVector* reduced_costs) {
  for (SubsetIndex subset = start; subset < end; ++subset) {
    (*reduced_costs)[subset] =
        costs[subset] - ScalarProduct(columns[subset], multipliers);
  }
}
}  // namespace

// Computes the reduced costs for all subsets in parallel using a ThreadPool.
SubsetCostVector SetCoverLagrangian::ParallelComputeReducedCosts(
    const SubsetCostVector& costs, const ElementCostVector& multipliers) const {
  const SubsetIndex num_subsets(model_.num_subsets());
  // TODO(user): compute a close-to-optimal k-subset partitioning.
  const SubsetIndex block_size =
      SubsetIndex(1) + num_subsets / num_threads_;  // [***] Arbitrary choice.
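  // For example, with num_subsets = 1000 and num_threads_ = 4, block_size is
  // 1 + 1000 / 4 = 251, yielding the slices [0, 251), [251, 502), [502, 753)
  // and [753, 1000): at most num_threads_ slices, the last possibly shorter.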
  const SparseColumnView& columns = model_.columns();
  SubsetCostVector reduced_costs(num_subsets);
  // TODO(user): check how costly it is to create a new ThreadPool.
  // TODO(user): use a queue of subsets to process, instead of a fixed range.
  {
    // The ThreadPool is scoped so that its destruction at the closing brace
    // joins all the workers before reduced_costs is returned.
    ThreadPool thread_pool("ParallelComputeReducedCosts", num_threads_);
    thread_pool.StartWorkers();
    // This parallelization is not very efficient, because all the threads
    // use the same costs vector; maybe it should be local to the thread.
    // It's unclear whether sharing columns and costs is better than having
    // each thread use its own partial copy.
    for (SubsetIndex start(0); start < num_subsets; start += block_size) {
      thread_pool.Schedule([start, block_size, num_subsets, &costs,
                            &multipliers, &columns, &reduced_costs]() {
        const SubsetIndex end = std::min(start + block_size, num_subsets);
        FillReducedCostsSlice(start, end, costs, multipliers, columns,
                              &reduced_costs);
      });
    }
  }  // Synchronize all the threads. This is equivalent to a wait.
  return reduced_costs;
}

// Reduced cost (row vector). Denoted as c_j(u) in [1], right after equation
// (5). For a subset j, c_j(u) = c_j - sum_{i \in I_j} u_i, where I_j is the
// set of indices of the elements covered by subset j. For a general integer
// program A.x <= b, this would be:
//   c_j(u) = c_j - sum_{i \in M} a_{ij}.u_i.
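// As a worked example: if subset j has cost c_j = 5 and covers elements i1
// and i2 with multipliers u_{i1} = 2.0 and u_{i2} = 1.5, then
// c_j(u) = 5 - (2.0 + 1.5) = 1.5.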
SubsetCostVector SetCoverLagrangian::ComputeReducedCosts(
    const SubsetCostVector& costs, const ElementCostVector& multipliers) const {
  const SparseColumnView& columns = model_.columns();
  SubsetCostVector reduced_costs(costs.size());
  FillReducedCostsSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()),
                        costs, multipliers, columns, &reduced_costs);
  return reduced_costs;
}

namespace {
// Helper function to compute the subgradient.
// It fills a slice of the subgradient vector, from indices start to end.
// This is a helper function for ParallelComputeSubgradient().
// The subgradient is computed using the reduced costs vector.
void FillSubgradientSlice(SubsetIndex start, SubsetIndex end,
                          const SparseColumnView& columns,
                          const SubsetCostVector& reduced_costs,
                          ElementCostVector* subgradient) {
  for (SubsetIndex subset(start); subset < end; ++subset) {
    if (reduced_costs[subset] < 0.0) {
      for (const ElementIndex element : columns[subset]) {
        (*subgradient)[element] -= 1.0;
      }
    }
  }
}
}  // namespace

// Vector of primal slack variables. Denoted as s_i(u) in [1], equation (6).
// For all element indices i, s_i(u) = 1 - sum_{j \in J_i} x_j(u),
// where J_i denotes the set of indices of subsets j covering element i.
// For a general integer program A x <= b, the subgradient cost vector is
// defined as A x - b. See [2].
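// As a worked example: if element i is covered by subsets j1 and j2, and only
// c_{j1}(u) < 0 (so that x_{j1}(u) = 1 and x_{j2}(u) = 0), then
// s_i(u) = 1 - 1 = 0. If neither reduced cost is negative, s_i(u) = 1; if
// both are, s_i(u) = -1 (the element is over-covered).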
ElementCostVector SetCoverLagrangian::ComputeSubgradient(
    const SubsetCostVector& reduced_costs) const {
  // NOTE(user): Should the initialization be done with coverage[element]?
  ElementCostVector subgradient(model_.num_elements(), 1.0);
  FillSubgradientSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()),
                       model_.columns(), reduced_costs, &subgradient);
  return subgradient;
}

ElementCostVector SetCoverLagrangian::ParallelComputeSubgradient(
    const SubsetCostVector& reduced_costs) const {
  const SubsetIndex num_subsets(model_.num_subsets());
  const SubsetIndex block_size =
      SubsetIndex(1) + num_subsets / num_threads_;  // [***]
  const SparseColumnView& columns = model_.columns();
  ElementCostVector subgradient(model_.num_elements(), 1.0);
  // The subgradient has one component per element, while each thread
  // processes several subsets. Hence, use one vector per thread to avoid
  // data races.
  // TODO(user): it may be better to split the elements among the threads,
  // although this might be less well-balanced.
  std::vector<ElementCostVector> subgradients(
      num_threads_, ElementCostVector(model_.num_elements()));
  {
    // Scoped so that the ThreadPool destructor joins all the workers before
    // the per-thread vectors are accumulated below.
    ThreadPool thread_pool("ParallelComputeSubgradient", num_threads_);
    thread_pool.StartWorkers();
    int thread_index = 0;
    for (SubsetIndex start(0); start < num_subsets;
         start += block_size, ++thread_index) {
      thread_pool.Schedule([start, block_size, num_subsets, &reduced_costs,
                            &columns, &subgradients, thread_index]() {
        const SubsetIndex end = std::min(start + block_size, num_subsets);
        FillSubgradientSlice(start, end, columns, reduced_costs,
                             &subgradients[thread_index]);
      });
    }
  }  // Synchronize all the threads.
  for (int thread_index = 0; thread_index < num_threads_; ++thread_index) {
    for (const ElementIndex element : model_.ElementRange()) {
      subgradient[element] += subgradients[thread_index][element];
    }
  }
  return subgradient;
}

namespace {
// Helper function to compute the value of the Lagrangian.
// This is a helper function for ParallelComputeLagrangianValue().
// It is called on a slice of subsets, defined by start and end.
// The value of the Lagrangian is computed using the reduced costs vector.
// The result is accumulated into lagrangian_value.
void FillLagrangianValueSlice(SubsetIndex start, SubsetIndex end,
                              const SubsetCostVector& reduced_costs,
                              Cost* lagrangian_value) {
  // This is min \sum_{j \in N} c_j(u) x_j. This captures the remark (**) in
  // the comment preceding ComputeLagrangianValue below, taking into account
  // the possible values for x_j and using them to minimize the terms.
  for (SubsetIndex subset(start); subset < end; ++subset) {
    if (reduced_costs[subset] < 0.0) {
      *lagrangian_value += reduced_costs[subset];
    }
  }
}
}  // namespace

// Computes the (scalar) value of the Lagrangian by fixing the value of x_j
// based on the sign of c_j(u). In [1], equation (4), it is:
//   L(u) = min \sum_{j \in N} c_j(u) x_j + \sum_{i \in M} u_i.
// This is obtained with:
// - if c_j(u) < 0: x_j(u) = 1,
// - if c_j(u) > 0: x_j(u) = 0,  (**)
// - if c_j(u) = 0: x_j(u) is unconstrained, in {0, 1}; we use 0.
// For a general integer program A x <= b, the Lagrangian [2] is
//   L(u) = min \sum_{j \in N} c_j(u) x_j + \sum_{i \in M} b_i.u_i.
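// As a worked example: with reduced costs c(u) = (-1.5, 2.0, -0.5) and
// multipliers u = (2.0, 1.5), only the negative reduced costs contribute, so
// L(u) = (-1.5 - 0.5) + (2.0 + 1.5) = 1.5.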
Cost SetCoverLagrangian::ComputeLagrangianValue(
    const SubsetCostVector& reduced_costs,
    const ElementCostVector& multipliers) const {
  Cost lagrangian_value = 0.0;
  // This is \sum_{i \in M} u_i.
  for (const Cost u : multipliers) {
    lagrangian_value += u;
  }
  FillLagrangianValueSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()),
                           reduced_costs, &lagrangian_value);
  return lagrangian_value;
}

Cost SetCoverLagrangian::ParallelComputeLagrangianValue(
    const SubsetCostVector& reduced_costs,
    const ElementCostVector& multipliers) const {
  const SubsetIndex num_subsets(model_.num_subsets());
  const SubsetIndex block_size =
      SubsetIndex(1) + num_subsets / num_threads_;  // [***] Arbitrary.
  Cost lagrangian_value = 0.0;
  // This is \sum_{i \in M} u_i.
  for (const Cost u : multipliers) {
    lagrangian_value += u;
  }
  std::vector<Cost> lagrangian_values(num_threads_, 0.0);
  {
    // Scoped so that the ThreadPool destructor joins all the workers before
    // the per-thread values are accumulated below.
    ThreadPool thread_pool("ParallelComputeLagrangianValue", num_threads_);
    thread_pool.StartWorkers();
    int thread_index = 0;
    for (SubsetIndex start(0); start < num_subsets; start += block_size) {
      thread_pool.Schedule([start, block_size, num_subsets, thread_index,
                            &reduced_costs, &lagrangian_values]() {
        const SubsetIndex end = std::min(start + block_size, num_subsets);
        FillLagrangianValueSlice(start, end, reduced_costs,
                                 &lagrangian_values[thread_index]);
      });
      ++thread_index;
    }
  }  // Synchronize all the threads.
  for (const Cost l : lagrangian_values) {
    lagrangian_value += l;
  }
  return lagrangian_value;
}

// Performs a subgradient step.
// In the general case, for an integer program A.x <= b, the Lagrangian
// multipliers vector at step k+1 is defined as:
//   u^{k+1} = u^k + t_k (A x^k - b),
// with the term t_k = lambda_k * (UB - L(u^k)) / |A x^k - b|^2, where
// |.| is the 2-norm (i.e. Euclidean).
// In our case, the problem A x <= b is in the form A x >= 1, so we replace
// A x - b by s_i(u) = 1 - sum_{j \in J_i} x_j(u). Then
// |A x^k - b|^2 = |s^k(u)|^2, and t_k is of the form:
//   t_k = lambda_k * (UB - L(u^k)) / |s^k(u)|^2.
// Now, the coordinates u^k_i of the multiplier vectors u^k are nonnegative;
// negative values are simply cut off. Following [3], each coordinate is
// defined as:
//   u^{k+1}_i = max(u^k_i + lambda_k * (UB - L(u^k)) / |s^k(u)|^2 * s^k_i(u), 0).
// This is eq. (7) in [1].
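// As a worked example of the step: with lambda_k = 0.1, UB = 10, L(u^k) = 8
// and s^k(u) = (1, -1, 0), we get |s^k(u)|^2 = 2 and
// t_k = 0.1 * (10 - 8) / 2 = 0.1, so that
// u^{k+1} = (u^k_1 + 0.1, max(u^k_2 - 0.1, 0), u^k_3).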
void SetCoverLagrangian::UpdateMultipliers(
    double step_size, Cost lagrangian_value, Cost upper_bound,
    const SubsetCostVector& reduced_costs,
    ElementCostVector* multipliers) const {
  // step_size is \lambda_k in [1].
  DCHECK_GT(step_size, 0);
  // Compute the square of the Euclidean norm of the subgradient vector.
  const ElementCostVector subgradient = ComputeSubgradient(reduced_costs);
  Cost subgradient_square_norm = 0.0;
  for (const Cost x : subgradient) {
    subgradient_square_norm += x * x;
  }
  // First compute t_k = lambda_k * (UB - L(u^k)) / |s^k(u)|^2.
  const Cost factor =
      step_size * (upper_bound - lagrangian_value) / subgradient_square_norm;
  for (const ElementIndex element : model_.ElementRange()) {
    // Keep the multipliers from going negative or blowing up; the cap of 1e6
    // was chosen arbitrarily. [***]
    (*multipliers)[element] = std::clamp(
        (*multipliers)[element] + factor * subgradient[element], 0.0, 1e6);
  }
}

void SetCoverLagrangian::ParallelUpdateMultipliers(
    double step_size, Cost lagrangian_value, Cost upper_bound,
    const SubsetCostVector& reduced_costs,
    ElementCostVector* multipliers) const {
  // step_size is \lambda_k in [1].
  DCHECK_GT(step_size, 0);
  // Compute the square of the Euclidean norm of the subgradient vector.
  const ElementCostVector subgradient =
      ParallelComputeSubgradient(reduced_costs);
  Cost subgradient_square_norm = 0.0;
  for (const Cost x : subgradient) {
    subgradient_square_norm += x * x;
  }
  // First compute t_k = lambda_k * (UB - L(u^k)) / |s^k(u)|^2.
  const Cost factor =
      step_size * (upper_bound - lagrangian_value) / subgradient_square_norm;
  for (const ElementIndex element : model_.ElementRange()) {
    // Keep the multipliers from going negative or blowing up; the cap of 1e6
    // was chosen arbitrarily. [***]
    (*multipliers)[element] = std::clamp(
        (*multipliers)[element] + factor * subgradient[element], 0.0, 1e6);
  }
}

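// Computes the gap between the current solution and the Lagrangian bound, as
// the code below does: positive reduced costs of the subsets selected in the
// solution, minus the negative reduced costs of the unselected ones, plus the
// over-coverage term sum_i (coverage_i - 1) * u_i.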
Cost SetCoverLagrangian::ComputeGap(
    const SubsetCostVector& reduced_costs, const SubsetBoolVector& solution,
    const ElementCostVector& multipliers) const {
  Cost gap = 0.0;
  // TODO(user): Parallelize this, if need be.
  for (const SubsetIndex subset : model_.SubsetRange()) {
    if (solution[subset] && reduced_costs[subset] > 0.0) {
      gap += reduced_costs[subset];
    } else if (!solution[subset] && reduced_costs[subset] < 0.0) {
      // This is gap += std::abs(reduced_costs[subset]); we know the sign.
      gap -= reduced_costs[subset];
    }
  }
  const ElementToIntVector& coverage = inv_->coverage();
  for (const ElementIndex element : model_.ElementRange()) {
    gap += (coverage[element] - 1) * multipliers[element];
  }
  return gap;
}

SubsetCostVector SetCoverLagrangian::ComputeDelta(
    const SubsetCostVector& reduced_costs,
    const ElementCostVector& multipliers) const {
  SubsetCostVector delta(model_.num_subsets());
  const ElementToIntVector& coverage = inv_->coverage();
  // This is definition (9) in [1].
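  // Spelled out, the loop below computes
  //   delta_j(u) = max(c_j(u), 0) + sum_{i \in I_j} u_i * (k_i - 1) / k_i,
  // where k_i = coverage[i] is the number of subsets covering element i.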
  const SparseColumnView& columns = model_.columns();
  // TODO(user): Parallelize this.
  for (const SubsetIndex subset : model_.SubsetRange()) {
    delta[subset] = std::max(reduced_costs[subset], 0.0);
    for (const ElementIndex element : columns[subset]) {
      const double size = coverage[element];
      delta[subset] += multipliers[element] * (size - 1.0) / size;
    }
  }
  return delta;
}

namespace {
// Helper class to compute the step size for the multipliers.
// The step size is updated every `period` iterations: it is multiplied by 0.5
// when the relative change in the lower bound over the period is greater than
// 0.01, and by 1.5 when that change is at most 0.001. The result is clamped
// to [1e-6, 10.0], and the bounds used to measure the change are reset at the
// end of each period.
class StepSizer {
 public:
  StepSizer(int period, double step_size)
      : period_(period), iter_to_check_(period), step_size_(step_size) {
    ResetBounds();
  }
  double UpdateStepSize(int iter, Cost lower_bound) {
    min_lb_ = std::min(min_lb_, lower_bound);
    max_lb_ = std::max(max_lb_, lower_bound);
    if (iter == iter_to_check_) {
      iter_to_check_ += period_;
      // Bounds can be negative, so we need to take the absolute value.
      // To avoid a division by zero when max_lb_ is 0.0, we use a gap of 0.0
      // in that case.
      const Cost lb_gap =
          max_lb_ == 0.0 ? 0.0 : (max_lb_ - min_lb_) / std::abs(max_lb_);
      DCHECK_GE(lb_gap, 0.0);
      if (lb_gap > 0.01) {
        step_size_ *= 0.5;
      } else if (lb_gap <= 0.001) {
        step_size_ *= 1.5;
      }
      step_size_ = std::clamp(step_size_, 1e-6, 10.0);
      ResetBounds();
    }
    return step_size_;
  }

 private:
  void ResetBounds() {
    min_lb_ = std::numeric_limits<Cost>::infinity();
    max_lb_ = -std::numeric_limits<Cost>::infinity();
  }
  int period_;
  int iter_to_check_;
  double step_size_;
  Cost min_lb_;
  Cost max_lb_;
};

// Helper class to decide whether to stop the algorithm.
// The algorithm stops when the lower bound has not improved (in relative and
// absolute terms) for `period` iterations.
class Stopper {
 public:
  explicit Stopper(int period)
      : period_(period),
        iter_to_check_(period),
        // lowest() and not min(): for a floating-point Cost, min() is the
        // smallest positive value, which would break the DCHECK below.
        lower_bound_(std::numeric_limits<Cost>::lowest()) {}
  bool DecideWhetherToStop(int iter, Cost lb) {
    DCHECK_GE(lb, lower_bound_);
    if (iter == iter_to_check_) {
      iter_to_check_ += period_;
      const Cost delta = lb - lower_bound_;
      const Cost relative_delta = delta / lb;
      DCHECK_GE(delta, 0.0);
      DCHECK_GE(relative_delta, 0.0);
      lower_bound_ = lb;
      return relative_delta < 0.001 && delta < 1;
    }
    return false;
  }

 private:
  int period_;
  int iter_to_check_;
  Cost lower_bound_;
};
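
// A sketch of how StepSizer and Stopper are meant to be used, mirroring the
// (currently commented-out) calls in ComputeLowerBound below:
//   StepSizer step_sizer(/*period=*/20, /*step_size=*/0.1);
//   Stopper stopper(/*period=*/100);
//   for (int iter = 0; iter < 1000; ++iter) {
//     ...
//     step_size = step_sizer.UpdateStepSize(iter, lagrangian_value);
//     if (stopper.DecideWhetherToStop(iter, lower_bound)) break;
//   }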

}  // namespace

namespace {
// TODO(user): Add this to the file defining AdjustableKAryHeap.
// Helper class keeping track of the k best (index, priority) pairs seen so
// far: a thin wrapper around a bounded-size binary heap.
template <typename Priority, typename Index, bool IsMaxHeap>
class TopKHeap {
 public:
  explicit TopKHeap(int max_size) : heap_(), max_size_(max_size) {}
  void Clear() { heap_.Clear(); }
  void Add(Priority priority, Index index) {
    if (heap_.Size() < max_size_) {
      heap_.Add(priority, index);
    } else if (heap_.HasPriority(priority, heap_.TopPriority())) {
      heap_.ReplaceTop(priority, index);
    }
  }

 private:
  AdjustableKAryHeap<Priority, Index, /*Arity=*/2, !IsMaxHeap> heap_;
  int max_size_;
};
}  // namespace

// Computes a lower bound on the optimal cost.
std::tuple<Cost, SubsetCostVector, ElementCostVector>
SetCoverLagrangian::ComputeLowerBound(const SubsetCostVector& costs,
                                      Cost upper_bound) {
  Cost lower_bound = 0.0;
  ElementCostVector multipliers = InitializeLagrangeMultipliers();
  double step_size = 0.1;               // [***] arbitrary, from [1].
  StepSizer step_sizer(20, step_size);  // [***] arbitrary, from [1].
  Stopper stopper(100);                 // [***] arbitrary, from [1].
  SubsetCostVector reduced_costs(costs);
  // For the time being, 4 threads seem to be the fastest.
  // Running Linux perf on the process shows that up to 60% of the cycles are
  // lost as idle cycles in the CPU backend, probably because the algorithm is
  // memory-bound.
  for (int iter = 0; iter < 1000; ++iter) {
    reduced_costs = ParallelComputeReducedCosts(costs, multipliers);
    const Cost lagrangian_value =
        ComputeLagrangianValue(reduced_costs, multipliers);
    UpdateMultipliers(step_size, lagrangian_value, upper_bound, reduced_costs,
                      &multipliers);
    lower_bound = std::max(lower_bound, lagrangian_value);
    // step_size should be updated as below. For the time being, we keep it
    // constant, because the rest of the implementation is not ready for it:
    // step_size = step_sizer.UpdateStepSize(iter, lagrangian_value);
    // if (stopper.DecideWhetherToStop(iter, lower_bound)) {
    //   break;
    // }
  }
  return std::make_tuple(lower_bound, reduced_costs, multipliers);
}
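
// A hypothetical usage sketch for ComputeLowerBound (the construction of the
// SetCoverLagrangian instance from a model and its invariant is assumed here
// and not shown in this file):
//   const auto [lower_bound, reduced_costs, multipliers] =
//       lagrangian.ComputeLowerBound(model.subset_costs(), upper_bound);
//   // lower_bound can then be compared against the cost of any feasible
//   // solution to bound its optimality gap.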

}  // namespace operations_research