set_cover_lagrangian.cc (Google OR-Tools v9.14)
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/set_cover/set_cover_lagrangian.h"

#include <algorithm>
#include <cstdlib>
#include <limits>
#include <tuple>
#include <vector>

#include "absl/log/check.h"
#include "absl/synchronization/blocking_counter.h"
#include "ortools/algorithms/adjustable_k_ary_heap.h"
#include "ortools/set_cover/base_types.h"
#include "ortools/set_cover/set_cover_invariant.h"
#include "ortools/set_cover/set_cover_model.h"

namespace operations_research {

// Notes from a discussion with Luca Accorsi (accorsi@) and Francesco Cavaliere
// regarding [1]:
// - the 3-phase algorithm in the paper actually uses pricing (which would
//   better be called "partial" pricing);
// - the columns that were used in the preceding solutions should be fixed,
//   because otherwise the search diversifies too much and degrades the best
//   solution (under "queue" in the paper);
// - the median algorithm is already in the STL (std::nth_element).

// Denoted as u in [1], it is a dual vector: a column vector of nonnegative
// (zero is included) multipliers for the different constraints.
// A deterministic way to compute a feasible (non-optimal) u:
// for all element indices i, u_i = min_{j \in J_i} c_j / |I_j|, where
// |I_j| denotes the number of elements covered by subset j.
//
// Concerning the fundamental ideas behind this approach, one may refer to [2].
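// A small worked example (hypothetical data, not from [1]): take elements
// {0, 1} and subsets S_0 = {0, 1} with c_0 = 4, S_1 = {1} with c_1 = 1.
// The marginal costs are c_0 / |I_0| = 4 / 2 = 2 and c_1 / |I_1| = 1 / 1 = 1.
// Element 0 is only covered by S_0, so u_0 = 2; element 1 is covered by both,
// so u_1 = min(2, 1) = 1, giving u = (2, 1).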
ElementCostVector SetCoverLagrangian::InitializeLagrangeMultipliers() const {
  ElementCostVector multipliers(model()->num_elements(),
                                std::numeric_limits<Cost>::infinity());
  SubsetCostVector marginal_costs(model()->num_subsets());
  // TODO(user): Parallelize this.
  for (const SubsetIndex subset : model()->SubsetRange()) {
    marginal_costs[subset] =
        model()->subset_costs()[subset] / model()->columns()[subset].size();
  }
  // TODO(user): Parallelize this.
  for (const ElementIndex element : model()->ElementRange()) {
    // Minimum marginal cost to cover this element.
    Cost min_marginal_cost = std::numeric_limits<Cost>::infinity();
    const SparseRowView& rows = model()->rows();
    // TODO(user): use std::min_element on rows[element] with a custom
    // comparator that gets marginal_costs[subset]. Check performance.
    for (const SubsetIndex subset : rows[element]) {
      min_marginal_cost = std::min(min_marginal_cost, marginal_costs[subset]);
    }
    multipliers[element] = min_marginal_cost;
  }
  return multipliers;
}

namespace {
// Computes the scalar product between a column and a vector of duals.
// Profiling has shown that this is where most of the time is spent.
// TODO(user): make this visible to other algorithms.
// TODO(user): Investigate.
Cost ScalarProduct(const SparseColumn& column, const ElementCostVector& dual) {
  Cost result = 0.0;
  for (const ColumnEntryIndex pos : column.index_range()) {
    result += dual[column[pos]];
  }
  return result;
}
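// For instance, for a column covering elements {0, 2} and duals
// u = (2.0, 1.0, 0.5), the result is u_0 + u_2 = 2.0 + 0.5 = 2.5.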

// Computes the reduced costs for a subset of subsets.
// This is a helper function for ParallelComputeReducedCosts().
// It is called on a slice of subsets, defined by slice_start and slice_end.
// The reduced costs are computed using the multipliers vector.
// The columns of the subsets are given by the columns view.
// The result is stored in reduced_costs.
void FillReducedCostsSlice(SubsetIndex slice_start, SubsetIndex slice_end,
                           const SubsetCostVector& costs,
                           const ElementCostVector& multipliers,
                           const SparseColumnView& columns,
                           SubsetCostVector* reduced_costs) {
  for (SubsetIndex subset = slice_start; subset < slice_end; ++subset) {
    (*reduced_costs)[subset] =
        costs[subset] - ScalarProduct(columns[subset], multipliers);
  }
}

BaseInt BlockSize(BaseInt size, int num_threads) {
  // Traditional formula to compute ceil(size / num_threads) in integer
  // arithmetic.
  return (size + num_threads - 1) / num_threads;
}
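// For instance, BlockSize(10, 4) == 3: four slices of sizes 3, 3, 3 and 1
// cover the 10 subsets.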
}  // namespace

// Computes the reduced costs for all subsets in parallel using ThreadPool.
SubsetCostVector SetCoverLagrangian::ParallelComputeReducedCosts(
    const SubsetCostVector& costs, const ElementCostVector& multipliers) const {
  const SubsetIndex num_subsets(model()->num_subsets());
  const SparseColumnView& columns = model()->columns();
  SubsetCostVector reduced_costs(num_subsets);
  // TODO(user): compute a close-to-optimal k-subset partitioning of the
  // columns based on their sizes. [***]
  const SubsetIndex block_size(BlockSize(num_subsets.value(), num_threads_));
  absl::BlockingCounter num_threads_running(num_threads_);
  SubsetIndex slice_start(0);
  for (int thread_index = 0; thread_index < num_threads_; ++thread_index) {
    const SubsetIndex slice_end =
        std::min(slice_start + block_size, num_subsets);
    thread_pool_->Schedule([&num_threads_running, slice_start, slice_end,
                            &costs, &multipliers, &columns, &reduced_costs]() {
      FillReducedCostsSlice(slice_start, slice_end, costs, multipliers,
                            columns, &reduced_costs);
      num_threads_running.DecrementCount();
    });
    slice_start = slice_end;
  }
  num_threads_running.Wait();
  return reduced_costs;
}

// Reduced cost (row vector). Denoted as c_j(u) in [1], right after equation
// (5). For a subset j, c_j(u) = c_j - sum_{i \in I_j} u_i, where I_j is the
// set of indices of the elements in subset j. For a general Integer Program
// A.x <= b, this would be:
//   c_j(u) = c_j - sum_{i \in I_j} a_{ij}.u_i
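// Continuing the small example above with u = (2, 1): S_0 covers {0, 1}, so
// c_0(u) = 4 - (2 + 1) = 1; S_1 covers {1}, so c_1(u) = 1 - 1 = 0.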
SubsetCostVector SetCoverLagrangian::ComputeReducedCosts(
    const SubsetCostVector& costs, const ElementCostVector& multipliers) const {
  const SparseColumnView& columns = model()->columns();
  SubsetCostVector reduced_costs(costs.size());
  FillReducedCostsSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()),
                        costs, multipliers, columns, &reduced_costs);
  return reduced_costs;
}

namespace {
// Helper function to compute the subgradient.
// It fills a slice of the subgradient vector from indices slice_start to
// slice_end. This is a helper function for ParallelComputeSubgradient(). The
// subgradient is computed using the reduced costs vector.
void FillSubgradientSlice(SubsetIndex slice_start, SubsetIndex slice_end,
                          const SparseColumnView& columns,
                          const SubsetCostVector& reduced_costs,
                          ElementCostVector* subgradient) {
  for (SubsetIndex subset(slice_start); subset < slice_end; ++subset) {
    if (reduced_costs[subset] < 0.0) {
      for (const ElementIndex element : columns[subset]) {
        (*subgradient)[element] -= 1.0;
      }
    }
  }
}
}  // namespace

// Vector of primal slack variables. Denoted as s_i(u) in [1], equation (6).
// For all element indices i, s_i(u) = 1 - sum_{j \in J_i} x_j(u),
// where J_i denotes the set of indices of subsets j covering element i.
// For a general Integer Program A x <= b, the subgradient cost vector is
// defined as A x - b. See [2].
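// For instance, if element i is covered by exactly two subsets with negative
// reduced cost, then s_i(u) = 1 - 2 = -1; if no subset covering i has a
// negative reduced cost, s_i(u) = 1. In the small example above, c_0(u) = 1
// and c_1(u) = 0, so no x_j(u) is 1 and s(u) = (1, 1).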
ElementCostVector SetCoverLagrangian::ComputeSubgradient(
    const SubsetCostVector& reduced_costs) const {
  // NOTE(user): Should the initialization be done with coverage[element]?
  ElementCostVector subgradient(model()->num_elements(), 1.0);
  FillSubgradientSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()),
                       model()->columns(), reduced_costs, &subgradient);
  return subgradient;
}

ElementCostVector SetCoverLagrangian::ParallelComputeSubgradient(
    const SubsetCostVector& reduced_costs) const {
  const SubsetIndex num_subsets(model()->num_subsets());
  const SparseColumnView& columns = model()->columns();
  ElementCostVector subgradient(model()->num_elements(), 1.0);
  // The subgradient has one component per element, while each thread processes
  // several subsets. Hence, use one vector per thread to avoid data races.
  // TODO(user): it may be better to split the elements among the threads,
  // although this might be less well-balanced.
  std::vector<ElementCostVector> subgradients(
      num_threads_, ElementCostVector(model()->num_elements()));
  absl::BlockingCounter num_threads_running(num_threads_);
  const SubsetIndex block_size(BlockSize(num_subsets.value(), num_threads_));
  SubsetIndex slice_start(0);
  for (int thread_index = 0; thread_index < num_threads_; ++thread_index) {
    const SubsetIndex slice_end =
        std::min(slice_start + block_size, num_subsets);
    thread_pool_->Schedule([&num_threads_running, slice_start, slice_end,
                            &reduced_costs, &columns, &subgradients,
                            thread_index]() {
      FillSubgradientSlice(slice_start, slice_end, columns, reduced_costs,
                           &subgradients[thread_index]);
      num_threads_running.DecrementCount();
    });
    slice_start = slice_end;
  }
  num_threads_running.Wait();
  for (int thread_index = 0; thread_index < num_threads_; ++thread_index) {
    for (const ElementIndex element : model()->ElementRange()) {
      subgradient[element] += subgradients[thread_index][element];
    }
  }
  return subgradient;
}

namespace {
// Helper function to compute the value of the Lagrangian.
// This is a helper function for ParallelComputeLagrangianValue().
// It is called on a slice of subsets, defined by slice_start and slice_end.
// The value of the Lagrangian is computed using the reduced costs vector and
// the multipliers vector.
// The result is stored in lagrangian_value.
void FillLagrangianValueSlice(SubsetIndex slice_start, SubsetIndex slice_end,
                              const SubsetCostVector& reduced_costs,
                              Cost* lagrangian_value) {
  // This is min \sum_{j \in N} c_j(u) x_j. This captures the remark (**)
  // below, taking into account the possible values for x_j, and using them to
  // minimize the terms.
  for (SubsetIndex subset(slice_start); subset < slice_end; ++subset) {
    if (reduced_costs[subset] < 0.0) {
      *lagrangian_value += reduced_costs[subset];
    }
  }
}
}  // namespace

// Compute the (scalar) value of the Lagrangian by fixing the value of x_j
// based on the sign of c_j(u). In [1] equation (4), it is:
//   L(u) = min \sum_{j \in N} c_j(u) x_j + \sum_{i \in M} u_i.
// This is obtained with:
// - if c_j(u) < 0: x_j(u) = 1,
// - if c_j(u) > 0: x_j(u) = 0, (**)
// - if c_j(u) = 0: x_j(u) is free, in {0, 1}; we use 0.
// For a general Integer Program A.x <= b, the Lagrangian [2] is
//   L(u) = min \sum_{j \in N} c_j(u) x_j + \sum_{i \in M} b_i.u_i.
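// In the small example above, \sum_{i \in M} u_i = 2 + 1 = 3 and no reduced
// cost is negative, so L(u) = 3. This is a valid lower bound on the optimal
// cover cost, which is 4 (taking S_0 alone).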
Cost SetCoverLagrangian::ComputeLagrangianValue(
    const SubsetCostVector& reduced_costs,
    const ElementCostVector& multipliers) const {
  Cost lagrangian_value = 0.0;
  // This is \sum_{i \in M} u_i.
  for (const Cost u : multipliers) {
    lagrangian_value += u;
  }
  FillLagrangianValueSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()),
                           reduced_costs, &lagrangian_value);
  return lagrangian_value;
}

Cost SetCoverLagrangian::ParallelComputeLagrangianValue(
    const SubsetCostVector& reduced_costs,
    const ElementCostVector& multipliers) const {
  Cost lagrangian_value = 0.0;
  // This is \sum_{i \in M} u_i.
  for (const Cost u : multipliers) {
    lagrangian_value += u;
  }
  std::vector<Cost> lagrangian_values(num_threads_, 0.0);
  absl::BlockingCounter num_threads_running(num_threads_);
  const SubsetIndex block_size(BlockSize(model()->num_subsets(), num_threads_));
  const SubsetIndex num_subsets(model()->num_subsets());
  SubsetIndex slice_start(0);
  for (int thread_index = 0; thread_index < num_threads_; ++thread_index) {
    const SubsetIndex slice_end =
        std::min(slice_start + block_size, num_subsets);
    thread_pool_->Schedule([&num_threads_running, slice_start, slice_end,
                            thread_index, &reduced_costs,
                            &lagrangian_values]() {
      FillLagrangianValueSlice(slice_start, slice_end, reduced_costs,
                               &lagrangian_values[thread_index]);
      num_threads_running.DecrementCount();
    });
    slice_start = slice_end;
  }
  num_threads_running.Wait();
  for (const Cost l : lagrangian_values) {
    lagrangian_value += l;
  }
  return lagrangian_value;
}

// Perform a subgradient step.
// In the general case, for an Integer Program A.x <= b, the Lagrangian
// multipliers vector at step k+1 is defined as:
//   u^{k+1} = u^k + t_k (A x^k - b),
// with the term t_k = lambda_k * (UB - L(u^k)) / |A x^k - b|^2, where |.| is
// the 2-norm (i.e. Euclidean).
// In our case, the problem A x <= b is in the form A x >= 1, so we replace
// A x - b by s_i(u) = 1 - sum_{j \in J_i} x_j(u). Then
// |A x^k - b|^2 = |s(u)|^2, and t_k is of the form:
//   t_k = lambda_k * (UB - L(u^k)) / |s^k(u)|^2.
// Now, the coordinates of the multiplier vectors u^k, u^k_i, are nonnegative,
// i.e. u^k_i >= 0; negative values are simply cut off. Following [3], each of
// the coordinates is defined as:
//   u^{k+1}_i = max(u^k_i + lambda_k * (UB - L(u^k)) / |s^k(u)|^2 * s^k_i(u), 0).
// This is eq. (7) in [1].
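// Numerically, with the small example above: lambda_k = 0.1, UB = 4,
// L(u^k) = 3 and s^k(u) = (1, 1), so |s^k(u)|^2 = 2,
// t_k = 0.1 * (4 - 3) / 2 = 0.05, and u^{k+1} = (2.05, 1.05).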
void SetCoverLagrangian::UpdateMultipliers(
    double step_size, Cost lagrangian_value, Cost upper_bound,
    const SubsetCostVector& reduced_costs,
    ElementCostVector* multipliers) const {
  // step_size is \lambda_k in [1].
  DCHECK_GT(step_size, 0);
  // Compute the square of the Euclidean norm of the subgradient vector.
  const ElementCostVector subgradient = ComputeSubgradient(reduced_costs);
  Cost subgradient_square_norm = 0.0;
  for (const Cost x : subgradient) {
    subgradient_square_norm += x * x;
  }
  // First compute lambda_k * (UB - L(u^k)).
  const Cost factor =
      step_size * (upper_bound - lagrangian_value) / subgradient_square_norm;
  for (const ElementIndex element : model()->ElementRange()) {
    // Keep the multipliers from going negative or through the roof. 1e6 was
    // chosen arbitrarily. [***]
    (*multipliers)[element] = std::clamp(
        (*multipliers)[element] + factor * subgradient[element], 0.0, 1e6);
  }
}

void SetCoverLagrangian::ParallelUpdateMultipliers(
    double step_size, Cost lagrangian_value, Cost upper_bound,
    const SubsetCostVector& reduced_costs,
    ElementCostVector* multipliers) const {
  // step_size is \lambda_k in [1].
  DCHECK_GT(step_size, 0);
  // Compute the square of the Euclidean norm of the subgradient vector.
  const ElementCostVector subgradient =
      ParallelComputeSubgradient(reduced_costs);
  Cost subgradient_square_norm = 0.0;
  for (const Cost x : subgradient) {
    subgradient_square_norm += x * x;
  }
  // First compute lambda_k * (UB - L(u^k)).
  const Cost factor =
      step_size * (upper_bound - lagrangian_value) / subgradient_square_norm;
  for (const ElementIndex element : model()->ElementRange()) {
    // Keep the multipliers from going negative or through the roof.
    const Cost kRoof = 1e6;  // Arbitrary value, from [1].
    (*multipliers)[element] = std::clamp(
        (*multipliers)[element] + factor * subgradient[element], 0.0, kRoof);
  }
}

Cost SetCoverLagrangian::ComputeGap(
    const SubsetCostVector& reduced_costs, const SubsetBoolVector& solution,
    const ElementCostVector& multipliers) const {
  Cost gap = 0.0;
  // TODO(user): Parallelize this, if need be.
  for (const SubsetIndex subset : model()->SubsetRange()) {
    if (solution[subset] && reduced_costs[subset] > 0.0) {
      gap += reduced_costs[subset];
    } else if (!solution[subset] && reduced_costs[subset] < 0.0) {
      // gap += std::abs(reduced_costs[subset]); but we know the sign of the
      // right-hand side.
      gap -= reduced_costs[subset];
    }
  }
  const ElementToIntVector& coverage = inv()->coverage();
  for (const ElementIndex element : model()->ElementRange()) {
    gap += (coverage[element] - 1) * multipliers[element];
  }
  return gap;
}
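// As an illustration of the three kinds of contributions to the gap above: a
// selected subset with c_j(u) = 1 adds 1, an unselected subset with
// c_j(u) = -0.5 adds 0.5, and an element covered twice with u_i = 1 adds
// (2 - 1) * 1 = 1.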

SubsetCostVector SetCoverLagrangian::ComputeDelta(
    const SubsetCostVector& reduced_costs,
    const ElementCostVector& multipliers) const {
  SubsetCostVector delta(model()->num_subsets());
  const ElementToIntVector& coverage = inv()->coverage();
  // This is definition (9) in [1].
  const SparseColumnView& columns = model()->columns();
  // TODO(user): Parallelize this.
  for (const SubsetIndex subset : model()->SubsetRange()) {
    delta[subset] = std::max(reduced_costs[subset], 0.0);
    for (const ElementIndex element : columns[subset]) {
      const double size = coverage[element];
      delta[subset] += multipliers[element] * (size - 1.0) / size;
    }
  }
  return delta;
}
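// For instance, for a subset with c_j(u) = 1 covering two elements with
// coverage 2 and 1 and multipliers u = (2, 1), definition (9) gives
// delta_j = 1 + 2 * (2 - 1) / 2 + 1 * (1 - 1) / 1 = 2.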

namespace {
// Helper class to compute the step size for the multipliers.
// The step size is updated every `period` iterations: it is multiplied by 0.5
// when the relative change in the lower bound over the period is greater than
// 0.01, and by 1.5 when it is at most 0.001.
class StepSizer {
 public:
  StepSizer(int period, double step_size)
      : period_(period), iter_to_check_(period), step_size_(step_size) {
    ResetBounds();
  }
  double UpdateStepSize(int iter, Cost lower_bound) {
    min_lb_ = std::min(min_lb_, lower_bound);
    max_lb_ = std::max(max_lb_, lower_bound);
    if (iter == iter_to_check_) {
      iter_to_check_ += period_;
      // Bounds can be negative, so we need to take the absolute value.
      // We also need to avoid division by zero, so we use 0.0 in that case.
      const Cost lb_gap =
          max_lb_ == 0.0 ? 0.0 : (max_lb_ - min_lb_) / std::abs(max_lb_);
      DCHECK_GE(lb_gap, 0.0);
      if (lb_gap > 0.01) {
        step_size_ *= 0.5;
      } else if (lb_gap <= 0.001) {
        step_size_ *= 1.5;
      }
      step_size_ = std::clamp(step_size_, 1e-6, 10.0);
      ResetBounds();
    }
    return step_size_;
  }

 private:
  void ResetBounds() {
    min_lb_ = std::numeric_limits<Cost>::infinity();
    max_lb_ = -std::numeric_limits<Cost>::infinity();
  }
  int period_;
  int iter_to_check_;
  double step_size_;
  Cost min_lb_;
  Cost max_lb_;
};

// Helper class to decide whether to stop the algorithm.
// The algorithm stops when the lower bound is not updated for a certain
// number of iterations.
class Stopper {
 public:
  explicit Stopper(int period)
      : period_(period),
        iter_to_check_(period),
        // lowest() and not min(), which is the smallest positive double.
        lower_bound_(std::numeric_limits<Cost>::lowest()) {}
  bool DecideWhetherToStop(int iter, Cost lb) {
    DCHECK_GE(lb, lower_bound_);
    if (iter == iter_to_check_) {
      iter_to_check_ += period_;
      const Cost delta = lb - lower_bound_;
      const Cost relative_delta = delta / lb;
      DCHECK_GE(delta, 0.0);
      DCHECK_GE(relative_delta, 0.0);
      lower_bound_ = lb;
      return relative_delta < 0.001 && delta < 1;
    }
    return false;
  }

 private:
  int period_;
  int iter_to_check_;
  Cost lower_bound_;
};

}  // namespace

namespace {
// TODO(user): Add this to the file defining AdjustableKAryHeap.
template <typename Priority, typename Index, bool IsMaxHeap>
class TopKHeap {
 public:
  explicit TopKHeap(int max_size) : heap_(), max_size_(max_size) {}
  void Clear() { heap_.Clear(); }
  void Add(Priority priority, Index index) {
    if (heap_.Size() < max_size_) {
      heap_.Add(priority, index);
    } else if (heap_.HasPriority(priority, heap_.TopPriority())) {
      heap_.ReplaceTop(priority, index);
    }
  }

 private:
  AdjustableKAryHeap<Priority, Index, /*Arity=*/2, !IsMaxHeap> heap_;
  int max_size_;
};
}  // namespace

// Computes a lower bound on the optimal cost.
std::tuple<Cost, SubsetCostVector, ElementCostVector>
SetCoverLagrangian::ComputeLowerBound(const SubsetCostVector& costs,
                                      Cost upper_bound) {
  StopWatch stop_watch(&run_time_);
  Cost lower_bound = 0.0;
  ElementCostVector multipliers = InitializeLagrangeMultipliers();
  double step_size = 0.1;               // [***] arbitrary, from [1].
  StepSizer step_sizer(20, step_size);  // [***] arbitrary, from [1].
  Stopper stopper(100);                 // [***] arbitrary, from [1].
  SubsetCostVector reduced_costs(costs);
  // For the time being, using 4 threads seems to be the fastest.
  // Running linux perf on the process shows that up to 60% of the cycles are
  // lost as idle cycles in the CPU backend, probably because the algorithm is
  // memory-bound.
  for (int iter = 0; iter < 1000; ++iter) {
    reduced_costs = ParallelComputeReducedCosts(costs, multipliers);
    const Cost lagrangian_value =
        ParallelComputeLagrangianValue(reduced_costs, multipliers);
    ParallelUpdateMultipliers(step_size, lagrangian_value, upper_bound,
                              reduced_costs, &multipliers);
    lower_bound = std::max(lower_bound, lagrangian_value);
    // The step size should be updated as follows; for the time being, we keep
    // it constant because the rest of the implementation is not adequate yet:
    // step_size = step_sizer.UpdateStepSize(iter, lagrangian_value);
    // if (stopper.DecideWhetherToStop(iter, lower_bound)) {
    //   break;
    // }
  }
  inv()->ReportLowerBound(lower_bound, /*is_cost_consistent=*/false);
  return std::make_tuple(lower_bound, reduced_costs, multipliers);
}
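// A minimal usage sketch (hypothetical caller; the construction of the
// invariant and of SetCoverLagrangian below are assumptions, not taken from
// this file):
//   SetCoverModel model;                  // filled with subsets and costs
//   SetCoverInvariant inv(&model);        // tracks the current solution
//   Cost upper_bound = ...;               // e.g. the cost of a greedy cover
//   SetCoverLagrangian lagrangian(&inv);  // assumed constructor
//   const auto [lower_bound, reduced_costs, multipliers] =
//       lagrangian.ComputeLowerBound(model.subset_costs(), upper_bound);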

}  // namespace operations_research