Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
set_cover_model.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/algorithms/set_cover_model.h"
15
16#include <algorithm>
17#include <cmath>
18#include <cstddef>
19#include <cstdint>
20#include <limits>
21#include <numeric>
22#include <tuple>
23#include <utility>
24#include <vector>
25
26#include "absl/log/check.h"
27#include "absl/numeric/bits.h"
28#include "absl/random/discrete_distribution.h"
29#include "absl/random/distributions.h"
30#include "absl/random/random.h"
31#include "absl/strings/str_format.h"
32#include "absl/types/span.h"
33#include "ortools/algorithms/radix_sort.h"
34#include "ortools/algorithms/set_cover.pb.h"
35#include "ortools/base/logging.h"
36
37namespace operations_research {
38
39namespace {
40
41// Returns min + floor(scaling_factor * (raw_value - min + random_term)),
42// where raw_value is drawn from the discrete distribution dist, and
43// random_term is a double drawn uniformly in [0, 1).
44BaseInt DiscreteAffine(absl::BitGen& bitgen,
45 absl::discrete_distribution<BaseInt>& dist, BaseInt min,
46 double scaling_factor) {
47 const BaseInt raw_value = dist(bitgen);
48 const double random_term = absl::Uniform<double>(bitgen, 0, 1.0);
49 const BaseInt affine_value =
50 static_cast<BaseInt>(
51 std::floor((raw_value - min + random_term) * scaling_factor)) +
52 min;
53 return affine_value;
54}
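// For instance, with min = 2 and scaling_factor = 1.5, a draw of raw_value = 4
// and random_term = 0.37 yields 2 + floor(1.5 * (4 - 2 + 0.37)) = 5.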
55
56// For a given view (SparseColumnView or SparseRowView), returns the minimum
57// vector size in the view and the histogram of the vector sizes, which can be
58// used to build an absl::discrete_distribution.
59template <typename View>
60std::tuple<BaseInt, std::vector<BaseInt>> ComputeSizeHistogram(
61 const View& view) {
62 BaseInt max_size = 0;
63 BaseInt min_size = std::numeric_limits<BaseInt>::max();
64 for (const auto& vec : view) {
65 const BaseInt size = vec.size();
66 min_size = std::min(min_size, size);
67 max_size = std::max(max_size, size);
68 }
69 std::vector<BaseInt> weights(max_size + 1, 0);
70 for (const auto& vec : view) {
71 const BaseInt size = vec.size();
72 ++weights[size];
73 }
74 return {min_size, weights};
75}
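// For instance, a view whose vectors have sizes {2, 3, 3, 5} yields
// min_size = 2 and weights = {0, 0, 1, 2, 0, 1} (indexed by size 0..5).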
76
77template <typename View>
78std::tuple<BaseInt, absl::discrete_distribution<BaseInt>>
79ComputeSizeDistribution(const View& view) {
80 const auto [min_size, weights] = ComputeSizeHistogram(view);
81 absl::discrete_distribution<BaseInt> dist(weights.begin(), weights.end());
82 return {min_size, dist};
83}
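// The resulting absl::discrete_distribution samples each size i with
// probability weights[i] / sum(weights), i.e. proportionally to how often
// vectors of that size appear in the seed model.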
84} // namespace
85
86SetCoverModel SetCoverModel::GenerateRandomModelFrom(
87 const SetCoverModel& seed_model, BaseInt num_elements, BaseInt num_subsets,
88 double row_scale, double column_scale, double cost_scale) {
89 SetCoverModel model;
90 DCHECK_GT(row_scale, 0.0);
91 DCHECK_GT(column_scale, 0.0);
92 DCHECK_GT(cost_scale, 0.0);
93 model.num_elements_ = num_elements;
94 model.num_nonzeros_ = 0;
95 model.ReserveNumSubsets(num_subsets);
96 model.UpdateAllSubsetsList();
97 absl::BitGen bitgen;
98
99 // Create the distribution of the cardinalities of the subsets based on the
100 // histogram of column sizes in the seed model.
101 auto [min_column_size, column_dist] =
102 ComputeSizeDistribution(seed_model.columns());
103
104 // Create the distribution of the degrees of the elements based on the
105 // histogram of row sizes in the seed model.
106 auto [min_row_size, row_dist] = ComputeSizeDistribution(seed_model.rows());
107
108 // Prepare the degrees of the elements in the generated model, and use them
109 // in a distribution to generate the columns. This biases the generated
110 // columns towards the elements with higher degrees.
111 ElementToIntVector degrees(num_elements);
112 for (ElementIndex element(0); element.value() < num_elements; ++element) {
113 degrees[element] =
114 DiscreteAffine(bitgen, row_dist, min_row_size, row_scale);
115 }
116 absl::discrete_distribution<BaseInt> degree_dist(degrees.begin(),
117 degrees.end());
118
119 // Vector indicating whether the generated model covers an element.
120 ElementBoolVector contains_element(num_elements, false);
121
122 // Number of elements in the generated model, using the above vector.
123 BaseInt num_elements_covered(0);
124
125 // Maximum number of tries to generate a random element that is not yet in
126 // the subset, or a random subset that does not contain the element.
127 const int kMaxTries = 10;
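 // If all kMaxTries draws fail, the last drawn candidate is kept anyway.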
128
129 // Loop-local vector indicating whether the currently generated subset
130 // contains an element.
131 ElementBoolVector subset_already_contains_element(num_elements, false);
132 for (SubsetIndex subset(0); subset.value() < num_subsets; ++subset) {
133 LOG_EVERY_N_SEC(INFO, 5)
134 << absl::StrFormat("Generating subset %d (%.1f%%)", subset.value(),
135 100.0 * subset.value() / num_subsets);
136 const BaseInt cardinality =
137 DiscreteAffine(bitgen, column_dist, min_column_size, column_scale);
138 model.columns_[subset].reserve(cardinality);
139 for (BaseInt iter = 0; iter < cardinality; ++iter) {
140 int num_tries = 0;
141 ElementIndex element;
142 // Choose an element that is not yet in the subset at random with a
143 // distribution that is proportional to the degree of the element.
144 do {
145 element = ElementIndex(degree_dist(bitgen));
146 CHECK_LT(element.value(), num_elements);
147 ++num_tries;
148 } while (num_tries < kMaxTries &&
149 subset_already_contains_element[element]);
150 ++model.num_nonzeros_;
151 model.columns_[subset].push_back(element);
152 subset_already_contains_element[element] = true;
153 if (!contains_element[element]) {
154 contains_element[element] = true;
155 ++num_elements_covered;
156 }
157 }
158 for (const ElementIndex element : model.columns_[subset]) {
159 subset_already_contains_element[element] = false;
160 }
161 }
162 LOG(INFO) << "Finished generating the model with " << num_elements_covered
163 << " elements covered.";
164
165 // It can happen -- rarely in practice -- that some of the elements cannot be
166 // covered. Let's add them to randomly chosen subsets.
167 if (num_elements_covered != num_elements) {
168 LOG(INFO) << "Generated model with " << num_elements - num_elements_covered
169 << " elements that cannot be covered. Adding them to random "
170 "subsets.";
171 SubsetBoolVector element_already_in_subset(num_subsets, false);
172 for (ElementIndex element(0); element.value() < num_elements; ++element) {
173 LOG_EVERY_N_SEC(INFO, 5) << absl::StrFormat(
174 "Generating subsets for element %d (%.1f%%)", element.value(),
175 100.0 * element.value() / num_elements);
176 if (!contains_element[element]) {
177 const BaseInt degree =
178 DiscreteAffine(bitgen, row_dist, min_row_size, row_scale);
179 std::vector<SubsetIndex> generated_subsets;
180 generated_subsets.reserve(degree);
181 for (BaseInt i = 0; i < degree; ++i) {
182 int num_tries = 0;
183 SubsetIndex subset_index;
184 // The method is the same as above.
185 do {
186 subset_index = SubsetIndex(DiscreteAffine(
187 bitgen, column_dist, min_column_size, column_scale));
188 ++num_tries;
189 } while (num_tries < kMaxTries &&
190 element_already_in_subset[subset_index]);
191 ++model.num_nonzeros_;
192 model.columns_[subset_index].push_back(element);
193 element_already_in_subset[subset_index] = true;
194 generated_subsets.push_back(subset_index);
195 }
196 for (const SubsetIndex subset_index : generated_subsets) {
197 element_already_in_subset[subset_index] = false;
198 }
199 contains_element[element] = true;
200 ++num_elements_covered;
201 }
202 }
203 LOG(INFO) << "Finished generating subsets for elements that were not "
204 "covered in the original model.";
205 }
206 LOG(INFO) << "Finished generating the model. There are "
207 << num_elements - num_elements_covered << " uncovered elements.";
208
209 CHECK_EQ(num_elements_covered, num_elements);
210
211 // TODO(user): if necessary, use a better distribution for the costs.
212 // The generation of the costs is done in two steps. First, compute the
213 // minimum and maximum costs.
214 Cost min_cost = std::numeric_limits<Cost>::infinity();
215 Cost max_cost = -min_cost;
216 for (const Cost cost : seed_model.subset_costs()) {
217 min_cost = std::min(min_cost, cost);
218 max_cost = std::max(max_cost, cost);
219 }
220 // Then, generate random numbers in [min_cost, min_cost + cost_range], where
221 // cost_range is defined as:
222 const Cost cost_range = cost_scale * (max_cost - min_cost);
223 for (Cost& cost : model.subset_costs_) {
224 cost = min_cost + absl::Uniform<double>(bitgen, 0, cost_range);
225 }
226 model.CreateSparseRowView();
227 return model;
228}
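// A minimal usage sketch (illustrative values only), assuming an existing
// model `seed`:
//
//   SetCoverModel random_model = SetCoverModel::GenerateRandomModelFrom(
//       seed, /*num_elements=*/10 * seed.num_elements(),
//       /*num_subsets=*/10 * seed.num_subsets(),
//       /*row_scale=*/1.1, /*column_scale=*/1.1, /*cost_scale=*/1.0);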
229
230void SetCoverModel::UpdateAllSubsetsList() {
231 const BaseInt old_size = all_subsets_.size();
232 DCHECK_LE(old_size, num_subsets());
233 all_subsets_.resize(num_subsets());
234 for (BaseInt subset(old_size); subset < num_subsets(); ++subset) {
235 all_subsets_[subset] = SubsetIndex(subset);
236 }
237}
238
239void SetCoverModel::AddEmptySubset(Cost cost) {
240 elements_in_subsets_are_sorted_ = false;
241 subset_costs_.push_back(cost);
242 columns_.push_back(SparseColumn());
243 all_subsets_.push_back(SubsetIndex(num_subsets_));
244 ++num_subsets_;
245 CHECK_EQ(columns_.size(), num_subsets());
246 CHECK_EQ(subset_costs_.size(), num_subsets());
247 CHECK_EQ(all_subsets_.size(), num_subsets());
248 row_view_is_valid_ = false;
249}
250
251void SetCoverModel::AddElementToLastSubset(BaseInt element) {
252 elements_in_subsets_are_sorted_ = false;
253 columns_.back().push_back(ElementIndex(element));
254 num_elements_ = std::max(num_elements_, element + 1);
255 // No need to update the list all_subsets_.
256 ++num_nonzeros_;
257 row_view_is_valid_ = false;
258}
259
260void SetCoverModel::AddElementToLastSubset(ElementIndex element) {
261 AddElementToLastSubset(element.value());
262}
263
264void SetCoverModel::SetSubsetCost(BaseInt subset, Cost cost) {
265 elements_in_subsets_are_sorted_ = false;
266 CHECK(std::isfinite(cost));
267 DCHECK_GE(subset, 0);
268 if (subset >= num_subsets()) {
269 num_subsets_ = std::max(num_subsets_, subset + 1);
270 columns_.resize(num_subsets_, SparseColumn());
271 subset_costs_.resize(num_subsets_, 0.0);
272 row_view_is_valid_ = false;
273 UpdateAllSubsetsList();
274 }
275 subset_costs_[SubsetIndex(subset)] = cost;
276}
277
278void SetCoverModel::SetSubsetCost(SubsetIndex subset, Cost cost) {
279 SetSubsetCost(subset.value(), cost);
280}
281
282void SetCoverModel::AddElementToSubset(BaseInt element, BaseInt subset) {
283 elements_in_subsets_are_sorted_ = false;
284 if (subset >= num_subsets()) {
285 num_subsets_ = subset + 1;
286 subset_costs_.resize(num_subsets_, 0.0);
287 columns_.resize(num_subsets_, SparseColumn());
288 UpdateAllSubsetsList();
289 }
290 columns_[SubsetIndex(subset)].push_back(ElementIndex(element));
291 num_elements_ = std::max(num_elements_, element + 1);
292 ++num_nonzeros_;
293 row_view_is_valid_ = false;
294}
295
296void SetCoverModel::AddElementToSubset(ElementIndex element,
297 SubsetIndex subset) {
298 AddElementToSubset(element.value(), subset.value());
299}
300
301// Reserves num_subsets columns in the model.
302void SetCoverModel::ReserveNumSubsets(BaseInt num_subsets) {
303 num_subsets_ = std::max(num_subsets_, num_subsets);
304 columns_.resize(num_subsets_, SparseColumn());
305 subset_costs_.resize(num_subsets_, 0.0);
306 UpdateAllSubsetsList();
307}
308
309void SetCoverModel::ReserveNumSubsets(SubsetIndex num_subsets) {
310 ReserveNumSubsets(num_subsets.value());
311}
312
313// Reserves num_elements rows in the column indexed by subset.
314void SetCoverModel::ReserveNumElementsInSubset(BaseInt num_elements,
315 BaseInt subset) {
316 ReserveNumSubsets(subset);
317 columns_[SubsetIndex(subset)].reserve(ColumnEntryIndex(num_elements));
318}
319
320void SetCoverModel::ReserveNumElementsInSubset(ElementIndex num_elements,
321 SubsetIndex subset) {
322 ReserveNumElementsInSubset(num_elements.value(), subset.value());
323}
324
325void SetCoverModel::SortElementsInSubsets() {
326 for (const SubsetIndex subset : SubsetRange()) {
327 // std::sort(columns_[subset].begin(), columns_[subset].end());
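 // ElementIndex is a strong-int wrapper with the same layout as BaseInt, so
 // the column data can be reinterpreted and sorted in place with RadixSort,
 // which is typically faster than std::sort on this integer data.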
328 BaseInt* data = reinterpret_cast<BaseInt*>(columns_[subset].data());
329 RadixSort(absl::MakeSpan(data, columns_[subset].size()));
330 }
331 elements_in_subsets_are_sorted_ = true;
332}
333
334void SetCoverModel::CreateSparseRowView() {
335 if (row_view_is_valid_) {
336 return;
337 }
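 // The row view is built in two passes below: a first pass over the columns
 // counts how many subsets cover each element (so that each row can be
 // reserved exactly), then a second pass fills the rows in subset order.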
338 rows_.resize(num_elements_, SparseRow());
339 ElementToIntVector row_sizes(num_elements_, 0);
340 for (const SubsetIndex subset : SubsetRange()) {
341 // Sort the columns. It's not super-critical to improve performance here
342 // as this needs to be done only once.
343 BaseInt* data = reinterpret_cast<BaseInt*>(columns_[subset].data());
344 RadixSort(absl::MakeSpan(data, columns_[subset].size()));
345
346 ElementIndex preceding_element(-1);
347 for (const ElementIndex element : columns_[subset]) {
348 DCHECK_GT(element, preceding_element); // Fail if there is a repetition.
349 ++row_sizes[element];
350 preceding_element = element;
351 }
352 }
353 for (const ElementIndex element : ElementRange()) {
354 rows_[element].reserve(RowEntryIndex(row_sizes[element]));
355 }
356 for (const SubsetIndex subset : SubsetRange()) {
357 for (const ElementIndex element : columns_[subset]) {
358 rows_[element].emplace_back(subset);
359 }
360 }
361 row_view_is_valid_ = true;
362 elements_in_subsets_are_sorted_ = true;
363}
364
365bool SetCoverModel::ComputeFeasibility() const {
366 CHECK_GT(num_elements(), 0);
367 CHECK_GT(num_subsets(), 0);
368 CHECK_EQ(columns_.size(), num_subsets());
369 CHECK_EQ(subset_costs_.size(), num_subsets());
370 CHECK_EQ(all_subsets_.size(), num_subsets());
371 ElementToIntVector coverage(num_elements_, 0);
372 for (const Cost cost : subset_costs_) {
373 CHECK_GT(cost, 0.0);
374 }
375 for (const SparseColumn& column : columns_) {
376 CHECK_GT(column.size(), 0);
377 for (const ElementIndex element : column) {
378 ++coverage[element];
379 }
380 }
381 for (const ElementIndex element : ElementRange()) {
382 CHECK_GE(coverage[element], 0);
383 if (coverage[element] == 0) {
384 return false;
385 }
386 }
387 VLOG(1) << "Max possible coverage = "
388 << *std::max_element(coverage.begin(), coverage.end());
389 for (const SubsetIndex subset : SubsetRange()) {
390 CHECK_EQ(all_subsets_[subset.value()], subset) << "subset = " << subset;
391 }
392 return true;
393}
394
395SetCoverProto SetCoverModel::ExportModelAsProto() const {
396 CHECK(elements_in_subsets_are_sorted_);
397 SetCoverProto message;
398 for (const SubsetIndex subset : SubsetRange()) {
399 LOG_EVERY_N_SEC(INFO, 5)
400 << absl::StrFormat("Exporting subset %d (%.1f%%)", subset.value(),
401 100.0 * subset.value() / num_subsets());
402 SetCoverProto::Subset* subset_proto = message.add_subset();
403 subset_proto->set_cost(subset_costs_[subset]);
404 SparseColumn column = columns_[subset]; // Copy is intentional.
405 // std::sort(column.begin(), column.end());
406 BaseInt* data = reinterpret_cast<BaseInt*>(column.data());
407 RadixSort(absl::MakeSpan(data, column.size()));
408 for (const ElementIndex element : column) {
409 subset_proto->add_element(element.value());
410 }
411 }
412 LOG(INFO) << "Finished exporting the model.";
413 return message;
414}
415
416void SetCoverModel::ImportModelFromProto(const SetCoverProto& message) {
417 columns_.clear();
418 subset_costs_.clear();
419 ReserveNumSubsets(message.subset_size());
420 SubsetIndex subset_index(0);
421 for (const SetCoverProto::Subset& subset_proto : message.subset()) {
422 subset_costs_[subset_index] = subset_proto.cost();
423 if (subset_proto.element_size() > 0) {
424 columns_[subset_index].reserve(
425 ColumnEntryIndex(subset_proto.element_size()));
426 for (const BaseInt element : subset_proto.element()) {
427 columns_[subset_index].push_back(ElementIndex(element));
428 num_elements_ = std::max(num_elements_, element + 1);
429 }
430 ++subset_index;
431 }
432 }
433 UpdateAllSubsetsList();
434 CreateSparseRowView();
435}
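// Round-trip sketch (illustrative): the two methods above serialize and
// restore a model. ExportModelAsProto() requires the elements in each subset
// to be sorted, which CreateSparseRowView() guarantees:
//
//   model.CreateSparseRowView();
//   const SetCoverProto proto = model.ExportModelAsProto();
//   SetCoverModel restored;
//   restored.ImportModelFromProto(proto);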
436
437namespace {
438// Returns the standard deviation of the values in the vector, excluding the
439// values that are zero.
440template <typename T>
441double StandardDeviation(const std::vector<T>& values) {
442 const size_t size = values.size();
443 double n = 0.0; // n is used in a calculation involving doubles.
444 double sum_of_squares = 0.0;
445 double sum = 0.0;
446 for (size_t i = 0; i < size; ++i) {
447 double sample = static_cast<double>(values[i]);
448 if (sample == 0.0) continue;
449 sum_of_squares += sample * sample;
450 sum += sample;
451 ++n;
452 }
453 // Since we know all the values, we can compute the standard deviation
454 // exactly.
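 // Note that the shortcut formula sqrt(E[X^2] - E[X]^2) used below can lose
 // precision when the mean is much larger than the spread.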
455 return n == 0.0 ? 0.0 : sqrt((sum_of_squares - sum * sum / n) / n);
456}
457
458// Statistics accumulation class used to compute statistics on the deltas of
459// the row and column elements and their sizes in bytes.
460// Since the values are not all stored, it's not possible to compute the median
461// exactly, so it is returned as 0.0. NaN might seem a better choice, but
462// NaNs can propagate and cause problems downstream.
463class StatsAccumulator {
464 public:
465 StatsAccumulator()
466 : count_(0),
467 min_(kInfinity),
468 max_(-kInfinity),
469 sum_(0.0),
470 sum_of_squares_(0.0) {}
471
472 void Register(double value) {
473 ++count_;
474 min_ = std::min(min_, value);
475 max_ = std::max(max_, value);
476 sum_ += value;
477 sum_of_squares_ += value * value;
478 }
479
480 SetCoverModel::Stats ComputeStats() const {
481 const int64_t n = count_;
482 // Since the code is used on a known number of values, we can compute the
483 // standard deviation exactly, even if the values are not all stored.
484 const double stddev =
485 n == 0 ? 0.0 : sqrt((sum_of_squares_ - sum_ * sum_ / n) / n);
486 return SetCoverModel::Stats{min_, max_, 0.0, sum_ / n, stddev};
487 }
488
489 private:
490 static constexpr double kInfinity = std::numeric_limits<double>::infinity();
491 int64_t count_;
492 double min_;
493 double max_;
494 double sum_;
495 double sum_of_squares_;
496};
497} // namespace
498
499template <typename T>
500SetCoverModel::Stats ComputeStats(std::vector<T> sizes) {
501 SetCoverModel::Stats stats;
502 stats.min = *std::min_element(sizes.begin(), sizes.end());
503 stats.max = *std::max_element(sizes.begin(), sizes.end());
504 stats.mean = std::accumulate(sizes.begin(), sizes.end(), 0.0) / sizes.size();
505 std::nth_element(sizes.begin(), sizes.begin() + sizes.size() / 2,
506 sizes.end());
507 stats.median = sizes[sizes.size() / 2];
508 stats.stddev = StandardDeviation(sizes);
509 return stats;
510}
511
512template <typename T>
513std::vector<T> ComputeDeciles(std::vector<T> values) {
514 const int kNumDeciles = 10;
515 std::vector<T> deciles(kNumDeciles, T{0});
516 const double step = values.size() / kNumDeciles;
517 for (int i = 1; i < kNumDeciles; ++i) {
518 const size_t point = std::clamp<double>(i * step, 0, values.size() - 1);
519 std::nth_element(values.begin(), values.begin() + point, values.end());
520 deciles[i] = values[point];
521 }
522 return deciles;
523}
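// For instance, with 100 values the i-th decile (i = 1..9) is the value at
// sorted position 10 * i; std::nth_element partially reorders the vector so
// that the element at that position is the one a full sort would put there.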
524
525SetCoverModel::Stats SetCoverModel::ComputeCostStats() {
526 std::vector<Cost> subset_costs(num_subsets());
527 std::copy(subset_costs_.begin(), subset_costs_.end(), subset_costs.begin());
528 return ComputeStats(std::move(subset_costs));
529}
530
531SetCoverModel::Stats SetCoverModel::ComputeRowStats() {
532 std::vector<int64_t> row_sizes(num_elements(), 0);
533 for (const SparseColumn& column : columns_) {
534 for (const ElementIndex element : column) {
535 ++row_sizes[element.value()];
536 }
537 }
538 return ComputeStats(std::move(row_sizes));
539}
540
541SetCoverModel::Stats SetCoverModel::ComputeColumnStats() {
542 std::vector<int64_t> column_sizes(columns_.size());
543 for (const SubsetIndex subset : SubsetRange()) {
544 column_sizes[subset.value()] = columns_[subset].size();
545 }
546 return ComputeStats(std::move(column_sizes));
547}
548
549std::vector<int64_t> SetCoverModel::ComputeRowDeciles() const {
550 std::vector<int64_t> row_sizes(num_elements(), 0);
551 for (const SparseColumn& column : columns_) {
552 for (const ElementIndex element : column) {
553 ++row_sizes[element.value()];
554 }
555 }
556 return ComputeDeciles(std::move(row_sizes));
557}
558
559std::vector<int64_t> SetCoverModel::ComputeColumnDeciles() const {
560 std::vector<int64_t> column_sizes(columns_.size());
561 for (const SubsetIndex subset : SubsetRange()) {
562 column_sizes[subset.value()] = columns_[subset].size();
563 }
564 return ComputeDeciles(std::move(column_sizes));
565}
566
567namespace {
568// Returns the number of bytes needed to store x with a base-128 encoding.
569BaseInt Base128SizeInBytes(BaseInt x) {
570 const uint64_t u = x == 0 ? 1 : static_cast<uint64_t>(x);
571 return (std::numeric_limits<uint64_t>::digits - absl::countl_zero(u) + 6) / 7;
572}
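// For instance, Base128SizeInBytes(0) == 1, Base128SizeInBytes(127) == 1 and
// Base128SizeInBytes(128) == 2, since each byte of a base-128 (varint)
// encoding carries 7 payload bits.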
573} // namespace
574
575SetCoverModel::Stats SetCoverModel::ComputeColumnDeltaSizeStats() const {
576 StatsAccumulator acc;
577 for (const SparseColumn& column : columns_) {
578 int64_t previous = 0;
579 for (const ElementIndex element : column) {
580 const int64_t delta = element.value() - previous;
581 previous = element.value();
582 acc.Register(Base128SizeInBytes(delta));
583 }
584 }
585 return acc.ComputeStats();
586}
587
588SetCoverModel::Stats SetCoverModel::ComputeRowDeltaSizeStats() const {
589 StatsAccumulator acc;
590 for (const SparseRow& row : rows_) {
591 int64_t previous = 0;
592 for (const SubsetIndex subset : row) {
593 const int64_t delta = subset.value() - previous;
594 previous = subset.value();
595 acc.Register(Base128SizeInBytes(delta));
596 }
597 }
598 return acc.ComputeStats();
599}
600
601} // namespace operations_research