Google OR-Tools v9.15
a fast and portable software suite for combinatorial optimization
util.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/util.h"
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19#include <deque>
20#include <limits>
21#include <numeric>
22#include <string>
23#include <utility>
24#include <vector>
25
26#include "absl/algorithm/container.h"
27#include "absl/container/btree_set.h"
28#include "absl/log/check.h"
29#include "absl/numeric/bits.h"
30#include "absl/numeric/int128.h"
31#include "absl/random/bit_gen_ref.h"
32#include "absl/random/distributions.h"
33#include "absl/strings/str_cat.h"
34#include "absl/types/span.h"
35#include "google/protobuf/descriptor.h"
36#include "ortools/base/logging.h"
37#include "ortools/base/mathutil.h"
38#include "ortools/sat/sat_base.h"
39#include "ortools/sat/sat_parameters.pb.h"
40#include "ortools/util/saturated_arithmetic.h"
41#include "ortools/util/sorted_interval_list.h"
42
43namespace operations_research {
44namespace sat {
45
46namespace {
47
48inline std::string LeftAlign(std::string s, int size = 16) {
49 if (s.size() >= size) return s;
50 s.resize(size, ' ');
51 return s;
52}
53
54inline std::string RightAlign(std::string s, int size = 16) {
55 if (s.size() >= size) return s;
56 return absl::StrCat(std::string(size - s.size(), ' '), s);
57}
58
59} // namespace
60
61std::string FormatTable(std::vector<std::vector<std::string>>& table,
62 int spacing) {
63 if (table.size() > 1) {
64 // We order by name.
65 std::sort(table.begin() + 1, table.end());
66 }
67
68 std::vector<int> widths;
69 for (const std::vector<std::string>& line : table) {
70 if (line.size() > widths.size()) widths.resize(line.size(), spacing);
71 for (int j = 0; j < line.size(); ++j) {
72 widths[j] = std::max<int>(widths[j], line[j].size() + spacing);
73 }
74 }
75 std::string output;
76 for (int i = 0; i < table.size(); ++i) {
77 for (int j = 0; j < table[i].size(); ++j) {
78 if (i == 0 && j == 0) {
79 // We currently only left align the table name.
80 absl::StrAppend(&output, LeftAlign(table[i][j], widths[j]));
81 } else {
82 absl::StrAppend(&output, RightAlign(table[i][j], widths[j]));
83 }
84 }
85 absl::StrAppend(&output, "\n");
86 }
87 return output;
88}
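// [Editorial sketch, not part of the original file.] Minimal illustration of
// FormatTable(): the first row acts as a header (only its first cell is
// left-aligned) and the remaining rows are sorted by their first column. The
// example function name is hypothetical.
static void ExampleFormatTableUsage() {
  std::vector<std::vector<std::string>> table = {
      {"Objects", "count"}, {"zebra", "1"}, {"ant", "12"}};
  // After formatting, the rows below the header appear in the order ant, zebra.
  const std::string formatted = FormatTable(table, /*spacing=*/2);
  CHECK(!formatted.empty());
}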
89
90void RandomizeDecisionHeuristic(absl::BitGenRef random,
91 SatParameters* parameters) {
92#if !defined(__PORTABLE_PLATFORM__)
93 // Random preferred variable order.
94 const google::protobuf::EnumDescriptor* order_d =
95 SatParameters::VariableOrder_descriptor();
96 parameters->set_preferred_variable_order(
97 static_cast<SatParameters::VariableOrder>(
98 order_d->value(absl::Uniform(random, 0, order_d->value_count()))
99 ->number()));
100
101 // Random polarity initial value.
102 const google::protobuf::EnumDescriptor* polarity_d =
103 SatParameters::Polarity_descriptor();
104 parameters->set_initial_polarity(static_cast<SatParameters::Polarity>(
105 polarity_d->value(absl::Uniform(random, 0, polarity_d->value_count()))
106 ->number()));
107#endif // __PORTABLE_PLATFORM__
108 // Other random parameters.
109 parameters->set_use_phase_saving(absl::Bernoulli(random, 0.5));
110 parameters->set_random_polarity_ratio(absl::Bernoulli(random, 0.5) ? 0.01
111 : 0.0);
112 parameters->set_random_branches_ratio(absl::Bernoulli(random, 0.5) ? 0.01
113 : 0.0);
114}
115
116namespace {
117
118// This will be optimized into one division. I tested that in
119// `BM_DivisionAndRemainderAlternative` (sat/integer_test.cc).
120//
121// Note that I am not 100% sure we need the indirection for the optimization
122// to kick in, but this seemed safer given our unusual r[i ^ 1] inputs.
123void QuotientAndRemainder(int64_t a, int64_t b, int64_t& q, int64_t& r) {
124 q = a / b;
125 r = a % b;
126}
127
128} // namespace
129
130// Using the extended Euclidean algo, we find a and b such that
131// a x + b m = gcd(x, m)
132// https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
133int64_t ModularInverse(int64_t x, int64_t m) {
134 DCHECK_GE(x, 0);
135 DCHECK_LT(x, m);
136
137 int64_t r[2] = {m, x};
138 int64_t t[2] = {0, 1};
139 int64_t q;
140
141 // We only keep the last two terms of the sequences with the "^1" trick:
142 //
143 // q = r[i-2] / r[i-1]
144 // r[i] = r[i-2] % r[i-1]
145 // t[i] = t[i-2] - t[i-1] * q
146 //
147 // We always have:
148 // - gcd(r[i], r[i - 1]) = gcd(r[i - 1], r[i - 2])
149 // - x * t[i] + m * t[i - 1] = r[i]
150 int i = 0;
151 for (; r[i ^ 1] != 0; i ^= 1) {
152 QuotientAndRemainder(r[i], r[i ^ 1], q, r[i]);
153 t[i] -= t[i ^ 1] * q;
154 }
155
156 // If the gcd is not one, there is no inverse and we return 0.
157 if (r[i] != 1) return 0;
158
159 // Correct the result so that it is in [0, m). Note that abs(t[i]) is known to
160 // be less than or equal to x / 2, and we have thorough unit-tests.
161 if (t[i] < 0) t[i] += m;
162
163 return t[i];
164}
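// [Editorial sketch, not part of the original file.] Two small sanity checks
// for ModularInverse(); the example function name is hypothetical.
static void ExampleModularInverse() {
  // 3 * 5 = 15 = 2 * 7 + 1, so 5 is the inverse of 3 modulo 7.
  CHECK_EQ(ModularInverse(3, 7), 5);
  // gcd(4, 8) = 4 != 1, so there is no inverse and the function returns 0.
  CHECK_EQ(ModularInverse(4, 8), 0);
}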
165
166int64_t PositiveMod(int64_t x, int64_t m) {
167 const int64_t r = x % m;
168 return r < 0 ? r + m : r;
169}
170
171int64_t ProductWithModularInverse(int64_t coeff, int64_t mod, int64_t rhs) {
172 DCHECK_NE(coeff, 0);
173 DCHECK_NE(mod, 0);
174
175 mod = std::abs(mod);
176 if (rhs == 0 || mod == 1) return 0;
177 DCHECK_EQ(std::gcd(std::abs(coeff), mod), 1);
178
179 // Make both in [0, mod).
180 coeff = PositiveMod(coeff, mod);
181 rhs = PositiveMod(rhs, mod);
182
183 // From X * coeff % mod = rhs
184 // We deduce that X % mod = rhs * inverse % mod
185 const int64_t inverse = ModularInverse(coeff, mod);
186 CHECK_NE(inverse, 0);
187
188 // We make the operation in 128 bits to be sure not to have any overflow here.
189 const absl::int128 p = absl::int128{inverse} * absl::int128{rhs};
190 return static_cast<int64_t>(p % absl::int128{mod});
191}
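// [Editorial sketch, not part of the original file.] ProductWithModularInverse
// returns the unique X in [0, mod) such that X * coeff = rhs (mod mod); the
// example function name is hypothetical.
static void ExampleProductWithModularInverse() {
  // 3 * X = 2 (mod 7) gives X = 3, since 3 * 3 = 9 = 2 (mod 7).
  CHECK_EQ(ProductWithModularInverse(/*coeff=*/3, /*mod=*/7, /*rhs=*/2), 3);
}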
192
193bool SolveDiophantineEquationOfSizeTwo(int64_t& a, int64_t& b, int64_t& cte,
194 int64_t& x0, int64_t& y0) {
195 CHECK_NE(a, 0);
196 CHECK_NE(b, 0);
197 CHECK_NE(a, std::numeric_limits<int64_t>::min());
198 CHECK_NE(b, std::numeric_limits<int64_t>::min());
199
200 const int64_t gcd = std::gcd(std::abs(a), std::abs(b));
201 if (cte % gcd != 0) return false;
202 a /= gcd;
203 b /= gcd;
204 cte /= gcd;
205
206 // The simple case where (0, 0) is a solution.
207 if (cte == 0) {
208 x0 = y0 = 0;
209 return true;
210 }
211
212 // We solve a * X + b * Y = cte
213 // We take a valid x0 in [0, b) by considering the equation mod b.
214 x0 = ProductWithModularInverse(a, b, cte);
215
216 // We choose x0 of the same sign as cte.
217 if (cte < 0 && x0 != 0) x0 -= std::abs(b);
218
219 // By plugging X = x0 + b * Z
220 // We have a * (x0 + b * Z) + b * Y = cte
221 // so a * b * Z + b * Y = cte - a * x0;
222 // and y0 = (cte - a * x0) / b (with an exact division by construction).
223 const absl::int128 t = absl::int128{cte} - absl::int128{a} * absl::int128{x0};
224 DCHECK_EQ(t % absl::int128{b}, absl::int128{0});
225
226 // Overflow-wise, there are two cases for cte > 0:
227 // - a * x0 <= cte, in this case y0 will not overflow (<= cte).
228 // - a * x0 > cte, in this case y0 will be in (-a, 0].
229 const absl::int128 r = t / absl::int128{b};
230 DCHECK_LE(r, absl::int128{std::numeric_limits<int64_t>::max()});
231 DCHECK_GE(r, absl::int128{std::numeric_limits<int64_t>::min()});
232
233 y0 = static_cast<int64_t>(r);
234 return true;
235}
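// [Editorial sketch, not part of the original file.] The function divides the
// equation by the gcd in place and returns one base solution (x0, y0); the
// full solution set is then x = x0 + b * Z, y = y0 - a * Z for any integer Z.
// The example function name is hypothetical.
static void ExampleSolveDiophantineEquationOfSizeTwo() {
  int64_t a = 4, b = 6, cte = 10, x0, y0;
  CHECK(SolveDiophantineEquationOfSizeTwo(a, b, cte, x0, y0));
  // The equation was divided by gcd(4, 6) = 2 and is now 2x + 3y = 5.
  CHECK_EQ(a, 2);
  CHECK_EQ(b, 3);
  CHECK_EQ(cte, 5);
  CHECK_EQ(a * x0 + b * y0, cte);
}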
236
237bool DiophantineEquationOfSizeTwoHasSolutionInDomain(const Domain& x, int64_t a,
238 const Domain& y, int64_t b,
239 int64_t cte) {
240 if (x.IsEmpty() || y.IsEmpty()) return false;
241 if (a == 0 && b == 0) {
242 return cte == 0;
243 }
244 if (a == 0) {
245 const int64_t div = cte / b;
246 if (b * div != cte) return false;
247 return y.Contains(div);
248 }
249 if (b == 0) {
250 const int64_t div = cte / a;
251 if (a * div != cte) return false;
252 return x.Contains(div);
253 }
254
255 if (a == b) {
256 const int64_t div = cte / a;
257 if (a * div != cte) {
258 return false;
259 }
260 return !Domain(x)
261 .Negation()
262 .AdditionWith(Domain(div))
264 .IsEmpty();
265 }
266
267 int64_t x0, y0;
268 if (!SolveDiophantineEquationOfSizeTwo(a, b, cte, x0, y0)) {
269 return false;
270 }
271 const Domain z_domain =
272 x.AdditionWith(Domain(-x0))
273 .InverseMultiplicationBy(b)
274 .IntersectionWith(
275 y.AdditionWith(Domain(-y0)).InverseMultiplicationBy(-a));
276
277 const Domain z_restricted_d1 =
278 x.AdditionWith(Domain(-x0)).InverseMultiplicationBy(b);
279 const Domain z_restricted_d2 =
280 y.AdditionWith(Domain(-y0)).InverseMultiplicationBy(-a);
281 const Domain z_restricted_domain =
282 z_restricted_d1.IntersectionWith(z_restricted_d2);
283 return !z_restricted_domain.IsEmpty();
284}
285
286// TODO(user): Find better implementation? In practice passing via double is
287// almost always correct, but the CapProd() might be a bit slow. However this
288// is only called when we do propagate something.
289int64_t FloorSquareRoot(int64_t a) {
290 int64_t result =
291 static_cast<int64_t>(std::floor(std::sqrt(static_cast<double>(a))));
292 while (CapProd(result, result) > a) --result;
293 while (CapProd(result + 1, result + 1) <= a) ++result;
294 return result;
295}
296
297// TODO(user): Find better implementation?
298int64_t CeilSquareRoot(int64_t a) {
299 int64_t result =
300 static_cast<int64_t>(std::ceil(std::sqrt(static_cast<double>(a))));
301 while (CapProd(result, result) < a) ++result;
302 while ((result - 1) * (result - 1) >= a) --result;
303 return result;
304}
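// [Editorial sketch, not part of the original file.] Quick checks of the two
// integer square-root helpers above; the example function name is
// hypothetical.
static void ExampleIntegerSquareRoots() {
  CHECK_EQ(FloorSquareRoot(10), 3);  // 3 * 3 = 9 <= 10 < 16 = 4 * 4.
  CHECK_EQ(CeilSquareRoot(10), 4);
  CHECK_EQ(FloorSquareRoot(16), 4);  // Perfect squares map to themselves.
  CHECK_EQ(CeilSquareRoot(16), 4);
}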
305
306int64_t ClosestMultiple(int64_t value, int64_t base) {
307 if (value < 0) return -ClosestMultiple(-value, base);
308 int64_t result = value / base * base;
309 if (value - result > base / 2) result += base;
310 return result;
311}
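// [Editorial sketch, not part of the original file.] ClosestMultiple() rounds
// to the nearest multiple of base, ties rounding down for positive values; the
// example function name is hypothetical.
static void ExampleClosestMultiple() {
  CHECK_EQ(ClosestMultiple(7, 5), 5);     // 7 is closer to 5 than to 10.
  CHECK_EQ(ClosestMultiple(8, 5), 10);    // 8 is closer to 10.
  CHECK_EQ(ClosestMultiple(-8, 5), -10);  // Symmetric for negative values.
}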
312
313bool LinearInequalityCanBeReducedWithClosestMultiple(
314 int64_t base, absl::Span<const int64_t> coeffs,
315 absl::Span<const int64_t> lbs, absl::Span<const int64_t> ubs, int64_t rhs,
316 int64_t* new_rhs) {
317 // Precompute some bounds for the equation base * X + error <= rhs.
318 int64_t max_activity = 0;
319 int64_t max_x = 0;
320 int64_t min_error = 0;
321 const int num_terms = coeffs.size();
322 if (num_terms == 0) return false;
323 for (int i = 0; i < num_terms; ++i) {
324 const int64_t coeff = coeffs[i];
325 CHECK_GT(coeff, 0);
326 const int64_t closest = ClosestMultiple(coeff, base);
327 max_activity += coeff * ubs[i];
328 max_x += closest / base * ubs[i];
329
330 const int64_t error = coeff - closest;
331 if (error >= 0) {
332 min_error += error * lbs[i];
333 } else {
334 min_error += error * ubs[i];
335 }
336 }
337
338 if (max_activity <= rhs) {
339 // The constraint is trivially true.
340 *new_rhs = max_x;
341 return true;
342 }
343
344 // This is the max error assuming that activity > rhs.
345 int64_t max_error_if_invalid = 0;
346 const int64_t slack = max_activity - rhs - 1;
347 for (int i = 0; i < num_terms; ++i) {
348 const int64_t coeff = coeffs[i];
349 const int64_t closest = ClosestMultiple(coeff, base);
350 const int64_t error = coeff - closest;
351 if (error >= 0) {
352 max_error_if_invalid += error * ubs[i];
353 } else {
354 const int64_t lb = std::max(lbs[i], ubs[i] - slack / coeff);
355 max_error_if_invalid += error * lb;
356 }
357 }
358
359 // We have old solution valid =>
360 // base * X + error <= rhs
361 // base * X <= rhs - error
362 // base * X <= rhs - min_error
363 // X <= new_rhs
364 *new_rhs = std::min(max_x, MathUtil::FloorOfRatio(rhs - min_error, base));
365
366 // And we have old solution invalid =>
367 // base * X + error >= rhs + 1
368 // base * X >= rhs + 1 - max_error_if_invalid
369 // X >= infeasibility_bound
370 const int64_t infeasibility_bound =
371 MathUtil::CeilOfRatio(rhs + 1 - max_error_if_invalid, base);
372
373 // If the two bounds can be separated, we have an equivalence!
374 return *new_rhs < infeasibility_bound;
375}
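// [Editorial sketch, not part of the original file.] A successful reduction:
// with base = 10, the constraint 9 * x <= 20 over x in [0, 3] is equivalent to
// X <= 2, where X = x since the closest multiple of 9 is 10 and 10 / base = 1.
// The example function name is hypothetical.
static void ExampleLinearInequalityReduction() {
  int64_t new_rhs = 0;
  const bool reduced = LinearInequalityCanBeReducedWithClosestMultiple(
      /*base=*/10, /*coeffs=*/{9}, /*lbs=*/{0}, /*ubs=*/{3}, /*rhs=*/20,
      &new_rhs);
  CHECK(reduced);
  CHECK_EQ(new_rhs, 2);  // 9 * 2 = 18 <= 20 but 9 * 3 = 27 > 20.
}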
376
377int MoveOneUnprocessedLiteralLast(
378 const absl::btree_set<LiteralIndex>& processed, int relevant_prefix_size,
379 std::vector<Literal>* literals) {
380 if (literals->empty()) return -1;
381 if (!processed.contains(literals->back().Index())) {
382 return std::min<int>(relevant_prefix_size, literals->size());
383 }
384
385 // To get an O(n log n) total size of suffixes, we first process the last n/2
386 // literals, then move all of them to the front and process the remaining n/2.
387 // We use the same algorithm recursively. The sum of the suffixes' size S(n)
388 // is thus S(n/2) + n + S(n/2). That gives us the correct complexity. The code
389 // below simulates one step of this algorithm and is made to be "robust" when
390 // from one call to the next, some literals have been removed (but the order
391 // of literals is preserved).
392 int num_processed = 0;
393 int num_not_processed = 0;
394 int target_prefix_size = literals->size() - 1;
395 for (int i = literals->size() - 1; i >= 0; i--) {
396 if (processed.contains((*literals)[i].Index())) {
397 ++num_processed;
398 } else {
399 ++num_not_processed;
400 target_prefix_size = i;
401 }
402 if (num_not_processed >= num_processed) break;
403 }
404 if (num_not_processed == 0) return -1;
405 target_prefix_size = std::min(target_prefix_size, relevant_prefix_size);
406
407 // Once a prefix size has been decided, it is always better to
408 // enqueue the already processed literals first.
409 std::stable_partition(
410 literals->begin() + target_prefix_size, literals->end(),
411 [&processed](Literal l) { return processed.contains(l.Index()); });
412 return target_prefix_size;
413}
414
415int WeightedPick(absl::Span<const double> input, absl::BitGenRef random) {
416 DCHECK(!input.empty());
417
418 double total_weight = 0;
419 for (const double w : input) {
420 total_weight += w;
421 }
422
423 const double weight_point = absl::Uniform(random, 0.0f, total_weight);
424 double total_weight2 = 0;
425 for (int i = 0; i < input.size(); ++i) {
426 total_weight2 += input[i];
427 if (total_weight2 > weight_point) {
428 return i;
429 }
430 }
431 return input.size() - 1;
432}
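// [Editorial sketch, not part of the original file.] WeightedPick() returns an
// index with probability proportional to its weight; the example function name
// is hypothetical.
static void ExampleWeightedPick(absl::BitGenRef random) {
  const std::vector<double> weights = {0.0, 1.0, 3.0};
  // Index 0 has zero weight so it is never returned, and index 2 is three
  // times more likely than index 1.
  const int index = WeightedPick(weights, random);
  CHECK_GE(index, 1);
  CHECK_LE(index, 2);
}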
433
434void IncrementalAverage::Reset(double reset_value) {
435 num_records_ = 0;
436 average_ = reset_value;
437}
438
439void IncrementalAverage::AddData(double new_record) {
440 num_records_++;
441 average_ += (new_record - average_) / num_records_;
442}
443
444void ExponentialMovingAverage::AddData(double new_record) {
445 num_records_++;
446 average_ = (num_records_ == 1)
447 ? new_record
448 : (new_record + decaying_factor_ * (average_ - new_record));
449}
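// [Editorial sketch, not part of the original file.] The incremental mean uses
// the classic recurrence average += (x - average) / n, while the exponential
// version keeps a fraction decaying_factor of the old average. The default
// constructor, the decaying-factor constructor and the CurrentAverage()
// accessor used below are assumptions taken from util.h.
static void ExampleAverages() {
  IncrementalAverage mean;
  mean.AddData(1.0);
  mean.AddData(3.0);
  CHECK_EQ(mean.CurrentAverage(), 2.0);

  ExponentialMovingAverage ema(/*decaying_factor=*/0.5);
  ema.AddData(1.0);
  ema.AddData(3.0);  // 3 + 0.5 * (1 - 3) = 2.
  CHECK_EQ(ema.CurrentAverage(), 2.0);
}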
450
451void Percentile::AddRecord(double record) {
452 records_.push_front(record);
453 if (records_.size() > record_limit_) {
454 records_.pop_back();
455 }
456}
457
458double Percentile::GetPercentile(double percent) {
459 CHECK_GT(records_.size(), 0);
460 CHECK_LE(percent, 100.0);
461 CHECK_GE(percent, 0.0);
462
463 const int num_records = records_.size();
464 const double percentile_rank =
465 static_cast<double>(num_records) * percent / 100.0 - 0.5;
466 if (percentile_rank <= 0) {
467 return *absl::c_min_element(records_);
468 } else if (percentile_rank >= num_records - 1) {
469 return *absl::c_max_element(records_);
470 }
471 std::vector<double> sorted_records;
472 sorted_records.assign(records_.begin(), records_.end());
473 // Interpolate.
474 DCHECK_GE(num_records, 2);
475 DCHECK_LT(percentile_rank, num_records - 1);
476 const int lower_rank = static_cast<int>(std::floor(percentile_rank));
477 DCHECK_LT(lower_rank, num_records - 1);
478 auto upper_it = sorted_records.begin() + lower_rank + 1;
479 // Ensure that sorted_records[lower_rank + 1] is in the place it would occupy
480 // if the records were actually sorted; the next closest is then the largest
481 // element to its left.
482 absl::c_nth_element(sorted_records, upper_it);
483 auto lower_it = std::max_element(sorted_records.begin(), upper_it);
484 return *lower_it + (percentile_rank - lower_rank) * (*upper_it - *lower_it);
485}
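// [Editorial sketch, not part of the original file.] With records {1, 2, 3, 4}
// the median rank is 4 * 0.5 - 0.5 = 1.5, so the result interpolates halfway
// between the second and third smallest records. The record-limit constructor
// argument below is an assumption taken from util.h.
static void ExamplePercentile() {
  Percentile percentile(/*record_limit=*/100);
  for (const double record : {1.0, 2.0, 3.0, 4.0}) {
    percentile.AddRecord(record);
  }
  CHECK_EQ(percentile.GetPercentile(50.0), 2.5);
  CHECK_EQ(percentile.GetPercentile(0.0), 1.0);
  CHECK_EQ(percentile.GetPercentile(100.0), 4.0);
}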
486
487void MaxBoundedSubsetSum::Reset(int64_t bound) {
488 DCHECK_GE(bound, 0);
489 gcd_ = 0;
490 sums_ = {0};
491 expanded_sums_.clear();
492 current_max_ = 0;
493 bound_ = bound;
494}
495
496void MaxBoundedSubsetSum::Add(int64_t value) {
497 if (value == 0) return;
498 if (value > bound_) return;
499 gcd_ = std::gcd(gcd_, value);
500 AddChoicesInternal({value});
501}
502
503void MaxBoundedSubsetSum::AddChoices(absl::Span<const int64_t> choices) {
504 if (DEBUG_MODE) {
505 for (const int64_t c : choices) {
506 DCHECK_GE(c, 0);
507 }
508 }
509
510 // The max is already reachable or we aborted.
511 if (current_max_ == bound_) return;
512
513 // Filter out zero and values greater than bound_.
514 filtered_values_.clear();
515 for (const int64_t c : choices) {
516 if (c == 0 || c > bound_) continue;
517 filtered_values_.push_back(c);
518 gcd_ = std::gcd(gcd_, c);
519 }
520 if (filtered_values_.empty()) return;
521
522 // So we can abort early in the AddChoicesInternal() inner loops.
523 std::sort(filtered_values_.begin(), filtered_values_.end());
524 AddChoicesInternal(filtered_values_);
525}
526
527void MaxBoundedSubsetSum::AddMultiples(int64_t coeff, int64_t max_value) {
528 DCHECK_GE(coeff, 0);
529 DCHECK_GE(max_value, 0);
530
531 if (coeff == 0 || max_value == 0) return;
532 if (coeff > bound_) return;
533 if (current_max_ == bound_) return;
534 gcd_ = std::gcd(gcd_, coeff);
535
536 const int64_t num_values =
537 std::min(max_value, MathUtil::FloorOfRatio(bound_, coeff));
538 if (num_values > 10) {
539 // We only keep GCD in this case.
540 sums_.clear();
541 expanded_sums_.clear();
542 current_max_ = MathUtil::FloorOfRatio(bound_, gcd_) * gcd_;
543 return;
544 }
545
546 filtered_values_.clear();
547 for (int multiple = 1; multiple <= num_values; ++multiple) {
548 const int64_t v = multiple * coeff;
549 if (v == bound_) {
550 current_max_ = bound_;
551 return;
552 }
553 filtered_values_.push_back(v);
554 }
555 AddChoicesInternal(filtered_values_);
556}
557
558void MaxBoundedSubsetSum::AddChoicesInternal(absl::Span<const int64_t> values) {
559 // Mode 1: vector of all possible sums (with duplicates).
560 if (!sums_.empty() && sums_.size() <= max_complexity_per_add_) {
561 const int old_size = sums_.size();
562 for (int i = 0; i < old_size; ++i) {
563 for (const int64_t value : values) {
564 const int64_t s = sums_[i] + value;
565 if (s > bound_) break;
566
567 sums_.push_back(s);
568 current_max_ = std::max(current_max_, s);
569 if (current_max_ == bound_) return; // Abort
570 }
571 }
572 return;
573 }
574
575 // Mode 2: bitset of all possible sums.
576 if (bound_ <= max_complexity_per_add_) {
577 if (!sums_.empty()) {
578 expanded_sums_.assign(bound_ + 1, false);
579 for (const int64_t s : sums_) {
580 expanded_sums_[s] = true;
581 }
582 sums_.clear();
583 }
584
585 // The reverse order is important to not add the current value twice.
586 if (!expanded_sums_.empty()) {
587 for (int64_t i = bound_ - 1; i >= 0; --i) {
588 if (!expanded_sums_[i]) continue;
589 for (const int64_t value : values) {
590 if (i + value > bound_) break;
591
592 expanded_sums_[i + value] = true;
593 current_max_ = std::max(current_max_, i + value);
594 if (current_max_ == bound_) return; // Abort
595 }
596 }
597 return;
598 }
599 }
600
601 // Fall back to gcd_.
602 DCHECK_NE(gcd_, 0);
603 if (gcd_ == 1) {
604 current_max_ = bound_;
605 } else {
606 current_max_ = MathUtil::FloorOfRatio(bound_, gcd_) * gcd_;
607 }
608}
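// [Editorial sketch, not part of the original file.] Typical use of
// MaxBoundedSubsetSum: track the best reachable subset sum that stays within
// the bound. The bound-taking constructor and the CurrentMax() accessor are
// assumptions taken from util.h.
static void ExampleMaxBoundedSubsetSum() {
  MaxBoundedSubsetSum dp(/*bound=*/14);
  dp.Add(5);
  dp.Add(8);
  // The reachable sums are {0, 5, 8, 13}; the best one not exceeding 14 is 13.
  CHECK_EQ(dp.CurrentMax(), 13);
}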
609
610BasicKnapsackSolver::Result BasicKnapsackSolver::Solve(
611 absl::Span<const Domain> domains, absl::Span<const int64_t> coeffs,
612 absl::Span<const int64_t> costs, const Domain& rhs) {
613 const int num_vars = domains.size();
614 if (num_vars == 0) return {};
615
616 int64_t min_activity = 0;
617 int64_t max_domain_size = 0;
618 for (int i = 0; i < num_vars; ++i) {
619 max_domain_size = std::max(max_domain_size, domains[i].Size());
620 if (coeffs[i] > 0) {
621 min_activity += coeffs[i] * domains[i].Min();
622 } else {
623 min_activity += coeffs[i] * domains[i].Max();
624 }
625 }
626
627 // The complexity of our DP will depend on the number of "activity" values
628 // that need to be considered.
629 //
630 // TODO(user): We can also solve efficiently if max_activity - rhs.Min() is
631 // small. Implement.
632 const int64_t num_values = rhs.Max() - min_activity + 1;
633 if (num_values < 0) {
634 // Problem is clearly infeasible, we can report the result right away.
635 Result result;
636 result.solved = true;
637 result.infeasible = true;
638 return result;
639 }
640
641 // Abort if complexity too large.
642 const int64_t max_work_per_variable = std::min(num_values, max_domain_size);
643 if (rhs.Max() - min_activity > 1e6) return {};
644 if (num_vars * num_values * max_work_per_variable > 1e8) return {};
645
646 // Canonicalize to positive coeffs and non-negative variables.
647 domains_.clear();
648 coeffs_.clear();
649 costs_.clear();
650 for (int i = 0; i < num_vars; ++i) {
651 if (coeffs[i] > 0) {
652 domains_.push_back(domains[i].AdditionWith(Domain(-domains[i].Min())));
653 coeffs_.push_back(coeffs[i]);
654 costs_.push_back(costs[i]);
655 } else {
656 domains_.push_back(
657 domains[i].Negation().AdditionWith(Domain(domains[i].Max())));
658 coeffs_.push_back(-coeffs[i]);
659 costs_.push_back(-costs[i]);
660 }
661 }
662
663 Result result =
664 InternalSolve(num_values, rhs.AdditionWith(Domain(-min_activity)));
665 if (result.solved && !result.infeasible) {
666 // Transform solution back.
667 for (int i = 0; i < num_vars; ++i) {
668 if (coeffs[i] > 0) {
669 result.solution[i] += domains[i].Min();
670 } else {
671 result.solution[i] = domains[i].Max() - result.solution[i];
672 }
673 }
674 }
675 return result;
676}
677
678BasicKnapsackSolver::Result BasicKnapsackSolver::InternalSolve(
679 int64_t num_values, const Domain& rhs) {
680 const int num_vars = domains_.size();
681
682 // The set of DP states that we will fill.
683 var_activity_states_.assign(num_vars, std::vector<State>(num_values));
684
685 // Initialize with first variable.
686 for (const int64_t v : domains_[0].Values()) {
687 const int64_t value = v * coeffs_[0];
688 CHECK_GE(value, 0);
689 if (value >= num_values) break;
690 var_activity_states_[0][value].cost = v * costs_[0];
691 var_activity_states_[0][value].value = v;
692 }
693
694 // Fill rest of the DP states.
695 for (int i = 1; i < num_vars; ++i) {
696 const std::vector<State>& prev = var_activity_states_[i - 1];
697 std::vector<State>& current = var_activity_states_[i];
698 for (int prev_value = 0; prev_value < num_values; ++prev_value) {
699 if (prev[prev_value].cost == std::numeric_limits<int64_t>::max()) {
700 continue;
701 }
702 for (const int64_t v : domains_[i].Values()) {
703 const int64_t value = prev_value + v * coeffs_[i];
704 CHECK_GE(value, 0);
705 if (value >= num_values) break;
706 const int64_t new_cost = prev[prev_value].cost + v * costs_[i];
707 if (new_cost < current[value].cost) {
708 current[value].cost = new_cost;
709 current[value].value = v;
710 }
711 }
712 }
713 }
714
715 Result result;
716 result.solved = true;
717
718 int64_t best_cost = std::numeric_limits<int64_t>::max();
719 int64_t best_activity;
720 for (int v = 0; v < num_values; ++v) {
721 // TODO(user): optimize this?
722 if (!rhs.Contains(v)) continue;
723 if (var_activity_states_.back()[v].cost < best_cost) {
724 best_cost = var_activity_states_.back()[v].cost;
725 best_activity = v;
726 }
727 }
728
729 if (best_cost == std::numeric_limits<int64_t>::max()) {
730 result.infeasible = true;
731 return result;
732 }
733
734 // Recover the values.
735 result.solution.resize(num_vars);
736 int64_t current_activity = best_activity;
737 for (int i = num_vars - 1; i >= 0; --i) {
738 const int64_t var_value = var_activity_states_[i][current_activity].value;
739 result.solution[i] = var_value;
740 current_activity -= coeffs_[i] * var_value;
741 }
742
743 return result;
744}
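// [Editorial sketch, not part of the original file.] A tiny instance of the
// DP above: minimize x + y subject to 3x + 2y = 4 with x in [0, 1] and
// y in [0, 2]; the only feasible point is (0, 2). The default constructor of
// BasicKnapsackSolver is an assumption taken from util.h.
static void ExampleBasicKnapsackSolver() {
  BasicKnapsackSolver solver;
  const std::vector<Domain> domains = {Domain(0, 1), Domain(0, 2)};
  const BasicKnapsackSolver::Result result = solver.Solve(
      domains, /*coeffs=*/{3, 2}, /*costs=*/{1, 1}, /*rhs=*/Domain(4));
  CHECK(result.solved);
  CHECK(!result.infeasible);
  CHECK_EQ(result.solution[0], 0);
  CHECK_EQ(result.solution[1], 2);
}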
745
746namespace {
747
748class CliqueDecomposition {
749 public:
750 CliqueDecomposition(const std::vector<std::vector<int>>& graph,
751 absl::BitGenRef random, std::vector<int>* buffer)
752 : graph_(graph), random_(random), buffer_(buffer) {
753 const int n = graph.size();
754 permutation_.resize(n);
755 buffer_->resize(n);
756
757 // TODO(user): Start by sorting by decreasing size of adjacency list?
758 for (int i = 0; i < n; ++i) permutation_[i] = i;
759 }
760
761 // This works in O(m). All possible decompositions are reachable, depending on
762 // the initial permutation.
763 //
764 // TODO(user): It can be made faster within the same complexity though.
765 void DecomposeGreedily() {
766 decomposition_.clear();
767 candidates_.clear();
768
769 const int n = graph_.size();
770 taken_.assign(n, false);
771 temp_.assign(n, false);
772
773 int buffer_index = 0;
774 for (const int i : permutation_) {
775 if (taken_[i]) continue;
776
777 // Save start of amo and output i.
778 const int start = buffer_index;
779 taken_[i] = true;
780 (*buffer_)[buffer_index++] = i;
781
782 candidates_.clear();
783 for (const int c : graph_[i]) {
784 if (!taken_[c]) candidates_.push_back(c);
785 }
786 while (!candidates_.empty()) {
787 // Extend at most one with lowest possible permutation index.
788 int next = candidates_.front();
789 for (const int n : candidates_) {
790 if (permutation_[n] < permutation_[next]) next = n;
791 }
792
793 // Add to current amo.
794 taken_[next] = true;
795 (*buffer_)[buffer_index++] = next;
796
797 // Filter candidates.
798 // TODO(user): With a sorted graph_, same complexity, but likely faster.
799 for (const int head : graph_[next]) temp_[head] = true;
800 int new_size = 0;
801 for (const int c : candidates_) {
802 if (taken_[c]) continue;
803 if (!temp_[c]) continue;
804 candidates_[new_size++] = c;
805 }
806 candidates_.resize(new_size);
807 for (const int head : graph_[next]) temp_[head] = false;
808 }
809
810 // Output amo.
811 decomposition_.push_back(
812 absl::MakeSpan(buffer_->data() + start, buffer_index - start));
813 }
814 DCHECK_EQ(buffer_index, n);
815 }
816
817 // We follow the heuristic in the paper: "Partitioning Networks into Cliques:
818 // A Randomized Heuristic Approach", David Chalupa, 2014.
819 //
820 // Note that by keeping the local order of each clique, we cannot make the
821 // order "worse". And each new call to DecomposeGreedily() can only reduce
822 // the number of cliques in the cover.
823 void ChangeOrder() {
824 if (absl::Bernoulli(random_, 0.5)) {
825 std::reverse(decomposition_.begin(), decomposition_.end());
826 } else {
827 std::shuffle(decomposition_.begin(), decomposition_.end(), random_);
828 }
829
830 int out_index = 0;
831 for (const absl::Span<const int> clique : decomposition_) {
832 for (const int i : clique) {
833 permutation_[out_index++] = i;
834 }
835 }
836 }
837
838 const std::vector<absl::Span<int>>& decomposition() const {
839 return decomposition_;
840 }
841
842 private:
843 const std::vector<std::vector<int>>& graph_;
844 absl::BitGenRef random_;
845 std::vector<int>* buffer_;
846
847 std::vector<absl::Span<int>> decomposition_;
848 std::vector<int> candidates_;
849 std::vector<int> permutation_;
850 std::vector<bool> taken_;
851 std::vector<bool> temp_;
852};
853
854} // namespace
855
856std::vector<absl::Span<int>> AtMostOneDecomposition(
857 const std::vector<std::vector<int>>& graph, absl::BitGenRef random,
858 std::vector<int>* buffer) {
859 CliqueDecomposition decomposer(graph, random, buffer);
860 for (int pass = 0; pass < 10; ++pass) {
861 decomposer.DecomposeGreedily();
862 if (decomposer.decomposition().size() == 1) break;
863 decomposer.ChangeOrder();
864 }
865 return decomposer.decomposition();
866}
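// [Editorial sketch, not part of the original file.] Decomposing a triangle
// {0, 1, 2} plus an isolated node 3: every greedy pass finds the triangle as
// one clique and node 3 as a second one, whatever the random order. The
// example function name is hypothetical.
static void ExampleAtMostOneDecomposition(absl::BitGenRef random) {
  const std::vector<std::vector<int>> graph = {{1, 2}, {0, 2}, {0, 1}, {}};
  std::vector<int> buffer;
  const std::vector<absl::Span<int>> decomposition =
      AtMostOneDecomposition(graph, random, &buffer);
  CHECK_EQ(decomposition.size(), 2);
}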
867
868absl::Span<const int64_t> SortedSubsetSums::Compute(
869 absl::Span<const int64_t> elements, int64_t maximum_sum,
870 bool abort_if_maximum_reached) {
871 sums_.clear();
872 sums_.push_back(0);
873 for (const int64_t e : elements) {
874 DCHECK_GE(e, 0);
875 if (e == 0 || e > maximum_sum) continue;
876
877 // Optimization: If all the sums in [0, maximum_sum] are already reachable
878 // we can abort early since no new reachable sum will be discovered.
879 if (sums_.size() == maximum_sum + 1) return sums_;
880
881 // Early abort when asked if we already reached maximum_sum.
882 if (abort_if_maximum_reached && sums_.back() == maximum_sum) return sums_;
883
884 // We merge sort sums_ and sums_ + e into new_sums_.
885 int i = 0;
886 int j = 0;
887 new_sums_.clear();
888 const int size = sums_.size();
889 const int64_t* const data = sums_.data();
890 int64_t last_pushed = -1;
891 while (i < size) {
892 DCHECK_LE(j, i); // Since we add a positive e.
893 const int64_t a = data[i];
894 const int64_t b = data[j] + e;
895 int64_t to_push;
896 if (a <= b) {
897 ++i;
898 if (a == b) ++j;
899 to_push = a;
900 } else {
901 to_push = b;
902 ++j;
903 }
904 if (to_push == last_pushed) continue;
905 if (to_push > maximum_sum) {
906 j = size; // so we don't keep pushing below.
907 break;
908 }
909 last_pushed = to_push;
910 new_sums_.push_back(to_push);
911 }
912
913 // We are sure of this since we will break only when to_push > maximum_sum,
914 // and we are guaranteed to have pushed all sums below "to_push" before; that
915 // includes all the initial sums in sums_.
916 DCHECK_EQ(i, size);
917
918 for (; j < size; ++j) {
919 const int64_t to_push = data[j] + e;
920 if (to_push == last_pushed) continue;
921 if (to_push > maximum_sum) break;
922 last_pushed = to_push;
923 new_sums_.push_back(to_push);
924 }
925 std::swap(sums_, new_sums_);
926 }
927 return sums_;
928}
929
930double MaxBoundedSubsetSumExact::ComplexityEstimate(int num_elements,
931 int64_t bin_size) {
932 double estimate = std::numeric_limits<double>::infinity();
933 if (num_elements < 100) {
934 estimate = 2 * pow(2.0, num_elements / 2);
935 }
936 return std::min(estimate, static_cast<double>(bin_size) *
937 static_cast<double>(num_elements));
938}
939
940int64_t MaxBoundedSubsetSumExact::MaxSubsetSum(
941 absl::Span<const int64_t> elements, int64_t bin_size) {
942 // Get rid of a few easy to decide corner cases.
943 if (elements.empty()) return 0;
944 if (elements.size() == 1) {
945 if (elements[0] > bin_size) return 0;
946 return elements[0];
947 }
948
949 // We split the elements in two equal-sized parts.
950 const int middle = elements.size() / 2;
951 const auto span_a = sums_a_.Compute(elements.subspan(0, middle), bin_size,
952 /*abort_if_maximum_reached=*/true);
953 if (span_a.back() == bin_size) return bin_size;
954
955 const auto span_b = sums_b_.Compute(elements.subspan(middle), bin_size,
956 /*abort_if_maximum_reached=*/true);
957 if (span_b.back() == bin_size) return bin_size;
958
959 // For all possible sum a, we compute the largest sum b that fits.
960 // We do that in linear time thanks to the sorted partial sums.
961 int64_t result = 0;
962 CHECK(!span_a.empty());
963 CHECK(!span_b.empty());
964 int j = span_b.size() - 1;
965 for (int i = 0; i < span_a.size(); ++i) {
966 while (j >= 0 && span_a[i] + span_b[j] > bin_size) --j;
967 result = std::max(result, span_a[i] + span_b[j]);
968 if (result == bin_size) return bin_size;
969 }
970 return result;
971}
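// [Editorial sketch, not part of the original file.] The meet-in-the-middle
// split above in action: the subset sums of {3} and of {5, 7} are combined to
// find the best fit 3 + 7 = 10 in a bin of size 11. The default constructor of
// MaxBoundedSubsetSumExact is an assumption taken from util.h.
static void ExampleMaxSubsetSum() {
  MaxBoundedSubsetSumExact solver;
  CHECK_EQ(solver.MaxSubsetSum({3, 5, 7}, /*bin_size=*/11), 10);
}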
972
973std::vector<int> FindMostDiverseSubset(int k, int n,
974 absl::Span<const int64_t> distances,
975 std::vector<int64_t>& buffer,
976 int always_pick_mask) {
977 DCHECK_LE(k, n);
978 std::vector<int> result;
979 result.reserve(k);
980
981 if (k == n) {
982 for (int i = 0; i < n; ++i) result.push_back(i);
983 return result;
984 }
985
986 if (k == n - 1) {
987 // We just exclude the one closest to all the others.
988 int64_t worse = std::numeric_limits<int64_t>::max();
989 int to_exclude = -1;
990 for (int i = 0; i < n; ++i) {
991 if ((always_pick_mask >> i) & 1) continue;
992
993 int64_t score = 0;
994 for (int j = 0; j < n; ++j) {
995 if (i != j) score += distances[i * n + j];
996 }
997 if (score < worse) {
998 worse = score;
999 to_exclude = i;
1000 }
1001 }
1002
1003 CHECK_NE(to_exclude, -1);
1004 for (int i = 0; i < n; ++i) {
1005 if (i != to_exclude) result.push_back(i);
1006 }
1007 return result;
1008 }
1009
1010 CHECK_LE(n, 25);
1011 const int limit = 1 << n;
1012 buffer.assign(limit, 0);
1013 int best_mask = 0;
1014 int64_t best_value = -1;
1015 for (unsigned int mask = 1; mask < limit; ++mask) {
1016 const int hamming_weight = absl::popcount(mask);
1017
1018 // TODO(user): Increase mask by more than one ? but counting to 1k is fast
1019 // anyway.
1020 if (hamming_weight > k) continue;
1021 int low_bit = -1;
1022 int64_t sum = 0;
1023 for (int i = 0; i < n; ++i) {
1024 if ((mask >> i) & 1) {
1025 if (low_bit == -1) {
1026 low_bit = i;
1027 } else {
1028 sum += distances[low_bit * n + i];
1029 }
1030 }
1031 }
1032 buffer[mask] = buffer[mask ^ (1 << low_bit)] + sum;
1033 if (hamming_weight == k && buffer[mask] > best_value) {
1034 if ((mask & always_pick_mask) != always_pick_mask) continue;
1035 best_value = buffer[mask];
1036 best_mask = mask;
1037 }
1038 }
1039
1040 for (int i = 0; i < n; ++i) {
1041 if ((best_mask >> i) & 1) {
1042 result.push_back(i);
1043 }
1044 }
1045 return result;
1046}
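// [Editorial sketch, not part of the original file.] Four points on a line at
// positions 0, 1, 2 and 10; entry (i, j) of the flattened 4 x 4 matrix is the
// distance between points i and j. The two most distant points, 0 and 3, form
// the most diverse subset of size 2. The example function name is
// hypothetical.
static void ExampleFindMostDiverseSubset() {
  const std::vector<int64_t> distances = {0, 1, 2,  10,  //
                                          1, 0, 1,  9,   //
                                          2, 1, 0,  8,   //
                                          10, 9, 8, 0};
  std::vector<int64_t> buffer;
  const std::vector<int> subset = FindMostDiverseSubset(
      /*k=*/2, /*n=*/4, distances, buffer, /*always_pick_mask=*/0);
  CHECK_EQ(subset.size(), 2);
  CHECK_EQ(subset[0], 0);
  CHECK_EQ(subset[1], 3);
}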
1047
1048std::vector<std::pair<int, int>> HeuristicallySplitLongLinear(
1049 absl::Span<const int64_t> coeffs) {
1050 std::vector<std::pair<int, int>> result;
1051 if (coeffs.empty()) return result;
1052
1053 // Split an interval [0, size) into num_parts mostly equal parts.
1054 const auto append_splits = [&result](int offset, int size, int num_parts) {
1055 int previous_start = 0;
1056 for (int64_t b = 0; b < num_parts; ++b) {
1057 const int next_start = static_cast<int64_t>(b + 1) * size / num_parts;
1058 result.push_back({offset + previous_start, next_start - previous_start});
1059 previous_start = next_start;
1060 }
1061 };
1062
1063 const int num_terms = coeffs.size();
1064 const int num_buckets = static_cast<int>(std::round(std::sqrt(num_terms)));
1065
1066 int num_differents = 1;
1067 for (int i = 1; i < num_terms; ++i) {
1068 if (coeffs[i - 1] != coeffs[i]) ++num_differents;
1069 }
1070
1071 // If we don't have many different coefficients, we always create parts
1072 // with exactly the same coeffs. We split large parts evenly into size /
1073 // expected_part_size.
1074 if (num_differents < 20) {
1075 const int expected_part_size = num_terms / num_buckets;
1076
1077 for (int i = 0; i < num_terms;) {
1078 int j = i + 1;
1079 for (; j < num_terms; ++j) {
1080 if (coeffs[j] != coeffs[i]) break;
1081 }
1082
1083 const int local_size = j - i;
1084 const int num_local_buckets =
1085 MathUtil::CeilOfRatio(local_size, expected_part_size);
1086 append_splits(i, local_size, num_local_buckets);
1087
1088 i = j;
1089 }
1090
1091 return result;
1092 }
1093
1094 // Otherwise we just split into num_buckets buckets.
1095 append_splits(0, num_terms, num_buckets);
1096 return result;
1097}
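// [Editorial sketch, not part of the original file.] With 9 coefficients and
// only two distinct values, the split keeps equal coefficients together: the
// run of four 1s is cut into two parts of size 2 and the run of five 2s into
// parts of sizes 2 and 3. Each part is an (offset, length) pair; the example
// function name is hypothetical.
static void ExampleHeuristicallySplitLongLinear() {
  const std::vector<std::pair<int, int>> parts =
      HeuristicallySplitLongLinear({1, 1, 1, 1, 2, 2, 2, 2, 2});
  CHECK_EQ(parts.size(), 4);
  CHECK(parts[0] == std::make_pair(0, 2));
  CHECK(parts[3] == std::make_pair(6, 3));
}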
1098
1099} // namespace sat
1100} // namespace operations_research