Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
util.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/util.h"
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19#include <deque>
20#include <limits>
21#include <numeric>
22#include <string>
23#include <vector>
24
25#include "absl/algorithm/container.h"
26#include "absl/container/btree_set.h"
27#include "absl/log/check.h"
28#include "absl/numeric/int128.h"
29#include "absl/random/bit_gen_ref.h"
30#include "absl/random/distributions.h"
31#include "absl/strings/str_cat.h"
32#include "absl/types/span.h"
33#include "google/protobuf/descriptor.h"
35#include "ortools/base/mathutil.h"
36#include "ortools/sat/sat_base.h"
37#include "ortools/sat/sat_parameters.pb.h"
38#include "ortools/util/saturated_arithmetic.h"
39#include "ortools/util/sorted_interval_list.h"
40
41namespace operations_research {
42namespace sat {
43
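// For illustration: FormatCounter(1234567) returns "1'234'567" (the input is
// expected to be non-negative).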
44std::string FormatCounter(int64_t num) {
45 std::string s = absl::StrCat(num);
46 std::string out;
47 const int size = s.size();
48 for (int i = 0; i < size; ++i) {
49 if (i > 0 && (size - i) % 3 == 0) {
50 out.push_back('\'');
51 }
52 out.push_back(s[i]);
53 }
54 return out;
55}
56
57namespace {
58
59inline std::string LeftAlign(std::string s, int size = 16) {
60 if (s.size() >= size) return s;
61 s.resize(size, ' ');
62 return s;
63}
64
65inline std::string RightAlign(std::string s, int size = 16) {
66 if (s.size() >= size) return s;
67 return absl::StrCat(std::string(size - s.size(), ' '), s);
68}
69
70} // namespace
71
72std::string FormatTable(std::vector<std::vector<std::string>>& table,
73 int spacing) {
74 if (table.size() > 1) {
75 // We order by name.
76 std::sort(table.begin() + 1, table.end());
77 }
78
79 std::vector<int> widths;
80 for (const std::vector<std::string>& line : table) {
81 if (line.size() > widths.size()) widths.resize(line.size(), spacing);
82 for (int j = 0; j < line.size(); ++j) {
83 widths[j] = std::max<int>(widths[j], line[j].size() + spacing);
84 }
85 }
86 std::string output;
87 for (int i = 0; i < table.size(); ++i) {
88 for (int j = 0; j < table[i].size(); ++j) {
89 if (i == 0 && j == 0) {
90 // We currently only left align the table name.
91 absl::StrAppend(&output, LeftAlign(table[i][j], widths[j]));
92 } else {
93 absl::StrAppend(&output, RightAlign(table[i][j], widths[j]));
94 }
95 }
96 absl::StrAppend(&output, "\n");
97 }
98 return output;
99}
100
101void RandomizeDecisionHeuristic(absl::BitGenRef random,
102 SatParameters* parameters) {
103#if !defined(__PORTABLE_PLATFORM__)
104 // Random preferred variable order.
105 const google::protobuf::EnumDescriptor* order_d =
106 SatParameters::VariableOrder_descriptor();
107 parameters->set_preferred_variable_order(
108 static_cast<SatParameters::VariableOrder>(
109 order_d->value(absl::Uniform(random, 0, order_d->value_count()))
110 ->number()));
111
112 // Random polarity initial value.
113 const google::protobuf::EnumDescriptor* polarity_d =
114 SatParameters::Polarity_descriptor();
115 parameters->set_initial_polarity(static_cast<SatParameters::Polarity>(
116 polarity_d->value(absl::Uniform(random, 0, polarity_d->value_count()))
117 ->number()));
118#endif // __PORTABLE_PLATFORM__
119 // Other random parameters.
120 parameters->set_use_phase_saving(absl::Bernoulli(random, 0.5));
121 parameters->set_random_polarity_ratio(absl::Bernoulli(random, 0.5) ? 0.01
122 : 0.0);
123 parameters->set_random_branches_ratio(absl::Bernoulli(random, 0.5) ? 0.01
124 : 0.0);
125}
126
127namespace {
128
129// This will be optimized into one division. I tested that in other places:
130// ortools/sat/integer_test.cc (lines 1223-1228).
131//
132// Note that I am not 100% sure we need the indirection for the optimization
133// to kick in though, but this seemed safer given our weird r[i ^ 1] inputs.
134void QuotientAndRemainder(int64_t a, int64_t b, int64_t& q, int64_t& r) {
135 q = a / b;
136 r = a % b;
137}
138
139} // namespace
140
141// Using the extended Euclidean algo, we find a and b such that
142// a x + b m = gcd(x, m)
143// https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
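//
// For illustration: ModularInverse(3, 10) == 7 since (3 * 7) % 10 == 1, and
// ModularInverse(4, 10) == 0 since gcd(4, 10) != 1 (no inverse exists).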
144int64_t ModularInverse(int64_t x, int64_t m) {
145 DCHECK_GE(x, 0);
146 DCHECK_LT(x, m);
147
148 int64_t r[2] = {m, x};
149 int64_t t[2] = {0, 1};
150 int64_t q;
151
152 // We only keep the last two terms of the sequences with the "^1" trick:
153 //
154 // q = r[i-2] / r[i-1]
155 // r[i] = r[i-2] % r[i-1]
156 // t[i] = t[i-2] - t[i-1] * q
157 //
158 // We always have:
159 // - gcd(r[i], r[i - 1]) = gcd(r[i - 1], r[i - 2])
160 // - x * t[i] + m * t[i - 1] = r[i]
161 int i = 0;
162 for (; r[i ^ 1] != 0; i ^= 1) {
163 QuotientAndRemainder(r[i], r[i ^ 1], q, r[i]);
164 t[i] -= t[i ^ 1] * q;
165 }
166
167 // If the gcd is not one, there is no inverse; we return 0.
168 if (r[i] != 1) return 0;
169
170 // Correct the result so that it is in [0, m). Note that abs(t[i]) is known to
171 // be less than or equal to x / 2, and we have thorough unit-tests.
172 if (t[i] < 0) t[i] += m;
173
174 return t[i];
175}
176
177int64_t PositiveMod(int64_t x, int64_t m) {
178 const int64_t r = x % m;
179 return r < 0 ? r + m : r;
180}
181
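// For illustration: ProductWithModularInverse(3, 10, 6) == 2, the unique
// X in [0, 10) with (3 * X) % 10 == 6.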
182int64_t ProductWithModularInverse(int64_t coeff, int64_t mod, int64_t rhs) {
183 DCHECK_NE(coeff, 0);
184 DCHECK_NE(mod, 0);
185
186 mod = std::abs(mod);
187 if (rhs == 0 || mod == 1) return 0;
188 DCHECK_EQ(std::gcd(std::abs(coeff), mod), 1);
189
190 // Make both in [0, mod).
191 coeff = PositiveMod(coeff, mod);
192 rhs = PositiveMod(rhs, mod);
193
194 // From X * coeff % mod = rhs
195 // We deduce that X % mod = rhs * inverse % mod
196 const int64_t inverse = ModularInverse(coeff, mod);
197 CHECK_NE(inverse, 0);
198
199 // We make the operation in 128 bits to be sure not to have any overflow here.
200 const absl::int128 p = absl::int128{inverse} * absl::int128{rhs};
201 return static_cast<int64_t>(p % absl::int128{mod});
202}
203
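// For illustration: for 3 * X + 5 * Y = 8 this computes (x0, y0) = (1, 1);
// the full solution set is then X = 1 + 5 * Z, Y = 1 - 3 * Z for integer Z.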
204bool SolveDiophantineEquationOfSizeTwo(int64_t& a, int64_t& b, int64_t& cte,
205 int64_t& x0, int64_t& y0) {
206 CHECK_NE(a, 0);
207 CHECK_NE(b, 0);
208 CHECK_NE(a, std::numeric_limits<int64_t>::min());
209 CHECK_NE(b, std::numeric_limits<int64_t>::min());
210
211 const int64_t gcd = std::gcd(std::abs(a), std::abs(b));
212 if (cte % gcd != 0) return false;
213 a /= gcd;
214 b /= gcd;
215 cte /= gcd;
216
217 // The simple case where (0, 0) is a solution.
218 if (cte == 0) {
219 x0 = y0 = 0;
220 return true;
221 }
222
223 // We solve a * X + b * Y = cte
224 // We take a valid x0 in [0, b) by considering the equation mod b.
225 x0 = ProductWithModularInverse(a, b, cte);
226
227 // We choose x0 of the same sign as cte.
228 if (cte < 0 && x0 != 0) x0 -= std::abs(b);
229
230 // By plugging X = x0 + b * Z
231 // We have a * (x0 + b * Z) + b * Y = cte
232 // so a * b * Z + b * Y = cte - a * x0;
233 // and y0 = (cte - a * x0) / b (with an exact division by construction).
234 const absl::int128 t = absl::int128{cte} - absl::int128{a} * absl::int128{x0};
235 DCHECK_EQ(t % absl::int128{b}, absl::int128{0});
236
237 // Overflow-wise, there are two cases for cte > 0:
238 // - a * x0 <= cte, in this case y0 will not overflow (<= cte).
239 // - a * x0 > cte, in this case y0 will be in (-a, 0].
240 const absl::int128 r = t / absl::int128{b};
241 DCHECK_LE(r, absl::int128{std::numeric_limits<int64_t>::max()});
242 DCHECK_GE(r, absl::int128{std::numeric_limits<int64_t>::min()});
243
244 y0 = static_cast<int64_t>(r);
245 return true;
246}
247
248bool DiophantineEquationOfSizeTwoHasSolutionInDomain(const Domain& x, int64_t a,
249 const Domain& y, int64_t b,
250 int64_t cte) {
251 if (x.IsEmpty() || y.IsEmpty()) return false;
252 if (a == 0 && b == 0) {
253 return cte == 0;
254 }
255 if (a == 0) {
256 const int64_t div = cte / b;
257 if (b * div != cte) return false;
258 return y.Contains(div);
259 }
260 if (b == 0) {
261 const int64_t div = cte / a;
262 if (a * div != cte) return false;
263 return x.Contains(div);
264 }
265
266 if (a == b) {
267 const int64_t div = cte / a;
268 if (a * div != cte) {
269 return false;
270 }
271 return !Domain(x)
272 .Negation()
273 .AdditionWith(Domain(div))
274 .IntersectionWith(y)
275 .IsEmpty();
276 }
277
278 int64_t x0, y0;
279 if (!SolveDiophantineEquationOfSizeTwo(a, b, cte, x0, y0)) {
280 return false;
281 }
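  // All solutions are of the form X = x0 + b * Z and Y = y0 - a * Z for an
  // integer Z, so a solution exists in the given domains iff the two domains
  // induced on Z below intersect.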
282 const Domain z_domain =
283 x.AdditionWith(Domain(-x0))
284 .InverseMultiplicationBy(b)
285 .IntersectionWith(
286 y.AdditionWith(Domain(-y0)).InverseMultiplicationBy(-a));
287
288 const Domain z_restricted_d1 =
289 x.AdditionWith(Domain(-x0)).InverseMultiplicationBy(b);
290 const Domain z_restricted_d2 =
291 y.AdditionWith(Domain(-y0)).InverseMultiplicationBy(-a);
292 const Domain z_restricted_domain =
293 z_restricted_d1.IntersectionWith(z_restricted_d2);
294 return !z_restricted_domain.IsEmpty();
295}
296
297// TODO(user): Find better implementation? In practice passing via double is
298// almost always correct, but the CapProd() might be a bit slow. However this
299// is only called when we do propagate something.
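// For illustration: FloorSquareRoot(10) == 3 and FloorSquareRoot(25) == 5.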
300int64_t FloorSquareRoot(int64_t a) {
301 int64_t result =
302 static_cast<int64_t>(std::floor(std::sqrt(static_cast<double>(a))));
303 while (CapProd(result, result) > a) --result;
304 while (CapProd(result + 1, result + 1) <= a) ++result;
305 return result;
306}
307
308// TODO(user): Find better implementation?
309int64_t CeilSquareRoot(int64_t a) {
310 int64_t result =
311 static_cast<int64_t>(std::ceil(std::sqrt(static_cast<double>(a))));
312 while (CapProd(result, result) < a) ++result;
313 while ((result - 1) * (result - 1) >= a) --result;
314 return result;
315}
316
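// For illustration: ClosestMultiple(11, 4) == 12, ClosestMultiple(10, 4) == 8
// (ties round down), and ClosestMultiple(-11, 4) == -12.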
317int64_t ClosestMultiple(int64_t value, int64_t base) {
318 if (value < 0) return -ClosestMultiple(-value, base);
319 int64_t result = value / base * base;
320 if (value - result > base / 2) result += base;
321 return result;
322}
323
324bool LinearInequalityCanBeReducedWithClosestMultiple(
325 int64_t base, absl::Span<const int64_t> coeffs,
326 absl::Span<const int64_t> lbs, absl::Span<const int64_t> ubs, int64_t rhs,
327 int64_t* new_rhs) {
328 // Precompute some bounds for the equation base * X + error <= rhs.
329 int64_t max_activity = 0;
330 int64_t max_x = 0;
331 int64_t min_error = 0;
332 const int num_terms = coeffs.size();
333 if (num_terms == 0) return false;
334 for (int i = 0; i < num_terms; ++i) {
335 const int64_t coeff = coeffs[i];
336 CHECK_GT(coeff, 0);
337 const int64_t closest = ClosestMultiple(coeff, base);
338 max_activity += coeff * ubs[i];
339 max_x += closest / base * ubs[i];
340
341 const int64_t error = coeff - closest;
342 if (error >= 0) {
343 min_error += error * lbs[i];
344 } else {
345 min_error += error * ubs[i];
346 }
347 }
348
349 if (max_activity <= rhs) {
350 // The constraint is trivially true.
351 *new_rhs = max_x;
352 return true;
353 }
354
355 // This is the max error assuming that activity > rhs.
356 int64_t max_error_if_invalid = 0;
357 const int64_t slack = max_activity - rhs - 1;
358 for (int i = 0; i < num_terms; ++i) {
359 const int64_t coeff = coeffs[i];
360 const int64_t closest = ClosestMultiple(coeff, base);
361 const int64_t error = coeff - closest;
362 if (error >= 0) {
363 max_error_if_invalid += error * ubs[i];
364 } else {
365 const int64_t lb = std::max(lbs[i], ubs[i] - slack / coeff);
366 max_error_if_invalid += error * lb;
367 }
368 }
369
370 // We have old solution valid =>
371 // base * X + error <= rhs
372 // base * X <= rhs - error
373 // base * X <= rhs - min_error
374 // X <= new_rhs
375 *new_rhs = std::min(max_x, MathUtil::FloorOfRatio(rhs - min_error, base));
376
377 // And we have old solution invalid =>
378 // base * X + error >= rhs + 1
379 // base * X >= rhs + 1 - max_error_if_invalid
380 // X >= infeasibility_bound
381 const int64_t infeasibility_bound =
382 MathUtil::CeilOfRatio(rhs + 1 - max_error_if_invalid, base);
383
384 // If the two bounds can be separated, we have an equivalence!
385 return *new_rhs < infeasibility_bound;
386}
387
388int MoveOneUnprocessedLiteralLast(
389 const absl::btree_set<LiteralIndex>& processed, int relevant_prefix_size,
390 std::vector<Literal>* literals) {
391 if (literals->empty()) return -1;
392 if (!processed.contains(literals->back().Index())) {
393 return std::min<int>(relevant_prefix_size, literals->size());
394 }
395
396 // To get an O(n log n) total size of suffixes, we first process the last n/2
397 // literals, then move them to the front and process the n/2 literals left.
398 // We use the same algorithm recursively. The sum of the suffixes' size S(n)
399 // is thus S(n/2) + n + S(n/2). That gives us the correct complexity. The code
400 // below simulates one step of this algorithm and is made to be "robust" when
401 // from one call to the next, some literals have been removed (but the order
402 // of literals is preserved).
403 int num_processed = 0;
404 int num_not_processed = 0;
405 int target_prefix_size = literals->size() - 1;
406 for (int i = literals->size() - 1; i >= 0; i--) {
407 if (processed.contains((*literals)[i].Index())) {
408 ++num_processed;
409 } else {
410 ++num_not_processed;
411 target_prefix_size = i;
412 }
413 if (num_not_processed >= num_processed) break;
414 }
415 if (num_not_processed == 0) return -1;
416 target_prefix_size = std::min(target_prefix_size, relevant_prefix_size);
417
418 // Once a prefix size has been decided, it is always better to
419 // enqueue the literal already processed first.
420 std::stable_partition(
421 literals->begin() + target_prefix_size, literals->end(),
422 [&processed](Literal l) { return processed.contains(l.Index()); });
423 return target_prefix_size;
424}
425
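// For illustration: with input {1.0, 3.0}, WeightedPick() returns index 0 with
// probability 1/4 and index 1 with probability 3/4.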
426int WeightedPick(absl::Span<const double> input, absl::BitGenRef random) {
427 DCHECK(!input.empty());
428
429 double total_weight = 0;
430 for (const double w : input) {
431 total_weight += w;
432 }
433
434 const double weight_point = absl::Uniform(random, 0.0f, total_weight);
435 double total_weight2 = 0;
436 for (int i = 0; i < input.size(); ++i) {
437 total_weight2 += input[i];
438 if (total_weight2 > weight_point) {
439 return i;
440 }
441 }
442 return input.size() - 1;
443}
444
445void IncrementalAverage::Reset(double reset_value) {
446 num_records_ = 0;
447 average_ = reset_value;
448}
449
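// Note: average_ + (new_record - average_) / n is the standard streaming
// update of the arithmetic mean, so after n calls average_ is exactly the mean
// of the n records (up to floating point rounding).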
450void IncrementalAverage::AddData(double new_record) {
451 num_records_++;
452 average_ += (new_record - average_) / num_records_;
453}
454
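// Note: new_record + d * (average_ - new_record) == d * average_ +
// (1 - d) * new_record, i.e. the usual exponential moving average with decay
// factor d = decaying_factor_; the first record just initializes the average.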
455void ExponentialMovingAverage::AddData(double new_record) {
456 num_records_++;
457 average_ = (num_records_ == 1)
458 ? new_record
459 : (new_record + decaying_factor_ * (average_ - new_record));
460}
461
462void Percentile::AddRecord(double record) {
463 records_.push_front(record);
464 if (records_.size() > record_limit_) {
465 records_.pop_back();
466 }
467}
468
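// For illustration: with records {1, 2, 3, 4} and percent = 50, the rank is
// 4 * 0.5 - 0.5 = 1.5, so we interpolate halfway between the 2nd and 3rd
// smallest records and return 2.5.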
469double Percentile::GetPercentile(double percent) {
470 CHECK_GT(records_.size(), 0);
471 CHECK_LE(percent, 100.0);
472 CHECK_GE(percent, 0.0);
473
474 const int num_records = records_.size();
475 const double percentile_rank =
476 static_cast<double>(num_records) * percent / 100.0 - 0.5;
477 if (percentile_rank <= 0) {
478 return *absl::c_min_element(records_);
479 } else if (percentile_rank >= num_records - 1) {
480 return *absl::c_max_element(records_);
481 }
482 std::vector<double> sorted_records;
483 sorted_records.assign(records_.begin(), records_.end());
484 // Interpolate.
485 DCHECK_GE(num_records, 2);
486 DCHECK_LT(percentile_rank, num_records - 1);
487 const int lower_rank = static_cast<int>(std::floor(percentile_rank));
488 DCHECK_LT(lower_rank, num_records - 1);
489 auto upper_it = sorted_records.begin() + lower_rank + 1;
490 // Ensure that sorted_records[lower_rank + 1] is at the position it would
491 // have if records were actually sorted; the next closest record is then the
492 // largest element to the left of this position.
493 absl::c_nth_element(sorted_records, upper_it);
494 auto lower_it = std::max_element(sorted_records.begin(), upper_it);
495 return *lower_it + (percentile_rank - lower_rank) * (*upper_it - *lower_it);
496}
497
498void MaxBoundedSubsetSum::Reset(int64_t bound) {
499 DCHECK_GE(bound, 0);
500 gcd_ = 0;
501 sums_ = {0};
502 expanded_sums_.clear();
503 current_max_ = 0;
504 bound_ = bound;
505}
506
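// Typical usage, for illustration: after Reset(15), Add(5) and Add(7), the
// reachable sums are {0, 5, 7, 12}, so the best sum not exceeding the bound
// is 12.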
507void MaxBoundedSubsetSum::Add(int64_t value) {
508 if (value == 0) return;
509 if (value > bound_) return;
510 gcd_ = std::gcd(gcd_, value);
511 AddChoicesInternal({value});
512}
513
514void MaxBoundedSubsetSum::AddChoices(absl::Span<const int64_t> choices) {
515 if (DEBUG_MODE) {
516 for (const int64_t c : choices) {
517 DCHECK_GE(c, 0);
518 }
519 }
520
521 // The max is already reachable or we aborted.
522 if (current_max_ == bound_) return;
523
524 // Filter out zero and values greater than bound_.
525 filtered_values_.clear();
526 for (const int64_t c : choices) {
527 if (c == 0 || c > bound_) continue;
528 filtered_values_.push_back(c);
529 gcd_ = std::gcd(gcd_, c);
530 }
531 if (filtered_values_.empty()) return;
532
533 // So we can abort early in the AddChoicesInternal() inner loops.
534 std::sort(filtered_values_.begin(), filtered_values_.end());
535 AddChoicesInternal(filtered_values_);
536}
537
538void MaxBoundedSubsetSum::AddMultiples(int64_t coeff, int64_t max_value) {
539 DCHECK_GE(coeff, 0);
540 DCHECK_GE(max_value, 0);
541
542 if (coeff == 0 || max_value == 0) return;
543 if (coeff > bound_) return;
544 if (current_max_ == bound_) return;
545 gcd_ = std::gcd(gcd_, coeff);
546
547 const int64_t num_values =
548 std::min(max_value, MathUtil::FloorOfRatio(bound_, coeff));
549 if (num_values > 10) {
550 // We only keep GCD in this case.
551 sums_.clear();
552 expanded_sums_.clear();
553 current_max_ = MathUtil::FloorOfRatio(bound_, gcd_) * gcd_;
554 return;
555 }
556
557 filtered_values_.clear();
558 for (int multiple = 1; multiple <= num_values; ++multiple) {
559 const int64_t v = multiple * coeff;
560 if (v == bound_) {
561 current_max_ = bound_;
562 return;
563 }
564 filtered_values_.push_back(v);
565 }
566 AddChoicesInternal(filtered_values_);
567}
568
569void MaxBoundedSubsetSum::AddChoicesInternal(absl::Span<const int64_t> values) {
570 // Mode 1: vector of all possible sums (with duplicates).
571 if (!sums_.empty() && sums_.size() <= max_complexity_per_add_) {
572 const int old_size = sums_.size();
573 for (int i = 0; i < old_size; ++i) {
574 for (const int64_t value : values) {
575 const int64_t s = sums_[i] + value;
576 if (s > bound_) break;
577
578 sums_.push_back(s);
579 current_max_ = std::max(current_max_, s);
580 if (current_max_ == bound_) return; // Abort
581 }
582 }
583 return;
584 }
585
586 // Mode 2: bitset of all possible sums.
587 if (bound_ <= max_complexity_per_add_) {
588 if (!sums_.empty()) {
589 expanded_sums_.assign(bound_ + 1, false);
590 for (const int64_t s : sums_) {
591 expanded_sums_[s] = true;
592 }
593 sums_.clear();
594 }
595
596 // The reverse order is important to not add the current value twice.
597 if (!expanded_sums_.empty()) {
598 for (int64_t i = bound_ - 1; i >= 0; --i) {
599 if (!expanded_sums_[i]) continue;
600 for (const int64_t value : values) {
601 if (i + value > bound_) break;
602
603 expanded_sums_[i + value] = true;
604 current_max_ = std::max(current_max_, i + value);
605 if (current_max_ == bound_) return; // Abort
606 }
607 }
608 return;
609 }
610 }
611
612 // Fall back to gcd_.
613 DCHECK_NE(gcd_, 0);
614 if (gcd_ == 1) {
615 current_max_ = bound_;
616 } else {
617 current_max_ = MathUtil::FloorOfRatio(bound_, gcd_) * gcd_;
618 }
619}
620
621int64_t MaxBoundedSubsetSum::MaxIfAdded(int64_t candidate) const {
622 if (candidate > bound_ || current_max_ == bound_) return current_max_;
623
624 int64_t current_max = current_max_;
625 // Mode 1: vector of all possible sums (with duplicates).
626 if (!sums_.empty()) {
627 for (const int64_t v : sums_) {
628 if (v + candidate > bound_) continue;
629 if (v + candidate > current_max) {
630 current_max = v + candidate;
631 if (current_max == bound_) return current_max;
632 }
633 }
634 return current_max;
635 }
636
637 // Mode 2: bitset of all possible sums.
638 if (!expanded_sums_.empty()) {
639 const int64_t min_useful = std::max<int64_t>(0, current_max_ - candidate);
640 const int64_t max_useful = bound_ - candidate;
641 for (int64_t v = max_useful; v >= min_useful; --v) {
642 if (expanded_sums_[v]) return v + candidate;
643 }
644 }
645 return current_max_;
646}
647
648BasicKnapsackSolver::Result BasicKnapsackSolver::Solve(
649 absl::Span<const Domain> domains, absl::Span<const int64_t> coeffs,
650 absl::Span<const int64_t> costs, const Domain& rhs) {
651 const int num_vars = domains.size();
652 if (num_vars == 0) return {};
653
654 int64_t min_activity = 0;
655 int64_t max_domain_size = 0;
656 for (int i = 0; i < num_vars; ++i) {
657 max_domain_size = std::max(max_domain_size, domains[i].Size());
658 if (coeffs[i] > 0) {
659 min_activity += coeffs[i] * domains[i].Min();
660 } else {
661 min_activity += coeffs[i] * domains[i].Max();
662 }
663 }
664
665 // The complexity of our DP depends on the number of "activity" values
666 // that need to be considered.
667 //
668 // TODO(user): We can also solve efficiently if max_activity - rhs.Min() is
669 // small. Implement.
670 const int64_t num_values = rhs.Max() - min_activity + 1;
671 if (num_values < 0) {
672 // Problem is clearly infeasible, we can report the result right away.
673 Result result;
674 result.solved = true;
675 result.infeasible = true;
676 return result;
677 }
678
679 // Abort if complexity too large.
680 const int64_t max_work_per_variable = std::min(num_values, max_domain_size);
681 if (rhs.Max() - min_activity > 1e6) return {};
682 if (num_vars * num_values * max_work_per_variable > 1e8) return {};
683
684 // Canonicalize to positive coeffs and non-negative variables.
685 domains_.clear();
686 coeffs_.clear();
687 costs_.clear();
688 for (int i = 0; i < num_vars; ++i) {
689 if (coeffs[i] > 0) {
690 domains_.push_back(domains[i].AdditionWith(Domain(-domains[i].Min())));
691 coeffs_.push_back(coeffs[i]);
692 costs_.push_back(costs[i]);
693 } else {
694 domains_.push_back(
695 domains[i].Negation().AdditionWith(Domain(domains[i].Max())));
696 coeffs_.push_back(-coeffs[i]);
697 costs_.push_back(-costs[i]);
698 }
699 }
700
701 Result result =
702 InternalSolve(num_values, rhs.AdditionWith(Domain(-min_activity)));
703 if (result.solved && !result.infeasible) {
704 // Transform solution back.
705 for (int i = 0; i < num_vars; ++i) {
706 if (coeffs[i] > 0) {
707 result.solution[i] += domains[i].Min();
708 } else {
709 result.solution[i] = domains[i].Max() - result.solution[i];
710 }
711 }
712 }
713 return result;
714}
715
716BasicKnapsackSolver::Result BasicKnapsackSolver::InternalSolve(
717 int64_t num_values, const Domain& rhs) {
718 const int num_vars = domains_.size();
719
720 // The set of DP states that we will fill.
721 var_activity_states_.assign(num_vars, std::vector<State>(num_values));
722
723 // Initialize with first variable.
724 for (const int64_t v : domains_[0].Values()) {
725 const int64_t value = v * coeffs_[0];
726 CHECK_GE(value, 0);
727 if (value >= num_values) break;
728 var_activity_states_[0][value].cost = v * costs_[0];
729 var_activity_states_[0][value].value = v;
730 }
731
732 // Fill rest of the DP states.
733 for (int i = 1; i < num_vars; ++i) {
734 const std::vector<State>& prev = var_activity_states_[i - 1];
735 std::vector<State>& current = var_activity_states_[i];
736 for (int prev_value = 0; prev_value < num_values; ++prev_value) {
737 if (prev[prev_value].cost == std::numeric_limits<int64_t>::max()) {
738 continue;
739 }
740 for (const int64_t v : domains_[i].Values()) {
741 const int64_t value = prev_value + v * coeffs_[i];
742 CHECK_GE(value, 0);
743 if (value >= num_values) break;
744 const int64_t new_cost = prev[prev_value].cost + v * costs_[i];
745 if (new_cost < current[value].cost) {
746 current[value].cost = new_cost;
747 current[value].value = v;
748 }
749 }
750 }
751 }
752
753 Result result;
754 result.solved = true;
755
756 int64_t best_cost = std::numeric_limits<int64_t>::max();
757 int64_t best_activity;
758 for (int v = 0; v < num_values; ++v) {
759 // TODO(user): optimize this?
760 if (!rhs.Contains(v)) continue;
761 if (var_activity_states_.back()[v].cost < best_cost) {
762 best_cost = var_activity_states_.back()[v].cost;
763 best_activity = v;
764 }
765 }
766
767 if (best_cost == std::numeric_limits<int64_t>::max()) {
768 result.infeasible = true;
769 return result;
770 }
771
772 // Recover the values.
773 result.solution.resize(num_vars);
774 int64_t current_activity = best_activity;
775 for (int i = num_vars - 1; i >= 0; --i) {
776 const int64_t var_value = var_activity_states_[i][current_activity].value;
777 result.solution[i] = var_value;
778 current_activity -= coeffs_[i] * var_value;
779 }
780
781 return result;
782}
783
784namespace {
785
786class CliqueDecomposition {
787 public:
788 CliqueDecomposition(const std::vector<std::vector<int>>& graph,
789 absl::BitGenRef random, std::vector<int>* buffer)
790 : graph_(graph), random_(random), buffer_(buffer) {
791 const int n = graph.size();
792 permutation_.resize(n);
793 buffer_->resize(n);
794
795 // TODO(user): Start by sorting by decreasing size of adjacency list?
796 for (int i = 0; i < n; ++i) permutation_[i] = i;
797 }
798
799 // This works in O(m). All possible decompositions are reachable, depending on
800 // the initial permutation.
801 //
802 // TODO(user): It can be made faster within the same complexity though.
803 void DecomposeGreedily() {
804 decomposition_.clear();
805 candidates_.clear();
806
807 const int n = graph_.size();
808 taken_.assign(n, false);
809 temp_.assign(n, false);
810
811 int buffer_index = 0;
812 for (const int i : permutation_) {
813 if (taken_[i]) continue;
814
815 // Save start of amo and output i.
816 const int start = buffer_index;
817 taken_[i] = true;
818 (*buffer_)[buffer_index++] = i;
819
820 candidates_.clear();
821 for (const int c : graph_[i]) {
822 if (!taken_[c]) candidates_.push_back(c);
823 }
824 while (!candidates_.empty()) {
825 // Extend at most one with lowest possible permutation index.
826 int next = candidates_.front();
827 for (const int n : candidates_) {
828 if (permutation_[n] < permutation_[next]) next = n;
829 }
830
831 // Add to current amo.
832 taken_[next] = true;
833 (*buffer_)[buffer_index++] = next;
834
835 // Filter candidates.
836 // TODO(user): With a sorted graph_, same complexity, but likely faster.
837 for (const int head : graph_[next]) temp_[head] = true;
838 int new_size = 0;
839 for (const int c : candidates_) {
840 if (taken_[c]) continue;
841 if (!temp_[c]) continue;
842 candidates_[new_size++] = c;
843 }
844 candidates_.resize(new_size);
845 for (const int head : graph_[next]) temp_[head] = false;
846 }
847
848 // Output amo.
849 decomposition_.push_back(
850 absl::MakeSpan(buffer_->data() + start, buffer_index - start));
851 }
852 DCHECK_EQ(buffer_index, n);
853 }
854
855 // We follow the heuristic in the paper: "Partitioning Networks into Cliques:
856 // A Randomized Heuristic Approach", David Chalupa, 2014.
857 //
858 // Note that by keeping the local order of each clique, we cannot make the
859 // order "worse". And each new call to DecomposeGreedily() can only reduce
860 // the number of cliques in the cover.
861 void ChangeOrder() {
862 if (absl::Bernoulli(random_, 0.5)) {
863 std::reverse(decomposition_.begin(), decomposition_.end());
864 } else {
865 std::shuffle(decomposition_.begin(), decomposition_.end(), random_);
866 }
867
868 int out_index = 0;
869 for (const absl::Span<const int> clique : decomposition_) {
870 for (const int i : clique) {
871 permutation_[out_index++] = i;
872 }
873 }
874 }
875
876 const std::vector<absl::Span<int>>& decomposition() const {
877 return decomposition_;
878 }
879
880 private:
881 const std::vector<std::vector<int>>& graph_;
882 absl::BitGenRef random_;
883 std::vector<int>* buffer_;
884
885 std::vector<absl::Span<int>> decomposition_;
886 std::vector<int> candidates_;
887 std::vector<int> permutation_;
888 std::vector<bool> taken_;
889 std::vector<bool> temp_;
890};
891
892} // namespace
893
894std::vector<absl::Span<int>> AtMostOneDecomposition(
895 const std::vector<std::vector<int>>& graph, absl::BitGenRef random,
896 std::vector<int>* buffer) {
897 CliqueDecomposition decomposer(graph, random, buffer);
898 for (int pass = 0; pass < 10; ++pass) {
899 decomposer.DecomposeGreedily();
900 if (decomposer.decomposition().size() == 1) break;
901 decomposer.ChangeOrder();
902 }
903 return decomposer.decomposition();
904}
905
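// For illustration: Compute({2, 3}, /*maximum_sum=*/10) returns {0, 2, 3, 5}
// while Compute({2, 3}, /*maximum_sum=*/4) returns {0, 2, 3}.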
906absl::Span<const int64_t> SortedSubsetSums::Compute(
907 absl::Span<const int64_t> elements, int64_t maximum_sum,
908 bool abort_if_maximum_reached) {
909 sums_.clear();
910 sums_.push_back(0);
911 for (const int64_t e : elements) {
912 DCHECK_GE(e, 0);
913 if (e == 0 || e > maximum_sum) continue;
914
915 // Optimization: If all the sums in [0, maximum_sum] are already reachable
916 // we can abort early since no new reachable sum will be discovered.
917 if (sums_.size() == maximum_sum + 1) return sums_;
918
919 // Early abort when asked if we already reached maximum_sum.
920 if (abort_if_maximum_reached && sums_.back() == maximum_sum) return sums_;
921
922 // We merge sort sums_ and sums_ + e into new_sums_.
923 int i = 0;
924 int j = 0;
925 new_sums_.clear();
926 const int size = sums_.size();
927 const int64_t* const data = sums_.data();
928 int64_t last_pushed = -1;
929 while (i < size) {
930 DCHECK_LE(j, i); // Since we add a positive e.
931 const int64_t a = data[i];
932 const int64_t b = data[j] + e;
933 int64_t to_push;
934 if (a <= b) {
935 ++i;
936 if (a == b) ++j;
937 to_push = a;
938 } else {
939 to_push = b;
940 ++j;
941 }
942 if (to_push == last_pushed) continue;
943 if (to_push > maximum_sum) {
944 j = size; // so we don't keep pushing below.
945 break;
946 }
947 last_pushed = to_push;
948 new_sums_.push_back(to_push);
949 }
950
951 // We are sure of this since we will break only when to_push > maximum_sum
952 // and we are guaranteed to have pushed all sums below "to_push" before, which
953 // includes all the initial sums in sums_.
954 DCHECK_EQ(i, size);
955
956 for (; j < size; ++j) {
957 const int64_t to_push = data[j] + e;
958 if (to_push == last_pushed) continue;
959 if (to_push > maximum_sum) break;
960 last_pushed = to_push;
961 new_sums_.push_back(to_push);
962 }
963 std::swap(sums_, new_sums_);
964 }
965 return sums_;
966}
967
968double MaxBoundedSubsetSumExact::ComplexityEstimate(int num_elements,
969 int64_t bin_size) {
970 double estimate = std::numeric_limits<double>::infinity();
971 if (num_elements < 100) {
972 estimate = 2 * pow(2.0, num_elements / 2);
973 }
974 return std::min(estimate, static_cast<double>(bin_size) *
975 static_cast<double>(num_elements));
976}
977
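// Meet-in-the-middle, for illustration: with elements {3, 5, 7, 8} and
// bin_size 13, the two halves have reachable sums {0, 3, 5, 8} and {0, 7, 8},
// and the best combination is 5 + 8 == 13.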
978int64_t MaxBoundedSubsetSumExact::MaxSubsetSum(
979 absl::Span<const int64_t> elements, int64_t bin_size) {
980 // Get rid of a few easy to decide corner cases.
981 if (elements.empty()) return 0;
982 if (elements.size() == 1) {
983 if (elements[0] > bin_size) return 0;
984 return elements[0];
985 }
986
987 // We split the elements in two equal-sized parts.
988 const int middle = elements.size() / 2;
989 const auto span_a = sums_a_.Compute(elements.subspan(0, middle), bin_size,
990 /*abort_if_maximum_reached=*/true);
991 if (span_a.back() == bin_size) return bin_size;
992
993 const auto span_b = sums_b_.Compute(elements.subspan(middle), bin_size,
994 /*abort_if_maximum_reached=*/true);
995 if (span_b.back() == bin_size) return bin_size;
996
997 // For all possible sum a, we compute the largest sum b that fits.
998 // We do that in linear time thanks to the sorted partial sums.
999 int64_t result = 0;
1000 CHECK(!span_a.empty());
1001 CHECK(!span_b.empty());
1002 int j = span_b.size() - 1;
1003 for (int i = 0; i < span_a.size(); ++i) {
1004 while (j >= 0 && span_a[i] + span_b[j] > bin_size) --j;
1005 result = std::max(result, span_a[i] + span_b[j]);
1006 if (result == bin_size) return bin_size;
1007 }
1008 return result;
1009}
1010
1011} // namespace sat
1012} // namespace operations_research