// Google OR-Tools v9.12 — a fast and portable software suite for
// combinatorial optimization.
//
// File: ortools/sat/encoding.cc
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/encoding.h"

#include <stddef.h>

#include <algorithm>
#include <deque>
#include <functional>
#include <limits>
#include <queue>
#include <string>
#include <utility>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "ortools/sat/boolean_problem.pb.h"
#include "ortools/sat/pb_constraint.h"
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/sat/sat_solver.h"
#include "ortools/sat/util.h"
#include "ortools/util/stl_util.h"
namespace operations_research {
namespace sat {

44 EncodingNode node;
45 node.lb_ = 1;
46 node.ub_ = 1;
47 node.weight_lb_ = 0;
48 node.weight_ = weight;
49 return node;
50}
51
53 EncodingNode node;
54 node.lb_ = 0;
55 node.weight_lb_ = 0;
56 node.ub_ = 1;
57 node.weight_ = weight;
58 node.for_sorting_ = l.Variable();
59 node.literals_ = {l};
60 return node;
61}
62
64 std::function<Literal(int x)> create_lit,
65 Coefficient weight) {
66 EncodingNode node;
67 node.lb_ = lb;
68 node.ub_ = ub;
69 node.weight_lb_ = 0;
70 node.create_lit_ = std::move(create_lit);
71 node.weight_ = weight;
72
73 node.literals_.push_back(node.create_lit_(lb));
74
75 // TODO(user): Not ideal, we should probably just provide index in the
76 // original objective for sorting purpose.
77 node.for_sorting_ = node.literals_[0].Variable();
78 return node;
79}
80
82 SatSolver* solver) {
83 CHECK(literals_.empty()) << "Already initialized";
84 CHECK_GT(n, 0);
85 const BooleanVariable first_var_index(solver->NumVariables());
86 solver->SetNumVariables(solver->NumVariables() + n);
87 for (int i = 0; i < n; ++i) {
88 literals_.push_back(Literal(first_var_index + i, true));
89 if (i > 0) {
90 solver->AddBinaryClause(literal(i - 1), literal(i).Negated());
91 }
92 }
93 lb_ = a->lb_ + b->lb_;
94 ub_ = lb_ + n;
95 depth_ = 1 + std::max(a->depth_, b->depth_);
96 child_a_ = a;
97 child_b_ = b;
98 for_sorting_ = first_var_index;
99}
100
101void EncodingNode::InitializeAmoNode(absl::Span<EncodingNode* const> nodes,
102 SatSolver* solver) {
103 CHECK_GE(nodes.size(), 2);
104 CHECK(literals_.empty()) << "Already initialized";
105 const BooleanVariable var = solver->NewBooleanVariable();
106 const Literal new_literal(var, true);
107 literals_.push_back(new_literal);
108 child_a_ = nullptr;
109 child_b_ = nullptr;
110 lb_ = 0;
111 ub_ = 1;
112 depth_ = 0;
113 for_sorting_ = var;
114 std::vector<Literal> clause{new_literal.Negated()};
115 for (const EncodingNode* node : nodes) {
116 // node_lit => new_lit.
117 clause.push_back(node->literals_[0]);
118 solver->AddBinaryClause(node->literals_[0].Negated(), new_literal);
119 depth_ = std::max(node->depth_ + 1, depth_);
120 for_sorting_ = std::min(for_sorting_, node->for_sorting_);
121 }
122
123 // If new_literal is true then one of the lit must be true.
124 // Note that this is not needed for correctness though.
125 solver->AddProblemClause(clause);
126}
127
129 SatSolver* solver) {
130 CHECK(literals_.empty()) << "Already initialized";
131 const BooleanVariable first_var_index(solver->NumVariables());
132 solver->SetNumVariables(solver->NumVariables() + 1);
133 literals_.emplace_back(first_var_index, true);
134 child_a_ = a;
135 child_b_ = b;
136 ub_ = a->ub_ + b->ub_;
137 lb_ = a->lb_ + b->lb_;
138 depth_ = 1 + std::max(a->depth_, b->depth_);
139
140 // Merging the node of the same depth in order seems to help a bit.
141 for_sorting_ = std::min(a->for_sorting_, b->for_sorting_);
142}
143
145 EncodingNode* b) {
146 CHECK(literals_.empty()) << "Already initialized";
147 child_a_ = a;
148 child_b_ = b;
149 ub_ = a->ub_ + b->ub_;
150 weight_ = weight;
151 weight_lb_ = a->lb_ + b->lb_;
152 lb_ = weight_lb_ + 1;
153 depth_ = 1 + std::max(a->depth_, b->depth_);
154
155 // Merging the node of the same depth in order seems to help a bit.
156 for_sorting_ = std::min(a->for_sorting_, b->for_sorting_);
157}
158
160 if (current_ub() == ub_) return false;
161 if (create_lit_ != nullptr) {
162 literals_.emplace_back(create_lit_(current_ub()));
163 } else {
164 CHECK_NE(solver, nullptr);
165 literals_.emplace_back(BooleanVariable(solver->NumVariables()), true);
166 solver->SetNumVariables(solver->NumVariables() + 1);
167 }
168 if (literals_.size() > 1) {
169 solver->AddBinaryClause(literals_.back().Negated(),
170 literals_[literals_.size() - 2]);
171 }
172 return true;
173}
174
175Coefficient EncodingNode::Reduce(const SatSolver& solver) {
176 if (!literals_.empty()) {
177 int i = 0;
178 while (i < literals_.size() &&
179 solver.Assignment().LiteralIsTrue(literals_[i])) {
180 ++i;
181 ++lb_;
182 }
183 literals_.erase(literals_.begin(), literals_.begin() + i);
184 while (!literals_.empty() &&
185 solver.Assignment().LiteralIsFalse(literals_.back())) {
186 literals_.pop_back();
187 ub_ = lb_ + literals_.size();
188 }
189 }
190
191 if (weight_lb_ >= lb_) return Coefficient(0);
192 const Coefficient result = Coefficient(lb_ - weight_lb_) * weight_;
193 weight_lb_ = lb_;
194 return result;
195}
196
197void EncodingNode::ApplyWeightUpperBound(Coefficient gap, SatSolver* solver) {
198 CHECK_GT(weight_, 0);
199 const Coefficient num_allowed = (gap / weight_);
200 if (num_allowed > std::numeric_limits<int>::max() / 2) return;
201 const int new_size =
202 std::max(0, (weight_lb_ - lb_) + static_cast<int>(num_allowed.value()));
203 if (size() <= new_size) return;
204 for (int i = new_size; i < size(); ++i) {
205 if (!solver->AddUnitClause(literal(i).Negated())) return;
206 }
207 literals_.resize(new_size);
208 ub_ = lb_ + new_size;
209}
210
212 if (size() > 1) {
213 for (int i = 1; i < size(); ++i) {
214 if (!solver->AddUnitClause(literal(i).Negated())) return;
215 }
216 literals_.resize(1);
217 ub_ = lb_ + 1;
218 return;
219 }
220
221 if (current_ub() == ub_) return;
222
223 // TODO(user): Avoid creating a Boolean just to fix it!
224 IncreaseNodeSize(this, solver);
225 CHECK_EQ(size(), 2);
226 if (!solver->AddUnitClause(literal(1).Negated())) return;
227 literals_.resize(1);
228 ub_ = lb_ + 1;
229}
230
232 DCHECK(!HasNoWeight());
233 const int index = weight_lb_ - lb_;
234 return index < literals_.size() && literals_[index].Negated() == other;
235}
236
238 CHECK(!HasNoWeight());
239 const int index = weight_lb_ - lb_;
240 CHECK_GE(index, 0) << "Not reduced?";
241 while (index >= literals_.size()) {
242 IncreaseNodeSize(this, solver);
243 }
244 return literals_[index].Negated();
245}
246
248 CHECK_LT(weight_lb_ - lb_, literals_.size());
249 weight_lb_++;
250}
251
253 return weight_ == 0 || weight_lb_ >= ub_;
254}
255
257 const VariablesAssignment& assignment) const {
258 std::string result;
259 absl::StrAppend(&result, "depth:", depth_);
260 absl::StrAppend(&result, " [", lb_, ",", lb_ + literals_.size(), "]");
261 absl::StrAppend(&result, " ub:", ub_);
262 absl::StrAppend(&result, " weight:", weight_.value());
263 absl::StrAppend(&result, " weight_lb:", weight_lb_);
264 absl::StrAppend(&result, " values:");
265 const size_t limit = 20;
266 int value = 0;
267 for (int i = 0; i < std::min(literals_.size(), limit); ++i) {
268 char c = '?';
269 if (assignment.LiteralIsTrue(literals_[i])) {
270 c = '1';
271 value = i + 1;
272 } else if (assignment.LiteralIsFalse(literals_[i])) {
273 c = '0';
274 }
275 result += c;
276 }
277 absl::StrAppend(&result, " val:", lb_ + value);
278 return result;
279}
280
282 EncodingNode n;
283 n.InitializeLazyNode(a, b, solver);
284 solver->AddBinaryClause(a->literal(0).Negated(), n.literal(0));
285 solver->AddBinaryClause(b->literal(0).Negated(), n.literal(0));
286 solver->AddTernaryClause(n.literal(0).Negated(), a->literal(0),
287 b->literal(0));
288 return n;
289}
290
292 if (!node->IncreaseCurrentUB(solver)) return;
293 std::vector<EncodingNode*> to_process;
294 to_process.push_back(node);
295
296 // Only one side of the constraint is mandatory (the one propagating the ones
297 // to the top of the encoding tree), and it seems more efficient not to encode
298 // the other side.
299 //
300 // TODO(user): Experiment more.
301 const bool complete_encoding = false;
302
303 while (!to_process.empty()) {
304 EncodingNode* n = to_process.back();
305 EncodingNode* a = n->child_a();
306 EncodingNode* b = n->child_b();
307 to_process.pop_back();
308
309 // Integer leaf node.
310 if (a == nullptr) continue;
311 CHECK_NE(solver, nullptr);
312
313 // Note that since we were able to increase its size, n must have children.
314 // n->GreaterThan(target) is the new literal of n.
315 CHECK(a != nullptr);
316 CHECK(b != nullptr);
317 const int target = n->current_ub() - 1;
318
319 // Add a literal to a if needed.
320 // That is, now that the node n can go up to it new current_ub, if we need
321 // to increase the current_ub of a.
322 if (a->current_ub() != a->ub()) {
323 CHECK_GE(a->current_ub() - 1 + b->lb(), target - 1);
324 if (a->current_ub() - 1 + b->lb() < target) {
325 CHECK(a->IncreaseCurrentUB(solver));
326 to_process.push_back(a);
327 }
328 }
329
330 // Add a literal to b if needed.
331 if (b->current_ub() != b->ub()) {
332 CHECK_GE(b->current_ub() - 1 + a->lb(), target - 1);
333 if (b->current_ub() - 1 + a->lb() < target) {
334 CHECK(b->IncreaseCurrentUB(solver));
335 to_process.push_back(b);
336 }
337 }
338
339 // Wire the new literal of n correctly with its two children.
340 for (int ia = a->lb(); ia < a->current_ub(); ++ia) {
341 const int ib = target - ia;
342 if (complete_encoding && ib >= b->lb() && ib < b->current_ub()) {
343 // if x <= ia and y <= ib then x + y <= ia + ib.
344 solver->AddTernaryClause(n->GreaterThan(target).Negated(),
345 a->GreaterThan(ia), b->GreaterThan(ib));
346 }
347 if (complete_encoding && ib == b->ub()) {
348 solver->AddBinaryClause(n->GreaterThan(target).Negated(),
349 a->GreaterThan(ia));
350 }
351
352 if (ib - 1 == b->lb() - 1) {
353 solver->AddBinaryClause(n->GreaterThan(target),
354 a->GreaterThan(ia).Negated());
355 }
356 if ((ib - 1) >= b->lb() && (ib - 1) < b->current_ub()) {
357 // if x > ia and y > ib - 1 then x + y > ia + ib.
358 solver->AddTernaryClause(n->GreaterThan(target),
359 a->GreaterThan(ia).Negated(),
360 b->GreaterThan(ib - 1).Negated());
361 }
362 }
363
364 // Case ia = a->lb() - 1; a->GreaterThan(ia) always true.
365 {
366 const int ib = target - (a->lb() - 1);
367 if ((ib - 1) == b->lb() - 1) {
368 if (!solver->AddUnitClause(n->GreaterThan(target))) return;
369 }
370 if ((ib - 1) >= b->lb() && (ib - 1) < b->current_ub()) {
371 solver->AddBinaryClause(n->GreaterThan(target),
372 b->GreaterThan(ib - 1).Negated());
373 }
374 }
375
376 // case ia == a->ub; a->GreaterThan(ia) always false.
377 {
378 const int ib = target - a->ub();
379 if (complete_encoding && ib >= b->lb() && ib < b->current_ub()) {
380 solver->AddBinaryClause(n->GreaterThan(target).Negated(),
381 b->GreaterThan(ib));
382 }
383 if (ib == b->ub()) {
384 if (!solver->AddUnitClause(n->GreaterThan(target).Negated())) return;
385 }
386 }
387 }
388}
389
390EncodingNode FullMerge(Coefficient upper_bound, EncodingNode* a,
391 EncodingNode* b, SatSolver* solver) {
392 EncodingNode n;
393 const int size =
394 std::min(Coefficient(a->size() + b->size()), upper_bound).value();
395 n.InitializeFullNode(size, a, b, solver);
396 for (int ia = 0; ia < a->size(); ++ia) {
397 if (ia + b->size() < size) {
398 solver->AddBinaryClause(n.literal(ia + b->size()).Negated(),
399 a->literal(ia));
400 }
401 if (ia < size) {
402 solver->AddBinaryClause(n.literal(ia), a->literal(ia).Negated());
403 } else {
404 // Fix the variable to false because of the given upper_bound.
405 if (!solver->AddUnitClause(a->literal(ia).Negated())) return n;
406 }
407 }
408 for (int ib = 0; ib < b->size(); ++ib) {
409 if (ib + a->size() < size) {
410 solver->AddBinaryClause(n.literal(ib + a->size()).Negated(),
411 b->literal(ib));
412 }
413 if (ib < size) {
414 solver->AddBinaryClause(n.literal(ib), b->literal(ib).Negated());
415 } else {
416 // Fix the variable to false because of the given upper_bound.
417 if (!solver->AddUnitClause(b->literal(ib).Negated())) return n;
418 }
419 }
420 for (int ia = 0; ia < a->size(); ++ia) {
421 for (int ib = 0; ib < b->size(); ++ib) {
422 if (ia + ib < size) {
423 // if x <= ia and y <= ib, then x + y <= ia + ib.
424 solver->AddTernaryClause(n.literal(ia + ib).Negated(), a->literal(ia),
425 b->literal(ib));
426 }
427 if (ia + ib + 1 < size) {
428 // if x > ia and y > ib, then x + y > ia + ib + 1.
429 solver->AddTernaryClause(n.literal(ia + ib + 1),
430 a->literal(ia).Negated(),
431 b->literal(ib).Negated());
432 } else {
433 solver->AddBinaryClause(a->literal(ia).Negated(),
434 b->literal(ib).Negated());
435 }
436 }
437 }
438 return n;
439}
440
441EncodingNode* MergeAllNodesWithDeque(Coefficient upper_bound,
442 const std::vector<EncodingNode*>& nodes,
443 SatSolver* solver,
444 std::deque<EncodingNode>* repository) {
445 std::deque<EncodingNode*> dq(nodes.begin(), nodes.end());
446 while (dq.size() > 1) {
447 EncodingNode* a = dq.front();
448 dq.pop_front();
449 EncodingNode* b = dq.front();
450 dq.pop_front();
451 repository->push_back(FullMerge(upper_bound, a, b, solver));
452 dq.push_back(&repository->back());
453 }
454 return dq.front();
455}
456
457namespace {
458struct SortEncodingNodePointers {
459 bool operator()(EncodingNode* a, EncodingNode* b) const { return *a < *b; }
460};
461} // namespace
462
464 Coefficient weight, const std::vector<EncodingNode*>& nodes,
465 SatSolver* solver, std::deque<EncodingNode>* repository) {
466 std::priority_queue<EncodingNode*, std::vector<EncodingNode*>,
467 SortEncodingNodePointers>
468 pq(nodes.begin(), nodes.end());
469 while (pq.size() > 2) {
470 EncodingNode* a = pq.top();
471 pq.pop();
472 EncodingNode* b = pq.top();
473 pq.pop();
474 repository->push_back(LazyMerge(a, b, solver));
475 pq.push(&repository->back());
476 }
477
478 CHECK_EQ(pq.size(), 2);
479 EncodingNode* a = pq.top();
480 pq.pop();
481 EncodingNode* b = pq.top();
482 pq.pop();
483
484 repository->push_back(EncodingNode());
485 EncodingNode* n = &repository->back();
486 n->InitializeLazyCoreNode(weight, a, b);
487 solver->AddBinaryClause(a->literal(0), b->literal(0));
488 return n;
489}
490
491namespace {
492
493bool EncodingNodeByWeight(const EncodingNode* a, const EncodingNode* b) {
494 return a->weight() < b->weight();
495}
496
497bool EncodingNodeByDepth(const EncodingNode* a, const EncodingNode* b) {
498 return a->depth() < b->depth();
499}
500
501} // namespace
502
503void ReduceNodes(Coefficient upper_bound, Coefficient* lower_bound,
504 std::vector<EncodingNode*>* nodes, SatSolver* solver) {
505 // Remove the left-most variables fixed to one from each node.
506 // Also update the lower_bound. Note that Reduce() needs the solver to be
507 // at the root node in order to work.
508 solver->Backtrack(0);
509 for (EncodingNode* n : *nodes) {
510 *lower_bound += n->Reduce(*solver);
511 }
512
513 // Fix the nodes right-most variables that are above the gap.
514 // If we closed the problem, we abort and return and empty vector.
515 if (upper_bound != kCoefficientMax) {
516 const Coefficient gap = upper_bound - *lower_bound;
517 if (gap < 0) {
518 nodes->clear();
519 return;
520 }
521 for (EncodingNode* n : *nodes) {
522 n->ApplyWeightUpperBound(gap, solver);
523 }
524 }
525
526 // Remove the empty nodes.
527 nodes->erase(std::remove_if(nodes->begin(), nodes->end(),
528 [](EncodingNode* a) { return a->HasNoWeight(); }),
529 nodes->end());
530
531 // Sort the nodes.
532 switch (solver->parameters().max_sat_assumption_order()) {
533 case SatParameters::DEFAULT_ASSUMPTION_ORDER:
534 break;
535 case SatParameters::ORDER_ASSUMPTION_BY_DEPTH:
536 std::sort(nodes->begin(), nodes->end(), EncodingNodeByDepth);
537 break;
538 case SatParameters::ORDER_ASSUMPTION_BY_WEIGHT:
539 std::sort(nodes->begin(), nodes->end(), EncodingNodeByWeight);
540 break;
541 }
542 if (solver->parameters().max_sat_reverse_assumption_order()) {
543 // TODO(user): with DEFAULT_ASSUMPTION_ORDER, this will lead to a somewhat
544 // weird behavior, since we will reverse the nodes at each iteration...
545 std::reverse(nodes->begin(), nodes->end());
546 }
547}
548
549std::vector<Literal> ExtractAssumptions(Coefficient stratified_lower_bound,
550 const std::vector<EncodingNode*>& nodes,
551 SatSolver* solver) {
552 // Extract the assumptions from the nodes.
553 std::vector<Literal> assumptions;
554 for (EncodingNode* n : nodes) {
555 if (n->weight() >= stratified_lower_bound) {
556 assumptions.push_back(n->GetAssumption(solver));
557 }
558 }
559 return assumptions;
560}
561
562Coefficient ComputeCoreMinWeight(const std::vector<EncodingNode*>& nodes,
563 absl::Span<const Literal> core) {
564 Coefficient min_weight = kCoefficientMax;
565 int index = 0;
566 for (int i = 0; i < core.size(); ++i) {
567 for (; index < nodes.size() && !nodes[index]->AssumptionIs(core[i]);
568 ++index) {
569 }
570 CHECK_LT(index, nodes.size());
571 min_weight = std::min(min_weight, nodes[index]->weight());
572 }
573 return min_weight;
574}
575
576Coefficient MaxNodeWeightSmallerThan(const std::vector<EncodingNode*>& nodes,
577 Coefficient upper_bound) {
578 Coefficient result(0);
579 for (EncodingNode* n : nodes) {
580 CHECK_GT(n->weight(), 0);
581 if (n->weight() < upper_bound) {
582 result = std::max(result, n->weight());
583 }
584 }
585 return result;
586}
587
588bool ObjectiveEncoder::ProcessCore(absl::Span<const Literal> core,
589 Coefficient min_weight, Coefficient gap,
590 std::string* info) {
591 // Backtrack to be able to add new constraints.
592 if (!sat_solver_->ResetToLevelZero()) return false;
593 if (core.size() == 1) {
594 return sat_solver_->AddUnitClause(core[0].Negated());
595 }
596
597 // Remove from nodes the EncodingNode in the core and put them in to_merge.
598 std::vector<EncodingNode*> to_merge;
599 {
600 int index = 0;
601 std::vector<EncodingNode*> new_nodes;
602 for (int i = 0; i < core.size(); ++i) {
603 // Since the nodes appear in order in the core, we can find the
604 // relevant "objective" variable efficiently with a simple linear scan
605 // in the nodes vector (done with index).
606 for (; !nodes_[index]->AssumptionIs(core[i]); ++index) {
607 CHECK_LT(index, nodes_.size());
608 new_nodes.push_back(nodes_[index]);
609 }
610 CHECK_LT(index, nodes_.size());
611 EncodingNode* node = nodes_[index];
612
613 // TODO(user): propagate proper ub first.
614 if (alternative_encoding_ && node->ub() > node->lb() + 1) {
615 // We can distinguish the first literal of the node.
616 // By not counting its weight in node, and creating a new node for it.
617 node->IncreaseWeightLb();
618 new_nodes.push_back(node);
619
620 // Now keep processing with this new node instead.
621 repository_.push_back(
622 EncodingNode::LiteralNode(core[i].Negated(), node->weight()));
623 repository_.back().set_depth(node->depth());
624 node = &repository_.back();
625 }
626
627 to_merge.push_back(node);
628
629 // Special case if the weight > min_weight. we keep it, but reduce its
630 // cost. This is the same "trick" as in WPM1 used to deal with weight.
631 // We basically split a clause with a larger weight in two identical
632 // clauses, one with weight min_weight that will be merged and one with
633 // the remaining weight.
634 if (node->weight() > min_weight) {
635 node->set_weight(node->weight() - min_weight);
636 new_nodes.push_back(node);
637 }
638 ++index;
639 }
640 for (; index < nodes_.size(); ++index) {
641 new_nodes.push_back(nodes_[index]);
642 }
643 nodes_ = new_nodes;
644 }
645
646 // Are the literal in amo relationship?
647 // - If min_weight is large enough, we can infer that.
648 // - If the size is small we can infer this via propagation.
649 bool in_exactly_one = (2 * min_weight) > gap;
650
651 // Amongst the node to merge, if many are boolean nodes in an "at most one"
652 // relationship, it is super advantageous to exploit it during merging as we
653 // can regroup all nodes from an at most one in a single new node with a depth
654 // of 1.
655 if (!in_exactly_one) {
656 // Collect "boolean nodes".
657 std::vector<Literal> bool_nodes;
658 absl::flat_hash_map<LiteralIndex, int> node_indices;
659 for (int i = 0; i < to_merge.size(); ++i) {
660 const EncodingNode& node = *to_merge[i];
661
662 if (node.size() != 1) continue;
663 if (node.ub() != node.lb() + 1) continue;
664 if (node.weight_lb() != node.lb()) continue;
665 if (node_indices.contains(node.literal(0).Index())) continue;
666 node_indices[node.literal(0).Index()] = i;
667 bool_nodes.push_back(node.literal(0));
668 }
669
670 // For "small" core, with O(n) full propagation, we can discover possible
671 // at most ones. This is a bit costly but can significantly reduce the
672 // number of Booleans needed and has a good positive impact.
673 std::vector<int> buffer;
674 std::vector<absl::Span<const Literal>> decomposition;
675 if (params_.core_minimization_level() > 1 && bool_nodes.size() < 300 &&
676 bool_nodes.size() > 1) {
677 const auto& assignment = sat_solver_->Assignment();
678 const int size = bool_nodes.size();
679 std::vector<std::vector<int>> graph(size);
680 for (int i = 0; i < size; ++i) {
681 if (!sat_solver_->ResetToLevelZero()) return false;
682 if (!sat_solver_->EnqueueDecisionIfNotConflicting(bool_nodes[i])) {
683 // TODO(user): this node is closed and can be removed from the core.
684 continue;
685 }
686 for (int j = 0; j < size; ++j) {
687 if (i == j) continue;
688 if (assignment.LiteralIsFalse(bool_nodes[j])) {
689 graph[i].push_back(j);
690
691 // Unit propagation is not always symmetric.
692 graph[j].push_back(i);
693 }
694
695 // TODO(user): If assignment.LiteralIsTrue(bool_nodes[j]) We can
696 // minimize the core here by removing bool_nodes[i] from it. Note
697 // however that since we already minimized the core, this is
698 // unlikely to happen.
699 }
700 }
701 if (!sat_solver_->ResetToLevelZero()) return false;
702
703 for (std::vector<int>& adj : graph) {
705 }
706 const std::vector<absl::Span<int>> index_decompo =
707 AtMostOneDecomposition(graph, *random_, &buffer);
708
709 // Convert.
710 std::vector<Literal> new_order;
711 for (const int i : buffer) new_order.push_back(bool_nodes[i]);
712 bool_nodes = new_order;
713 for (const auto span : index_decompo) {
714 if (span.size() == 1) continue;
715 decomposition.push_back(absl::MakeSpan(
716 bool_nodes.data() + (span.data() - buffer.data()), span.size()));
717 }
718 } else {
719 decomposition = implications_->HeuristicAmoPartition(&bool_nodes);
720 }
721
722 // Same case as above, all the nodes in the core are in a exactly_one.
723 if (decomposition.size() == 1 && decomposition[0].size() == core.size()) {
724 in_exactly_one = true;
725 }
726
727 int num_in_decompo = 0;
728 if (!in_exactly_one) {
729 for (const auto amo : decomposition) {
730 num_in_decompo += amo.size();
731
732 // Extract Amo nodes and set them to nullptr in to_merge.
733 std::vector<EncodingNode*> amo_nodes;
734 for (const Literal l : amo) {
735 const int index = node_indices.at(l.Index());
736 amo_nodes.push_back(to_merge[index]);
737 to_merge[index] = nullptr;
738 }
739
740 // Create the new node with proper constraints and weight.
741 repository_.push_back(EncodingNode());
742 EncodingNode* n = &repository_.back();
743 n->InitializeAmoNode(amo_nodes, sat_solver_);
744 n->set_weight(min_weight);
745 to_merge.push_back(n);
746 }
747 if (num_in_decompo > 0) {
748 absl::StrAppend(info, " amo:", decomposition.size(),
749 " lit:", num_in_decompo);
750 }
751
752 // Clean-up to_merge.
753 int new_size = 0;
754 for (EncodingNode* node : to_merge) {
755 if (node == nullptr) continue;
756 to_merge[new_size++] = node;
757 }
758 to_merge.resize(new_size);
759 }
760 }
761
762 // If all the literal of the core are in at_most_one, the core is actually an
763 // exactly_one. We just subtracted the min_cost above. We just have to enqueue
764 // a constant node with min_weight for the rest of the code to work.
765 if (in_exactly_one) {
766 // Tricky: We need to enforce an upper bound of 1 on the nodes.
767 for (EncodingNode* node : to_merge) {
768 node->TransformToBoolean(sat_solver_);
769 }
770
771 absl::StrAppend(info, " exo");
772 repository_.push_back(EncodingNode::ConstantNode(min_weight));
773 nodes_.push_back(&repository_.back());
774
775 // The negation of the literal in the core are in exactly one.
776 // TODO(user): If we infered the exactly one from the binary implication
777 // graph, there is no need to add the amo since it is already there.
778 std::vector<LiteralWithCoeff> cst;
779 cst.reserve(core.size());
780 for (const Literal l : core) {
781 cst.emplace_back(l.Negated(), Coefficient(1));
782 }
783 sat_solver_->AddLinearConstraint(
784 /*use_lower_bound=*/true, Coefficient(1),
785 /*use_upper_bound=*/true, Coefficient(1), &cst);
786 return !sat_solver_->ModelIsUnsat();
787 }
788
790 min_weight, to_merge, sat_solver_, &repository_));
791 absl::StrAppend(info, " d:", nodes_.back()->depth());
792 return !sat_solver_->ModelIsUnsat();
793}
794
}  // namespace sat
}  // namespace operations_research
void InitializeLazyCoreNode(Coefficient weight, EncodingNode *a, EncodingNode *b)
Definition encoding.cc:144
Literal GreaterThan(int i) const
Definition encoding.h:97
bool AssumptionIs(Literal other) const
GetAssumption() might need to create new literals.
Definition encoding.cc:231
void InitializeFullNode(int n, EncodingNode *a, EncodingNode *b, SatSolver *solver)
Definition encoding.cc:81
Coefficient Reduce(const SatSolver &solver)
Definition encoding.cc:175
void InitializeLazyNode(EncodingNode *a, EncodingNode *b, SatSolver *solver)
Definition encoding.cc:128
int size() const
Accessors to size() and literals in [lb, current_ub).
Definition encoding.h:100
std::string DebugString(const VariablesAssignment &assignment) const
We use the solver to display the current values of the literals.
Definition encoding.cc:256
static EncodingNode GenericNode(int lb, int ub, std::function< Literal(int x)> create_lit, Coefficient weight)
Definition encoding.cc:63
static EncodingNode ConstantNode(Coefficient weight)
Definition encoding.cc:43
void ApplyWeightUpperBound(Coefficient gap, SatSolver *solver)
Definition encoding.cc:197
bool IncreaseCurrentUB(SatSolver *solver)
Definition encoding.cc:159
static EncodingNode LiteralNode(Literal l, Coefficient weight)
Definition encoding.cc:52
void TransformToBoolean(SatSolver *solver)
Indicate that the node cannot grow further than its current assumption.
Definition encoding.cc:211
EncodingNode * child_b() const
Definition encoding.h:154
EncodingNode * child_a() const
Definition encoding.h:153
void InitializeAmoNode(absl::Span< EncodingNode *const > nodes, SatSolver *solver)
Definition encoding.cc:101
Literal GetAssumption(SatSolver *solver)
Definition encoding.cc:237
LiteralIndex Index() const
Definition sat_base.h:91
BooleanVariable Variable() const
Definition sat_base.h:87
bool ProcessCore(absl::Span< const Literal > core, Coefficient min_weight, Coefficient gap, std::string *info)
Definition encoding.cc:588
BooleanVariable NewBooleanVariable()
Definition sat_solver.h:90
bool AddProblemClause(absl::Span< const Literal > literals)
bool AddBinaryClause(Literal a, Literal b)
Same as AddProblemClause() below, but for small clauses.
void Backtrack(int target_level)
const SatParameters & parameters() const
ABSL_MUST_USE_RESULT bool AddUnitClause(Literal true_literal)
bool AddTernaryClause(Literal a, Literal b, Literal c)
const VariablesAssignment & Assignment() const
Definition sat_solver.h:393
void SetNumVariables(int num_variables)
Definition sat_solver.cc:84
bool LiteralIsFalse(Literal literal) const
Definition sat_base.h:185
bool LiteralIsTrue(Literal literal) const
Definition sat_base.h:188
void STLSortAndRemoveDuplicates(T *v, const LessFunc &less_func)
Definition stl_util.h:58
EncodingNode * MergeAllNodesWithDeque(Coefficient upper_bound, const std::vector< EncodingNode * > &nodes, SatSolver *solver, std::deque< EncodingNode > *repository)
Definition encoding.cc:441
std::vector< Literal > ExtractAssumptions(Coefficient stratified_lower_bound, const std::vector< EncodingNode * > &nodes, SatSolver *solver)
Definition encoding.cc:549
void ReduceNodes(Coefficient upper_bound, Coefficient *lower_bound, std::vector< EncodingNode * > *nodes, SatSolver *solver)
Definition encoding.cc:503
Coefficient ComputeCoreMinWeight(const std::vector< EncodingNode * > &nodes, absl::Span< const Literal > core)
Definition encoding.cc:562
EncodingNode LazyMerge(EncodingNode *a, EncodingNode *b, SatSolver *solver)
Definition encoding.cc:281
EncodingNode FullMerge(Coefficient upper_bound, EncodingNode *a, EncodingNode *b, SatSolver *solver)
Definition encoding.cc:390
EncodingNode * LazyMergeAllNodeWithPQAndIncreaseLb(Coefficient weight, const std::vector< EncodingNode * > &nodes, SatSolver *solver, std::deque< EncodingNode > *repository)
Definition encoding.cc:463
void IncreaseNodeSize(EncodingNode *node, SatSolver *solver)
Definition encoding.cc:291
std::vector< absl::Span< int > > AtMostOneDecomposition(const std::vector< std::vector< int > > &graph, absl::BitGenRef random, std::vector< int > *buffer)
Definition util.cc:894
Coefficient MaxNodeWeightSmallerThan(const std::vector< EncodingNode * > &nodes, Coefficient upper_bound)
Definition encoding.cc:576
const Coefficient kCoefficientMax(std::numeric_limits< Coefficient::ValueType >::max())
In SWIG mode, we don't want anything besides these top-level includes.