Google OR-Tools v9.14
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
clause.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/clause.h"
15
16#include <stddef.h>
17
18#include <algorithm>
19#include <cstdint>
20#include <deque>
21#include <new>
22#include <queue>
23#include <string>
24#include <utility>
25#include <vector>
26
27#include "absl/container/flat_hash_map.h"
28#include "absl/container/flat_hash_set.h"
29#include "absl/container/inlined_vector.h"
30#include "absl/log/check.h"
31#include "absl/log/log.h"
32#include "absl/log/vlog_is_on.h"
33#include "absl/random/bit_gen_ref.h"
34#include "absl/random/distributions.h"
35#include "absl/types/span.h"
39#include "ortools/base/timer.h"
43#include "ortools/sat/model.h"
45#include "ortools/sat/util.h"
46#include "ortools/util/bitset.h"
47#include "ortools/util/stats.h"
50namespace operations_research {
51namespace sat {
52
53namespace {
54
55// Returns true if the given watcher list contains the given clause.
56template <typename Watcher>
57bool WatcherListContains(const std::vector<Watcher>& list,
58 const SatClause& candidate) {
59 for (const Watcher& watcher : list) {
60 if (watcher.clause == &candidate) return true;
61 }
62 return false;
63}
64
// A simple wrapper to simplify the erase(std::remove_if()) pattern.
// Note: `c` is a pointer to the container to modify in place.
template <typename Container, typename Predicate>
void RemoveIf(Container c, Predicate p) {
  const auto new_end = std::remove_if(c->begin(), c->end(), p);
  c->erase(new_end, c->end());
}
70
71} // namespace
72
73// ----- ClauseManager -----
74
76 : SatPropagator("ClauseManager"),
77 implication_graph_(model->GetOrCreate<BinaryImplicationGraph>()),
78 trail_(model->GetOrCreate<Trail>()),
79 num_inspected_clauses_(0),
80 num_inspected_clause_literals_(0),
81 num_watched_clauses_(0),
82 stats_("ClauseManager") {
83 trail_->RegisterPropagator(this);
84}
85
87 gtl::STLDeleteElements(&clauses_);
88 IF_STATS_ENABLED(LOG(INFO) << stats_.StatString());
89}
90
91void ClauseManager::Resize(int num_variables) {
92 DCHECK(is_clean_);
93 watchers_on_false_.resize(num_variables << 1);
94 reasons_.resize(num_variables);
95 needs_cleaning_.Resize(LiteralIndex(num_variables << 1));
96}
97
98// Note that this is the only place where we add Watcher so the DCHECK
99// guarantees that there are no duplicates.
100void ClauseManager::AttachOnFalse(Literal literal, Literal blocking_literal,
101 SatClause* clause) {
102 SCOPED_TIME_STAT(&stats_);
103 DCHECK(is_clean_);
104 DCHECK(!WatcherListContains(watchers_on_false_[literal], *clause));
105 watchers_on_false_[literal].push_back(Watcher(clause, blocking_literal));
106}
107
// Two-watched-literal propagation for all the clauses watching
// `false_literal`. For each watcher we either keep it (true blocking
// literal), update its blocking literal, move the watch to another non-false
// literal, detect a conflict (returns false, conflict stored in the trail),
// or propagate the remaining unassigned watched literal.
bool ClauseManager::PropagateOnFalse(Literal false_literal, Trail* trail) {
  SCOPED_TIME_STAT(&stats_);
  DCHECK(is_clean_);
  std::vector<Watcher>& watchers = watchers_on_false_[false_literal];
  const auto assignment = AssignmentView(trail->Assignment());

  // Note(user): It sounds better to inspect the list in order, this is because
  // small clauses like binary or ternary clauses will often propagate and thus
  // stay at the beginning of the list.
  //
  // Watchers that stay in this list are compacted towards the front via
  // new_it; this initial loop just skips the already-in-place prefix of
  // watchers whose blocking literal is true.
  auto new_it = watchers.begin();
  const auto end = watchers.end();
  while (new_it != end && assignment.LiteralIsTrue(new_it->blocking_literal)) {
    ++new_it;
  }
  for (auto it = new_it; it != end; ++it) {
    // Don't even look at the clause memory if the blocking literal is true.
    if (assignment.LiteralIsTrue(it->blocking_literal)) {
      *new_it++ = *it;
      continue;
    }
    ++num_inspected_clauses_;

    // If the other watched literal is true, just change the blocking literal.
    // Note that we use the fact that the first two literals of the clause are
    // the ones currently watched.
    //
    // Since false_literal is one of the two watched literals, XOR-ing its
    // index with the indices of literals[0] and literals[1] yields the index
    // of the other watched literal without a branch.
    Literal* literals = it->clause->literals();
    const Literal other_watched_literal(
        LiteralIndex(literals[0].Index().value() ^ literals[1].Index().value() ^
                     false_literal.Index().value()));
    if (assignment.LiteralIsTrue(other_watched_literal)) {
      *new_it = *it;
      new_it->blocking_literal = other_watched_literal;
      ++new_it;
      ++num_inspected_clause_literals_;
      continue;
    }

    // Look for another literal to watch. We go through the list in a cyclic
    // fashion from start. The first two literals can be ignored as they are the
    // watched ones.
    {
      const int start = it->start_index;
      const int size = it->clause->size();
      DCHECK_GE(start, 2);

      int i = start;
      while (i < size && assignment.LiteralIsFalse(literals[i])) ++i;
      num_inspected_clause_literals_ += i - start + 2;
      if (i >= size) {
        // Wrap around: [start, size) was all false, so scan [2, start).
        i = 2;
        while (i < start && assignment.LiteralIsFalse(literals[i])) ++i;
        num_inspected_clause_literals_ += i - 2;
        if (i >= start) i = size;
      }
      if (i < size) {
        // literal[i] is unassigned or true, it's now the new literal to watch.
        // Note that by convention, we always keep the two watched literals at
        // the beginning of the clause.
        //
        // The emplace_back() below cannot invalidate the iterators on
        // `watchers`: the new watched literal is not false, so its watch list
        // is a different vector than the one of `false_literal`.
        literals[0] = other_watched_literal;
        literals[1] = literals[i];
        literals[i] = false_literal;
        watchers_on_false_[literals[1]].emplace_back(
            it->clause, other_watched_literal, i + 1);
        continue;
      }
    }

    // At this point other_watched_literal is either false or unassigned, all
    // other literals are false.
    if (assignment.LiteralIsFalse(other_watched_literal)) {
      // Conflict: All literals of it->clause are false.
      //
      // Note(user): we could avoid a copy here, but the conflict analysis
      // complexity will be a lot higher than this anyway.
      trail->MutableConflict()->assign(it->clause->begin(), it->clause->end());
      trail->SetFailingSatClause(it->clause);
      num_inspected_clause_literals_ += it - watchers.begin() + 1;
      // Close the gap [new_it, it) left by watchers moved to other lists; the
      // not-yet-inspected suffix [it, end) stays watched as-is.
      watchers.erase(new_it, it);
      return false;
    } else {
      // Propagation: other_watched_literal is unassigned, set it to true and
      // put it at position 0. Note that the position 0 is important because
      // we will need later to recover the literal that was propagated from the
      // clause using this convention.
      literals[0] = other_watched_literal;
      literals[1] = false_literal;
      reasons_[trail->Index()] = it->clause;
      trail->Enqueue(other_watched_literal, propagator_id_);
      *new_it++ = *it;
    }
  }
  num_inspected_clause_literals_ += watchers.size(); // The blocking ones.
  watchers.erase(new_it, end);
  return true;
}
203
205 const int old_index = trail->Index();
206 while (trail->Index() == old_index && propagation_trail_index_ < old_index) {
207 const Literal literal = (*trail)[propagation_trail_index_++];
208 if (!PropagateOnFalse(literal.Negated(), trail)) return false;
209 }
210 return true;
211}
212
213absl::Span<const Literal> ClauseManager::Reason(const Trail& /*trail*/,
214 int trail_index,
215 int64_t /*conflict_id*/) const {
216 return reasons_[trail_index]->PropagationReason();
217}
218
219SatClause* ClauseManager::ReasonClause(int trail_index) const {
220 return reasons_[trail_index];
221}
222
223bool ClauseManager::AddClause(absl::Span<const Literal> literals) {
224 return AddClause(literals, trail_, -1);
225}
226
227bool ClauseManager::AddClause(absl::Span<const Literal> literals, Trail* trail,
228 int lbd) {
229 SatClause* clause = SatClause::Create(literals);
230 clauses_.push_back(clause);
231 if (add_clause_callback_ != nullptr) add_clause_callback_(lbd, literals);
232 return AttachAndPropagate(clause, trail);
233}
234
235SatClause* ClauseManager::AddRemovableClause(absl::Span<const Literal> literals,
236 Trail* trail, int lbd) {
237 SatClause* clause = SatClause::Create(literals);
238 clauses_.push_back(clause);
239 if (add_clause_callback_ != nullptr) add_clause_callback_(lbd, literals);
240 CHECK(AttachAndPropagate(clause, trail));
241 return clause;
242}
243
// Sets up the 2-watchers data structure. It selects two non-false literals
// and attaches the clause to the event: one of the watched literals become
// false. It returns false if the clause only contains literals assigned to
// false. If only one literal is not false, it propagates it to true if it
// is not already assigned.
bool ClauseManager::AttachAndPropagate(SatClause* clause, Trail* trail) {
  SCOPED_TIME_STAT(&stats_);

  const int size = clause->size();
  Literal* literals = clause->literals();

  // Select the first two literals that are not assigned to false and put them
  // on position 0 and 1.
  int num_literal_not_false = 0;
  for (int i = 0; i < size; ++i) {
    if (!trail->Assignment().LiteralIsFalse(literals[i])) {
      std::swap(literals[i], literals[num_literal_not_false]);
      ++num_literal_not_false;
      if (num_literal_not_false == 2) {
        break;
      }
    }
  }

  // Returns false if all the literals were false.
  // This should only happen on an UNSAT problem, and there is no need to attach
  // the clause in this case.
  if (num_literal_not_false == 0) return false;

  if (num_literal_not_false == 1) {
    // To maintain the validity of the 2-watcher algorithm, we need to watch
    // the false literal with the highest decision level.
    int max_level = trail->Info(literals[1].Variable()).level;
    for (int i = 2; i < size; ++i) {
      const int level = trail->Info(literals[i].Variable()).level;
      if (level > max_level) {
        max_level = level;
        std::swap(literals[1], literals[i]);
      }
    }

    // Propagates literals[0] if it is unassigned.
    if (!trail->Assignment().LiteralIsTrue(literals[0])) {
      reasons_[trail->Index()] = clause;
      trail->Enqueue(literals[0], propagator_id_);
    }
  }

  // Watch the first two literals, each using the other as blocking literal.
  ++num_watched_clauses_;
  AttachOnFalse(literals[0], literals[1], clause);
  AttachOnFalse(literals[1], literals[0], clause);
  return true;
}
297
298void ClauseManager::Attach(SatClause* clause, Trail* trail) {
299 Literal* literals = clause->literals();
300 DCHECK(!trail->Assignment().LiteralIsAssigned(literals[0]));
301 DCHECK(!trail->Assignment().LiteralIsAssigned(literals[1]));
302
303 ++num_watched_clauses_;
304 AttachOnFalse(literals[0], literals[1], clause);
305 AttachOnFalse(literals[1], literals[0], clause);
306}
307
308void ClauseManager::InternalDetach(SatClause* clause) {
309 --num_watched_clauses_;
310 const size_t size = clause->size();
311 if (drat_proof_handler_ != nullptr && size > 2) {
312 drat_proof_handler_->DeleteClause({clause->begin(), size});
313 }
314 clauses_info_.erase(clause);
315 clause->Clear();
316}
317
319 InternalDetach(clause);
320 is_clean_ = false;
321 needs_cleaning_.Set(clause->FirstLiteral());
322 needs_cleaning_.Set(clause->SecondLiteral());
323}
324
326 InternalDetach(clause);
327 for (const Literal l : {clause->FirstLiteral(), clause->SecondLiteral()}) {
328 needs_cleaning_.Clear(l);
329 RemoveIf(&(watchers_on_false_[l]), [](const Watcher& watcher) {
330 return watcher.clause->IsRemoved();
331 });
332 }
333}
334
336 if (!all_clauses_are_attached_) return;
337 all_clauses_are_attached_ = false;
338
339 // This is easy, and this allows to reset memory if some watcher lists where
340 // really long at some point.
341 is_clean_ = true;
342 num_watched_clauses_ = 0;
343 watchers_on_false_.clear();
344}
345
347 if (all_clauses_are_attached_) return;
348 all_clauses_are_attached_ = true;
349
350 needs_cleaning_.ResetAllToFalse(); // This doesn't resize it.
351 watchers_on_false_.resize(needs_cleaning_.size().value());
352
354 for (SatClause* clause : clauses_) {
355 ++num_watched_clauses_;
356 DCHECK_GE(clause->size(), 2);
357 AttachOnFalse(clause->FirstLiteral(), clause->SecondLiteral(), clause);
358 AttachOnFalse(clause->SecondLiteral(), clause->FirstLiteral(), clause);
359 }
360}
361
362// This one do not need the clause to be detached.
364 DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
365 if (drat_proof_handler_ != nullptr) {
366 drat_proof_handler_->AddClause({true_literal});
367 }
368 // TODO(user): remove the test when the DRAT issue with fixed literal is
369 // resolved.
370 if (!trail_->Assignment().LiteralIsTrue(true_literal)) {
371 trail_->EnqueueWithUnitReason(true_literal);
372
373 // Even when all clauses are detached, we can propagate the implication
374 // graph and we do that right away.
375 return implication_graph_->Propagate(trail_);
376 }
377 return true;
378}
379
380// TODO(user): We could do something slower if the clauses are attached like
381// we do for InprocessingRewriteClause().
383 DCHECK(!all_clauses_are_attached_);
384 if (drat_proof_handler_ != nullptr) {
385 drat_proof_handler_->DeleteClause(clause->AsSpan());
386 }
387 clauses_info_.erase(clause);
388 clause->Clear();
389}
390
392 SatClause* clause, absl::Span<const Literal> new_clause) {
393 if (new_clause.empty()) return false; // UNSAT.
394
395 if (DEBUG_MODE) {
396 for (const Literal l : new_clause) {
397 DCHECK(!trail_->Assignment().LiteralIsAssigned(l));
398 }
399 }
400
401 if (new_clause.size() == 1) {
402 if (!InprocessingFixLiteral(new_clause[0])) return false;
404 return true;
405 }
406
407 if (new_clause.size() == 2) {
408 CHECK(implication_graph_->AddBinaryClause(new_clause[0], new_clause[1]));
410 return true;
411 }
412
413 if (drat_proof_handler_ != nullptr) {
414 // We must write the new clause before we delete the old one.
415 drat_proof_handler_->AddClause(new_clause);
416 drat_proof_handler_->DeleteClause(clause->AsSpan());
417 }
418
419 if (all_clauses_are_attached_) {
420 // We can still rewrite the clause, but it is inefficient. We first
421 // detach it in a non-lazy way.
422 --num_watched_clauses_;
423 clause->Clear();
424 for (const Literal l : {clause->FirstLiteral(), clause->SecondLiteral()}) {
425 needs_cleaning_.Clear(l);
426 RemoveIf(&(watchers_on_false_[l]), [](const Watcher& watcher) {
427 return watcher.clause->IsRemoved();
428 });
429 }
430 }
431
432 clause->Rewrite(new_clause);
433
434 // And we reattach it.
435 if (all_clauses_are_attached_) Attach(clause, trail_);
436 return true;
437}
438
440 absl::Span<const Literal> new_clause) {
441 DCHECK(!new_clause.empty());
442 DCHECK(!all_clauses_are_attached_);
443 if (DEBUG_MODE) {
444 for (const Literal l : new_clause) {
445 DCHECK(!trail_->Assignment().LiteralIsAssigned(l));
446 }
447 }
448
449 if (new_clause.size() == 1) {
450 // TODO(user): We should return false...
451 if (!InprocessingFixLiteral(new_clause[0])) return nullptr;
452 return nullptr;
453 }
454
455 if (new_clause.size() == 2) {
456 implication_graph_->AddBinaryClause(new_clause[0], new_clause[1]);
457 return nullptr;
458 }
459
460 SatClause* clause = SatClause::Create(new_clause);
461 clauses_.push_back(clause);
462 return clause;
463}
464
466 SCOPED_TIME_STAT(&stats_);
467 for (const LiteralIndex index : needs_cleaning_.PositionsSetAtLeastOnce()) {
468 DCHECK(needs_cleaning_[index]);
469 RemoveIf(&(watchers_on_false_[index]), [](const Watcher& watcher) {
470 return watcher.clause->IsRemoved();
471 });
472 needs_cleaning_.Clear(index);
473 }
474 needs_cleaning_.NotifyAllClear();
475 is_clean_ = true;
476}
477
478// We also update to_minimize_index_/to_probe_index_ correctly.
479//
480// TODO(user): If more indices are needed, generalize the code to a vector of
481// indices.
483 DCHECK(is_clean_);
484
485 int new_size = 0;
486 const int old_size = clauses_.size();
487 for (int i = 0; i < old_size; ++i) {
488 if (i == to_minimize_index_) to_minimize_index_ = new_size;
489 if (i == to_first_minimize_index_) to_first_minimize_index_ = new_size;
490 if (i == to_probe_index_) to_probe_index_ = new_size;
491 if (clauses_[i]->IsRemoved()) {
492 delete clauses_[i];
493 } else {
494 clauses_[new_size++] = clauses_[i];
495 }
496 }
497 clauses_.resize(new_size);
498
499 if (to_minimize_index_ > new_size) to_minimize_index_ = new_size;
500 if (to_first_minimize_index_ > new_size) to_first_minimize_index_ = new_size;
501 if (to_probe_index_ > new_size) to_probe_index_ = new_size;
502}
503
505 for (; to_first_minimize_index_ < clauses_.size();
506 ++to_first_minimize_index_) {
507 if (clauses_[to_first_minimize_index_]->IsRemoved()) continue;
508 if (!IsRemovable(clauses_[to_first_minimize_index_])) {
509 // If the round-robin is in-sync with the new clauses, we may as well
510 // count this minimization as part of the round-robin and advance both
511 // indexes.
512 if (to_minimize_index_ == to_first_minimize_index_) {
513 ++to_minimize_index_;
514 }
515 return clauses_[to_first_minimize_index_++];
516 }
517 }
518 return nullptr;
519}
520
522 for (; to_minimize_index_ < clauses_.size(); ++to_minimize_index_) {
523 if (clauses_[to_minimize_index_]->IsRemoved()) continue;
524 if (!IsRemovable(clauses_[to_minimize_index_])) {
525 return clauses_[to_minimize_index_++];
526 }
527 }
528 return nullptr;
529}
530
532 for (; to_probe_index_ < clauses_.size(); ++to_probe_index_) {
533 if (clauses_[to_probe_index_]->IsRemoved()) continue;
534 return clauses_[to_probe_index_++];
535 }
536 return nullptr;
537}
538
539// ----- BinaryImplicationGraph -----
540
541void BinaryImplicationGraph::Resize(int num_variables) {
542 SCOPED_TIME_STAT(&stats_);
543 bfs_stack_.resize(num_variables << 1);
544 implications_.resize(num_variables << 1);
545 implies_something_.resize(num_variables << 1);
546 might_have_dups_.resize(num_variables << 1);
547 is_redundant_.resize(implications_.size());
548 is_removed_.resize(implications_.size(), false);
549 estimated_sizes_.resize(implications_.size(), 0);
550 in_direct_implications_.resize(implications_.size(), false);
551 reasons_.resize(num_variables);
552}
553
554void BinaryImplicationGraph::NotifyPossibleDuplicate(Literal a) {
555 if (might_have_dups_[a.Index()]) return;
556 might_have_dups_[a.Index()] = true;
557 to_clean_.push_back(a);
558}
559
561 for (const Literal l : to_clean_) {
562 might_have_dups_[l.Index()] = false;
563 gtl::STLSortAndRemoveDuplicates(&implications_[l.Index()]);
564 }
565 to_clean_.clear();
566}
567
568// TODO(user): Not all of the solver knows about representative literal, do
569// use them here to maintains invariant? Explore this when we start cleaning our
570// clauses using equivalence during search. We can easily do it for every
571// conflict we learn instead of here.
573 SCOPED_TIME_STAT(&stats_);
574
575 // Tricky: If this is the first clause, the propagator will be added and
576 // assumed to be in a "propagated" state. This makes sure this is the case.
577 if (no_constraint_ever_added_) propagation_trail_index_ = trail_->Index();
578 no_constraint_ever_added_ = false;
579
580 if (drat_proof_handler_ != nullptr) {
581 // TODO(user): Like this we will duplicate all binary clause from the
582 // problem. However this leads to a simpler API (since we don't need to
583 // special case the loading of the original clauses) and we mainly use drat
584 // proof for testing anyway.
585 drat_proof_handler_->AddClause({a, b});
586 }
587
588 if (is_redundant_[a.Index()]) a = Literal(representative_of_[a.Index()]);
589 if (is_redundant_[b.Index()]) b = Literal(representative_of_[b.Index()]);
590 if (a == b.Negated()) return true;
591 if (a == b) return FixLiteral(a);
592
593 DCHECK(!is_removed_[a]);
594 DCHECK(!is_removed_[b]);
595 estimated_sizes_[a.NegatedIndex()]++;
596 estimated_sizes_[b.NegatedIndex()]++;
597 implications_[a.NegatedIndex()].push_back(b);
598 implications_[b.NegatedIndex()].push_back(a);
599 implies_something_.Set(a.NegatedIndex());
600 implies_something_.Set(b.NegatedIndex());
601 NotifyPossibleDuplicate(a);
602 NotifyPossibleDuplicate(b);
603 is_dag_ = false;
604
605 if (enable_sharing_ && add_binary_callback_ != nullptr) {
606 add_binary_callback_(a, b);
607 }
608
609 const auto& assignment = trail_->Assignment();
610 if (trail_->CurrentDecisionLevel() == 0) {
611 DCHECK(!assignment.LiteralIsAssigned(a));
612 DCHECK(!assignment.LiteralIsAssigned(b));
613 } else {
614 if (assignment.LiteralIsFalse(a)) {
615 if (assignment.LiteralIsAssigned(b)) {
616 if (assignment.LiteralIsFalse(b)) return false;
617 } else {
618 reasons_[trail_->Index()] = a;
619 trail_->Enqueue(b, propagator_id_);
620 }
621 } else if (assignment.LiteralIsFalse(b)) {
622 if (!assignment.LiteralIsAssigned(a)) {
623 reasons_[trail_->Index()] = b;
624 trail_->Enqueue(a, propagator_id_);
625 }
626 }
627 }
628
629 return true;
630}
631
633 absl::Span<const Literal> at_most_one) {
634 DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
635 if (at_most_one.size() <= 1) return true;
636
637 // Same as for AddBinaryClause().
638 if (no_constraint_ever_added_) propagation_trail_index_ = trail_->Index();
639 no_constraint_ever_added_ = false;
640
641 // Temporarily copy the at_most_one constraint at the end of
642 // at_most_one_buffer_. It will be cleaned up and added by
643 // CleanUpAndAddAtMostOnes().
644 const int base_index = at_most_one_buffer_.size();
645 at_most_one_buffer_.push_back(Literal(LiteralIndex(at_most_one.size())));
646 at_most_one_buffer_.insert(at_most_one_buffer_.end(), at_most_one.begin(),
647 at_most_one.end());
648
649 is_dag_ = false;
650 return CleanUpAndAddAtMostOnes(base_index);
651}
652
653// TODO(user): remove dupl with ClauseManager::InprocessingFixLiteral().
654//
655// Note that we currently do not support calling this at a positive level since
656// we might loose the fixing on backtrack otherwise.
657bool BinaryImplicationGraph::FixLiteral(Literal true_literal) {
658 CHECK_EQ(trail_->CurrentDecisionLevel(), 0);
659 if (trail_->Assignment().LiteralIsTrue(true_literal)) return true;
660 if (trail_->Assignment().LiteralIsFalse(true_literal)) return false;
661
662 if (drat_proof_handler_ != nullptr) {
663 drat_proof_handler_->AddClause({true_literal});
664 }
665
666 trail_->EnqueueWithUnitReason(true_literal);
667 return Propagate(trail_);
668}
669
// This works by doing a linear scan on the at_most_one_buffer_ and
// cleaning/copying the at most ones on the fly to the beginning of the same
// buffer.
//
// Buffer layout: each at most one is stored as [size, literals...], with the
// size encoded as a Literal. Returns false if fixing literals along the way
// shows the problem to be UNSAT.
bool BinaryImplicationGraph::CleanUpAndAddAtMostOnes(int base_index) {
  const VariablesAssignment& assignment = trail_->Assignment();
  int local_end = base_index;
  const int buffer_size = at_most_one_buffer_.size();
  for (int i = base_index; i < buffer_size;) {
    // Process a new at most one.
    // It will be copied into buffer[local_start + 1, local_end].
    // With its size at buffer[local_start].
    const int local_start = local_end;

    // Update the iterator now. Even if the current at_most_one is reduced away,
    // local_start will still point to the next one, or to the end of the
    // buffer.
    if (i == at_most_one_iterator_) {
      at_most_one_iterator_ = local_start;
    }

    // We have an at_most_one starting at i, and we increment i to the next one.
    const absl::Span<const Literal> initial_amo = AtMostOne(i);
    i += initial_amo.size() + 1;

    // Reserve space for size.
    local_end++;
    bool set_all_left_to_false = false;
    for (const Literal l : initial_amo) {
      // Skip false or removed literals. If one literal is already true, the
      // rest of the at most one must be fixed to false (done further below).
      if (assignment.LiteralIsFalse(l)) continue;
      if (is_removed_[l]) continue;
      if (!set_all_left_to_false && assignment.LiteralIsTrue(l)) {
        set_all_left_to_false = true;
        continue;
      }
      at_most_one_buffer_[local_end++] = RepresentativeOf(l);
    }
    at_most_one_buffer_[local_start] =
        Literal(LiteralIndex(local_end - local_start - 1));

    // Deal with duplicates.
    // Any duplicate in an "at most one" must be false.
    bool some_duplicates = false;
    if (!set_all_left_to_false) {
      // Sort the copied amo.
      // We only want to expose a const AtMostOne() so we sort directly here.
      Literal* pt = &at_most_one_buffer_[local_start + 1];
      std::sort(pt, pt + AtMostOne(local_start).size());

      LiteralIndex previous = kNoLiteralIndex;
      bool remove_previous = false;
      int new_local_end = local_start + 1;
      for (const Literal l : AtMostOne(local_start)) {
        if (l.Index() == previous) {
          // l appears twice: at most one of them can be true, so l is false.
          if (assignment.LiteralIsTrue(l)) return false;
          if (!assignment.LiteralIsFalse(l)) {
            if (!FixLiteral(l.Negated())) return false;
          }
          remove_previous = true;
          some_duplicates = true;
          continue;
        }

        // We need to pay attention to triplet or more of equal elements, so
        // it is why we need this boolean and can't just remove it right away.
        if (remove_previous) {
          --new_local_end;
          remove_previous = false;
        }
        previous = l.Index();
        at_most_one_buffer_[new_local_end++] = l;
      }
      if (remove_previous) --new_local_end;

      // Update local end and the amo size.
      local_end = new_local_end;
      at_most_one_buffer_[local_start] =
          Literal(LiteralIndex(local_end - local_start - 1));
    }

    // If there was some duplicates, we need to rescan to see if a literal
    // didn't become true because its negation was appearing twice!
    if (some_duplicates) {
      int new_local_end = local_start + 1;
      for (const Literal l : AtMostOne(local_start)) {
        if (assignment.LiteralIsFalse(l)) continue;
        if (!set_all_left_to_false && assignment.LiteralIsTrue(l)) {
          set_all_left_to_false = true;
          continue;
        }
        at_most_one_buffer_[new_local_end++] = l;
      }

      // Update local end and the amo size.
      local_end = new_local_end;
      at_most_one_buffer_[local_start] =
          Literal(LiteralIndex(local_end - local_start - 1));
    }

    // Deal with all false.
    // One literal of the at most one is true, so every other one must be
    // fixed to false.
    if (set_all_left_to_false) {
      for (const Literal l : AtMostOne(local_start)) {
        if (assignment.LiteralIsFalse(l)) continue;
        if (assignment.LiteralIsTrue(l)) return false;
        if (!FixLiteral(l.Negated())) return false;
      }

      // Erase this at_most_one.
      local_end = local_start;
      continue;
    }

    // Create a Span<> to simplify the code below.
    const absl::Span<const Literal> at_most_one = AtMostOne(local_start);

    // We expand small sizes into implications.
    // TODO(user): Investigate what the best threshold is.
    if (at_most_one.size() <= std::max(2, at_most_one_max_expansion_size_)) {
      for (const Literal a : at_most_one) {
        for (const Literal b : at_most_one) {
          if (a == b) continue;
          implications_[a].push_back(b.Negated());
          implies_something_.Set(a);
          NotifyPossibleDuplicate(a);
        }
      }

      // This will erase the at_most_one from the buffer.
      local_end = local_start;
      continue;
    }

    // Index the new at most one.
    for (const Literal l : at_most_one) {
      if (l.Index() >= at_most_ones_.size()) {
        at_most_ones_.resize(l.Index().value() + 1);
      }
      DCHECK(!is_redundant_[l]);
      at_most_ones_[l].push_back(local_start);
      implies_something_.Set(l);
    }
  }

  // Keep only the cleaned-up prefix of the buffer.
  at_most_one_buffer_.resize(local_end);
  return true;
}
815
817 SCOPED_TIME_STAT(&stats_);
818
819 if (IsEmpty()) {
821 return true;
822 }
824
825 const auto assignment = AssignmentView(trail->Assignment());
826 const auto implies_something = implies_something_.view();
827 auto* implications = implications_.data();
828
830 const Literal true_literal = (*trail)[propagation_trail_index_++];
831 DCHECK(assignment.LiteralIsTrue(true_literal));
832 if (!implies_something[true_literal]) continue;
833
834 // Note(user): This update is not exactly correct because in case of
835 // conflict we don't inspect that much clauses. But doing ++num_inspections_
836 // inside the loop does slow down the code by a few percent.
837 const absl::Span<const Literal> implied =
838 implications[true_literal.Index().value()];
839 num_inspections_ += implied.size();
840 for (const Literal literal : implied) {
841 if (assignment.LiteralIsTrue(literal)) {
842 // Note(user): I tried to update the reason here if the literal was
843 // enqueued after the true_literal on the trail. This property is
844 // important for ComputeFirstUIPConflict() to work since it needs the
845 // trail order to be a topological order for the deduction graph.
846 // But the performance was not too good...
847 continue;
848 }
849
850 ++num_propagations_;
851 if (assignment.LiteralIsFalse(literal)) {
852 // Conflict.
853 *(trail->MutableConflict()) = {true_literal.Negated(), literal};
854 return false;
855 } else {
856 // Propagation.
857 reasons_[trail->Index()] = true_literal.Negated();
858 trail->FastEnqueue(literal);
859 }
860 }
861
862 // Propagate the at_most_one constraints.
863 if (true_literal.Index() < at_most_ones_.size()) {
864 for (const int start : at_most_ones_[true_literal]) {
865 bool seen = false;
866 for (const Literal literal : AtMostOne(start)) {
867 ++num_inspections_;
868 if (literal == true_literal) {
869 if (DEBUG_MODE) {
870 DCHECK(!seen);
871 seen = true;
872 }
873 continue;
874 }
875 if (assignment.LiteralIsFalse(literal)) continue;
876
877 ++num_propagations_;
878 if (assignment.LiteralIsTrue(literal)) {
879 // Conflict.
880 *(trail->MutableConflict()) = {true_literal.Negated(),
881 literal.Negated()};
882 return false;
883 } else {
884 // Propagation.
885 reasons_[trail->Index()] = true_literal.Negated();
886 trail->FastEnqueue(literal.Negated());
887 }
888 }
889 }
890 }
891 }
892
893 return true;
894}
895
896absl::Span<const Literal> BinaryImplicationGraph::Reason(
897 const Trail& /*trail*/, int trail_index, int64_t /*conflict_id*/) const {
898 return {&reasons_[trail_index], 1};
899}
900
901// Here, we remove all the literal whose negation are implied by the negation of
902// the 1-UIP literal (which always appear first in the given conflict). Note
903// that this algorithm is "optimal" in the sense that it leads to a minimized
904// conflict with a backjump level as low as possible. However, not all possible
905// literals are removed.
906//
907// TODO(user): Also consider at most one?
    std::vector<Literal>* conflict) {
  SCOPED_TIME_STAT(&stats_);
  dfs_stack_.clear();

  // Compute the reachability from the literal "not(conflict->front())" using
  // an iterative dfs.
  const LiteralIndex root_literal_index = conflict->front().NegatedIndex();
  is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
  is_marked_.Set(root_literal_index);

  // TODO(user): This sounds like a good idea, but somehow it seems better not
  // to do that even though it is almost for free. Investigate more.
  //
  // The idea here is that since we already compute the reachability from the
  // root literal, we can use this computation to remove any implication
  // root_literal => b if there is already root_literal => a and b is reachable
  // from a.
  const bool also_prune_direct_implication_list = false;

  // We treat the direct implications differently so we can also remove the
  // redundant implications from this list at the same time.
  auto& direct_implications = implications_[root_literal_index];
  for (const Literal l : direct_implications) {
    if (is_marked_[l]) continue;
    // Iterative DFS marking everything reachable from l.
    dfs_stack_.push_back(l);
    while (!dfs_stack_.empty()) {
      const LiteralIndex index = dfs_stack_.back().Index();
      dfs_stack_.pop_back();
      if (!is_marked_[index]) {
        is_marked_.Set(index);
        for (const Literal implied : implications_[index]) {
          if (!is_marked_[implied]) dfs_stack_.push_back(implied);
        }
      }
    }

    // The "trick" is to unmark 'l'. This way, if we explore it twice, it means
    // that this l is reachable from some other 'l' from the direct implication
    // list. Remarks:
    // - We don't lose too much complexity when this happens since a literal
    //   can be unmarked only once, so in the worst case we loop twice over its
    //   children. Moreover, this literal will be pruned for later calls.
    // - This is correct, i.e. we can't prune too many literals because of a
    //   strongly connected component. Proof by contradiction: If we take the
    //   first (in direct_implications) literal from a removed SCC, it must
    //   have marked all the others. But because they are marked, they will not
    //   be explored again and so can't mark the first literal.
    if (also_prune_direct_implication_list) {
      is_marked_.Clear(l);
    }
  }

  // Now we can prune the direct implications list and make sure all the
  // literals there are marked.
  if (also_prune_direct_implication_list) {
    int new_size = 0;
    for (const Literal l : direct_implications) {
      if (!is_marked_[l]) {
        is_marked_.Set(l);
        direct_implications[new_size] = l;
        ++new_size;
      }
    }
    if (new_size < direct_implications.size()) {
      num_redundant_implications_ += direct_implications.size() - new_size;
      direct_implications.resize(new_size);
    }
  }

  // Finally, drop from the conflict every literal whose negation is reachable
  // (i.e. is marked): such a literal is implied by conflict->front().
  RemoveRedundantLiterals(conflict);
}
980
// Same as MinimizeConflictWithReachability() but also marks (in the given
// SparseBitset) the reachable literals already assigned to false. These
// literals will be implied if the 1-UIP literal is assigned to false, and the
// classic minimization algorithm can take advantage of that.
    const Trail& trail, std::vector<Literal>* conflict,
  SCOPED_TIME_STAT(&stats_);
  DCHECK(!conflict->empty());
  is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
  // Mark everything reachable (via binary implications) from the negation of
  // the first UIP literal.
  MarkDescendants(conflict->front().Negated());
  for (const LiteralIndex i : is_marked_.PositionsSetAtLeastOnce()) {
    // TODO(user): if this is false, then we actually have a conflict of size 2.
    // This can only happen if the binary clause was not propagated properly
    // if for instance we do chronological backtracking without re-enqueuing the
    // consequence of a binary clause.
    if (trail.Assignment().LiteralIsTrue(Literal(i))) {
      marked->Set(Literal(i).Variable());
    }
  }
  RemoveRedundantLiterals(conflict);
}
1003
// Same as MinimizeConflictFirst() but takes advantage of this reachability
// computation to remove redundant implications in the implication list of the
// first UIP conflict.
    const Trail& /*trail*/, std::vector<Literal>* conflict,
    absl::BitGenRef random) {
  SCOPED_TIME_STAT(&stats_);
  const LiteralIndex root_literal_index = conflict->front().NegatedIndex();
  is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
  is_marked_.Set(root_literal_index);

  int new_size = 0;
  auto& direct_implications = implications_[root_literal_index];

  // The randomization allows to find more redundant implications since to
  // remove b given a => b, a must be before b in direct_implications. Note that
  // a std::reverse() could work too. But randomization seems to work better.
  // Probably because it has other impact on the search tree.
  std::shuffle(direct_implications.begin(), direct_implications.end(), random);
  dfs_stack_.clear();
  for (const Literal l : direct_implications) {
    if (is_marked_[l]) {
      // The literal is already marked! so it must be implied by one of the
      // previous literal in the direct_implications list. We can safely remove
      // it.
      continue;
    }
    direct_implications[new_size++] = l;
    // Iterative DFS marking everything reachable from l.
    dfs_stack_.push_back(l);
    while (!dfs_stack_.empty()) {
      const LiteralIndex index = dfs_stack_.back().Index();
      dfs_stack_.pop_back();
      if (!is_marked_[index]) {
        is_marked_.Set(index);
        for (const Literal implied : implications_[index]) {
          if (!is_marked_[implied]) dfs_stack_.push_back(implied);
        }
      }
    }
  }
  if (new_size < direct_implications.size()) {
    num_redundant_implications_ += direct_implications.size() - new_size;
    direct_implications.resize(new_size);
  }
  RemoveRedundantLiterals(conflict);
}
1050
1051void BinaryImplicationGraph::RemoveRedundantLiterals(
1052 std::vector<Literal>* conflict) {
1053 SCOPED_TIME_STAT(&stats_);
1054 int new_index = 1;
1055 for (int i = 1; i < conflict->size(); ++i) {
1056 if (!is_marked_[(*conflict)[i].NegatedIndex()]) {
1057 (*conflict)[new_index] = (*conflict)[i];
1058 ++new_index;
1059 }
1060 }
1061 if (new_index < conflict->size()) {
1062 ++num_minimization_;
1063 num_literals_removed_ += conflict->size() - new_index;
1064 conflict->resize(new_index);
1065 }
1066}
1067
1068// TODO(user): Also consider at most one?
    const Trail& trail, std::vector<Literal>* conflict) {
  SCOPED_TIME_STAT(&stats_);
  is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
  is_simplified_.ClearAndResize(LiteralIndex(implications_.size()));
  // Mark all the literals of the conflict so implications into the conflict
  // can be detected below.
  for (const Literal lit : *conflict) {
    is_marked_.Set(lit);
  }

  // Identify and remove the redundant literals from the given conflict.
  // 1/ If a -> b then a can be removed from the conflict clause.
  //    This is because not b -> not a.
  // 2/ a -> b can only happen if level(a) <= level(b).
  // 3/ Because of 2/, cycles can appear only at the same level.
  //    The vector is_simplified_ is used to avoid removing all elements of a
  //    cycle. Note that this is not optimal in the sense that we may not remove
  //    a literal that can be removed.
  //
  // Note that there is no need to explore the unique literal of the highest
  // decision level since it can't be removed. Because this is a conflict, such
  // literal is always at position 0, so we start directly at 1.
  int index = 1;
  for (int i = 1; i < conflict->size(); ++i) {
    const Literal lit = (*conflict)[i];
    const int lit_level = trail.Info(lit.Variable()).level;
    bool keep_literal = true;
    for (const Literal implied : implications_[lit]) {
      if (is_marked_[implied]) {
        DCHECK_LE(lit_level, trail.Info(implied.Variable()).level);
        // An implied literal that was itself simplified away at the same
        // level does not count as a remover: this is what breaks cycles (3/).
        if (lit_level == trail.Info(implied.Variable()).level &&
            is_simplified_[implied]) {
          continue;
        }
        keep_literal = false;
        break;
      }
    }
    if (keep_literal) {
      (*conflict)[index] = lit;
      ++index;
    } else {
      is_simplified_.Set(lit);
    }
  }
  if (index < conflict->size()) {
    ++num_minimization_;
    num_literals_removed_ += conflict->size() - index;
    conflict->erase(conflict->begin() + index, conflict->end());
  }
}
1119
  SCOPED_TIME_STAT(&stats_);
  DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  if (IsEmpty()) return;

  // Nothing to do if nothing changed since last call.
  const int new_num_fixed = trail_->Index();
  DCHECK_EQ(propagation_trail_index_, new_num_fixed);
  if (num_processed_fixed_variables_ == new_num_fixed) return;

  const VariablesAssignment& assignment = trail_->Assignment();
  is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
  // Process each newly fixed literal on the trail exactly once.
  for (; num_processed_fixed_variables_ < new_num_fixed;
       ++num_processed_fixed_variables_) {
    const Literal true_literal = (*trail_)[num_processed_fixed_variables_];
    if (DEBUG_MODE) {
      // The code assumes that everything is already propagated.
      // Otherwise we will remove implications that didn't propagate yet!
      for (const Literal lit : implications_[true_literal]) {
        DCHECK(trail_->Assignment().LiteralIsTrue(lit));
      }
    }

    // If b is true and a -> b then because not b -> not a, all the
    // implications list that contains b will be marked by this process.
    // And the ones that contains not(b) should correspond to a false literal!
    //
    // TODO(user): This might not be true if we remove implication by
    // transitive reduction and the process was aborted due to the computation
    // limit. I think it will be good to maintain that invariant though,
    // otherwise fixed literals might never be removed from these lists...
    for (const Literal lit : implications_[true_literal.NegatedIndex()]) {
      if (lit.NegatedIndex() < representative_of_.size() &&
          representative_of_[lit.Negated()] != kNoLiteralIndex) {
        // We mark its representative instead.
        is_marked_.Set(representative_of_[lit.Negated()]);
      } else {
        is_marked_.Set(lit.NegatedIndex());
      }
    }
    gtl::STLClearObject(&(implications_[true_literal]));
    gtl::STLClearObject(&(implications_[true_literal.NegatedIndex()]));

    // Drop the at-most-one index lists of the fixed literal; the at-most-one
    // constraints themselves are rebuilt by CleanUpAndAddAtMostOnes() below.
    if (true_literal.Index() < at_most_ones_.size()) {
      gtl::STLClearObject(&(at_most_ones_[true_literal]));
    }
    if (true_literal.NegatedIndex() < at_most_ones_.size()) {
      gtl::STLClearObject(&(at_most_ones_[true_literal.NegatedIndex()]));
    }
  }
  // Only the marked lists can contain a now-true literal; filter them.
  for (const LiteralIndex i : is_marked_.PositionsSetAtLeastOnce()) {
    RemoveIf(&implications_[i], [&assignment](const Literal& lit) {
      return assignment.LiteralIsTrue(lit);
    });
  }

  // TODO(user): This might be a bit slow. Do not call all the time if needed,
  // this shouldn't change the correctness of the code.
  at_most_ones_.clear();
  CHECK(CleanUpAndAddAtMostOnes(/*base_index=*/0));
  DCHECK(InvariantsAreOk());
}
1182
 public:
  // Graph adaptor used by the SCC finder: exposes binary implications, plus
  // the implications implicitly encoded by at-most-one constraints (expanded
  // lazily), as arcs over literal indices. None of the referenced objects are
  // owned and they must outlive this adaptor.
  explicit SccGraph(SccFinder* finder, Implications* graph,
                    AtMostOnes* at_most_ones,
                    std::vector<Literal>* at_most_one_buffer)
      : finder_(*finder),
        implications_(*graph),
        at_most_ones_(*at_most_ones),
        at_most_one_buffer_(*at_most_one_buffer) {}

  // Returns the adjacency list of 'node' (a LiteralIndex value). The returned
  // reference points into tmp_ and is only valid until the next call.
  const std::vector<int32_t>& operator[](int32_t node) const {
    tmp_.clear();
    for (const Literal l : implications_[LiteralIndex(node)]) {
      tmp_.push_back(l.Index().value());
      // If not(l) is on the current dfs path we have not(l) =>* node => l,
      // so l must be true.
      if (finder_.NodeIsInCurrentDfsPath(l.NegatedIndex().value())) {
        to_fix_.push_back(l);
      }
    }
    if (node < at_most_ones_.size()) {
      for (const int start : at_most_ones_[LiteralIndex(node)]) {
        if (start >= at_most_one_already_explored_.size()) {
          at_most_one_already_explored_.resize(start + 1, false);
          previous_node_to_explore_at_most_one_.resize(start + 1);
        }

        // In the presence of at_most_ones_ constraints, expanding them
        // implicitly to implications in the SCC computation can result in a
        // quadratic complexity rather than a linear one in term of the input
        // data structure size. So this test here is critical on problem with
        // large at_most ones like the "ivu06-big.mps.gz" where without it, the
        // full FindStronglyConnectedComponents() take more than an hour instead
        // of less than a second!
        if (at_most_one_already_explored_[start]) {
          // We never expand a node twice.
          const int first_node = previous_node_to_explore_at_most_one_[start];
          DCHECK_NE(node, first_node);

          if (finder_.NodeIsInCurrentDfsPath(first_node)) {
            // If the first node is not settled, then we do explore the
            // at_most_one constraint again. In "Mixed-Integer-Programming:
            // Analyzing 12 years of progress", Tobias Achterberg and Roland
            // Wunderling explain that an at most one need to be looped over at
            // most twice. I am not sure exactly how that works, so for now we
            // are not fully linear, but on actual instances, we only rarely
            // run into this case.
            //
            // Note that we change the previous node to explore at most one
            // since the current node will be settled before the old ones.
            //
            // TODO(user): avoid looping more than twice on the same at most one
            // constraints? Note that the second time we loop we have x => y =>
            // not(x), so we can already detect that x must be false which we
            // detect below.
            previous_node_to_explore_at_most_one_[start] = node;
          } else {
            // The first node is already settled and so are all its child. Only
            // not(first_node) might still need exploring.
            tmp_.push_back(
                Literal(LiteralIndex(first_node)).NegatedIndex().value());
            continue;
          }
        } else {
          at_most_one_already_explored_[start] = true;
          previous_node_to_explore_at_most_one_[start] = node;
        }

        // The at-most-one is stored in the buffer as its length (encoded at
        // position 'start') followed by its literals.
        const absl::Span<const Literal> amo =
            absl::MakeSpan(&at_most_one_buffer_[start + 1],
                           at_most_one_buffer_[start].Index().value());
        for (const Literal l : amo) {
          if (l.Index() == node) continue;
          tmp_.push_back(l.NegatedIndex().value());
          // node => not(l) and l =>* node, so l must be false.
          if (finder_.NodeIsInCurrentDfsPath(l.Index().value())) {
            to_fix_.push_back(l.Negated());
          }
        }
      }
    }
    work_done_ += tmp_.size();
    return tmp_;
  }

  // All these literals were detected to be true during the SCC computation.
  mutable std::vector<Literal> to_fix_;

  // For the deterministic time.
  mutable int64_t work_done_ = 0;

 private:
  const SccFinder& finder_;
  const Implications& implications_;
  const AtMostOnes& at_most_ones_;
  const std::vector<Literal>& at_most_one_buffer_;

  // Scratch adjacency list returned by operator[].
  mutable std::vector<int32_t> tmp_;

  // Used to get a non-quadratic complexity in the presence of at most ones.
  mutable std::vector<bool> at_most_one_already_explored_;
  mutable std::vector<int> previous_node_to_explore_at_most_one_;
};
1291
  // This was already called, and no new constraints were added. Note that new
  // fixed variables cannot create new equivalences, only new binary clauses do.
  if (is_dag_) return true;
  WallTimer wall_timer;
  wall_timer.Start();
  log_info |= VLOG_IS_ON(1);

  // Lets remove all fixed variables first.
  if (!Propagate(trail_)) return false;
  const VariablesAssignment& assignment = trail_->Assignment();
  DCHECK(InvariantsAreOk());

  // TODO(user): We could just do it directly though.
  const int32_t size(implications_.size());
  // NOTE(review): the declaration of `scc` (component storage filled by the
  // SCC finder) appears to have been lost in this extraction — verify
  // against upstream.
  scc.reserve(size);
  double dtime = 0.0;
  {
    SccGraph::SccFinder finder;
    SccGraph graph(&finder, &implications_, &at_most_ones_,
                   &at_most_one_buffer_);
    finder.FindStronglyConnectedComponents(size, graph, &scc);
    dtime += 4e-8 * graph.work_done_;

    // Fix the literals the SCC expansion detected must be true.
    for (const Literal l : graph.to_fix_) {
      if (assignment.LiteralIsFalse(l)) return false;
      if (assignment.LiteralIsTrue(l)) continue;
      if (!FixLiteral(l)) return false;
    }
  }

  // The old values will still be valid.
  representative_of_.resize(size, kNoLiteralIndex);
  is_redundant_.resize(size);

  int num_equivalences = 0;
  reverse_topological_order_.clear();
  for (int index = 0; index < scc.size(); ++index) {
    const absl::Span<int32_t> component = scc[index];

    // If one is fixed then all must be fixed. Note that the reason why the
    // propagation didn't already do that and we don't always get fixed
    // component of size 1 is because of the potential newly fixed literals
    // above.
    //
    // In any case, all fixed literals are marked as redundant.
    {
      bool all_fixed = false;
      bool all_true = false;
      for (const int32_t i : component) {
        const Literal l = Literal(LiteralIndex(i));
        if (trail_->Assignment().LiteralIsAssigned(l)) {
          all_fixed = true;
          all_true = trail_->Assignment().LiteralIsTrue(l);
          break;
        }
      }
      if (all_fixed) {
        for (const int32_t i : component) {
          const Literal l = Literal(LiteralIndex(i));
          if (!is_redundant_[l]) {
            ++num_redundant_literals_;
            is_redundant_.Set(l);
            representative_of_[l] = l.Index();
          }
          const Literal to_fix = all_true ? l : l.Negated();
          if (assignment.LiteralIsFalse(to_fix)) return false;
          if (assignment.LiteralIsTrue(to_fix)) continue;
          // NOTE(review): the two tests above are on `to_fix`, but `l` is
          // fixed here. When all_true is false this looks like it should be
          // FixLiteral(to_fix) — verify against upstream.
          if (!FixLiteral(l)) return false;
        }

        // Next component.
        continue;
      }
    }

    // We ignore variable that appear in no constraints.
    if (component.size() == 1 && is_removed_[LiteralIndex(component[0])]) {
      continue;
    }

    // We always take the smallest literal index (which also corresponds to the
    // smallest BooleanVariable index) as a representative. This make sure that
    // the representative of a literal l and the one of not(l) will be the
    // negation of each other. There is also reason to think that it is
    // heuristically better to use a BooleanVariable that was created first.
    std::sort(component.begin(), component.end());
    const LiteralIndex representative(component[0]);
    reverse_topological_order_.push_back(representative);

    if (component.size() == 1) {
      // Note that because we process list in reverse topological order, this
      // is only needed if there is any equivalence before this point.
      if (num_equivalences > 0) {
        auto& representative_list = implications_[representative];
        for (Literal& ref : representative_list) {
          const LiteralIndex rep = representative_of_[ref];
          if (rep == representative) continue;
          if (rep == kNoLiteralIndex) continue;
          ref = Literal(rep);
        }
        gtl::STLSortAndRemoveDuplicates(&representative_list);
      }
      continue;
    }

    // Sets the representative.
    for (int i = 1; i < component.size(); ++i) {
      const Literal literal = Literal(LiteralIndex(component[i]));
      if (!is_redundant_[literal]) {
        ++num_redundant_literals_;
        is_redundant_.Set(literal);
      }
      representative_of_[literal] = representative;

      // Detect if x <=> not(x) which means unsat. Note that we rely on the
      // fact that when sorted, they will both be consecutive in the list.
      if (Literal(LiteralIndex(component[i - 1])).Negated() == literal) {
        LOG_IF(INFO, log_info) << "Trivially UNSAT in DetectEquivalences()";
        return false;
      }
    }

    // Merge all the lists in implications_[representative].
    // Note that we do not want representative in its own list.
    auto& representative_list = implications_[representative];
    int new_size = 0;
    for (const Literal l : representative_list) {
      const Literal rep = RepresentativeOf(l);
      if (rep.Index() == representative) continue;
      representative_list[new_size++] = rep;
    }
    representative_list.resize(new_size);
    for (int i = 1; i < component.size(); ++i) {
      const Literal literal = Literal(LiteralIndex(component[i]));
      auto& ref = implications_[literal];
      for (const Literal l : ref) {
        const Literal rep = RepresentativeOf(l);
        if (rep.Index() != representative) representative_list.push_back(rep);
      }
      dtime += 1e-8 * static_cast<double>(ref.size());

      // Add representative <=> literal.
      //
      // Remark: this relation does not need to be added to a DRAT proof since
      // the redundant variables should never be used again for a pure SAT
      // problem.
      representative_list.push_back(literal);
      ref.clear();
      ref.push_back(Literal(representative));
    }
    gtl::STLSortAndRemoveDuplicates(&representative_list);
    num_equivalences += component.size() - 1;
  }

  is_dag_ = true;
  if (num_equivalences != 0) {
    // Remap all at most ones. Remove fixed variables, process duplicates. Note
    // that this might result in more implications when we expand small at most
    // one.
    at_most_ones_.clear();
    int saved_trail_index = propagation_trail_index_;
    if (!CleanUpAndAddAtMostOnes(/*base_index=*/0)) return false;

    // This might have run the propagation on a few variables without taking
    // into account the AMOs. Propagate again.
    //
    // TODO(user): Maybe a better alternative is to not propagate when we fix
    // variables inside CleanUpAndAddAtMostOnes().
    if (propagation_trail_index_ != saved_trail_index) {
      propagation_trail_index_ = saved_trail_index;
      Propagate(trail_);
    }
  }

  time_limit_->AdvanceDeterministicTime(dtime);
  const int num_fixed_during_scc =
      trail_->Index() - num_processed_fixed_variables_;
  DCHECK(InvariantsAreOk());
  LOG_IF(INFO, log_info) << "SCC. " << num_equivalences
                         << " redundant equivalent literals. "
                         << num_fixed_during_scc << " fixed. "
                         << " implications left. " << implications_.size()
                         << " literals."
                         << " size of at_most_one buffer = "
                         << at_most_one_buffer_.size() << "."
                         << " dtime: " << dtime
                         << " wtime: " << wall_timer.Get();
  return true;
}
1486
// Note that as a side effect this also does a full "failed literal probing"
// using the binary implication graph only.
//
// TODO(user): Track which literals have new implications, and only process
// the antecedents of these.
  DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  if (time_limit_->LimitReached()) return true;
  if (!DetectEquivalences()) return false;

  // TODO(user): the situation with fixed variable is not really "clean".
  // Simplify the code so we are sure we don't run into issue or have to deal
  // with any of that here.
  if (time_limit_->LimitReached()) return true;
  if (!Propagate(trail_)) return false;
  DCHECK(InvariantsAreOk());
  if (time_limit_->LimitReached()) return true;

  log_info |= VLOG_IS_ON(1);
  WallTimer wall_timer;
  wall_timer.Start();

  int64_t num_fixed = 0;
  int64_t num_new_redundant_implications = 0;
  bool aborted = false;
  tmp_removed_.clear();
  work_done_in_mark_descendants_ = 0;
  int marked_index = 0;

  // For each node we do a graph traversal and only keep the literals
  // at maximum distance 1. This only works because we have a DAG when ignoring
  // the "redundant" literal marked by DetectEquivalences(). Note that we also
  // need no duplicates in the implications list for correctness which is also
  // guaranteed by DetectEquivalences().
  //
  // TODO(user): We should be able to reuse some propagation like it is done for
  // tree-look. Once a node is processed, we just need to process a node that
  // implies it. Test if we can make this faster. Alternatively, only clear
  // a part of is_marked_ (after the first child of root in reverse topo order).
  //
  // TODO(user): Can we exploit the fact that the implication graph is a
  // skew-symmetric graph (isomorphic to its transposed) so that we do less
  // work?
  const LiteralIndex size(implications_.size());
  LiteralIndex previous = kNoLiteralIndex;
  for (const LiteralIndex root : reverse_topological_order_) {
    // In most situation reverse_topological_order_ contains no redundant, fixed
    // or removed variables. But the reverse_topological_order_ is only
    // recomputed when new binary are added to the graph, not when new variable
    // are fixed.
    if (is_redundant_[root]) continue;
    if (trail_->Assignment().LiteralIsAssigned(Literal(root))) continue;

    auto& direct_implications = implications_[root];
    if (direct_implications.empty()) continue;

    // This is a "poor" version of the tree look stuff, but it does show good
    // improvement. If we just processed one of the child of root, we don't
    // need to re-explore it.
    //
    // TODO(user): Another optim we can do is that we never need to expand
    // any node with a reverse topo order smaller or equal to the min of the
    // ones in this list.
    bool clear_previous_reachability = true;
    for (const Literal direct_child : direct_implications) {
      if (direct_child.Index() == previous) {
        clear_previous_reachability = false;
        is_marked_.Clear(previous);
        break;
      }
    }
    if (clear_previous_reachability) {
      is_marked_.ClearAndResize(size);
      marked_index = 0;
    }
    previous = root;

    for (const Literal direct_child : direct_implications) {
      if (is_redundant_[direct_child]) continue;
      if (is_marked_[direct_child]) continue;

      // This is a corner case where because of equivalent literal, root
      // appear in implications_[root], we will remove it below.
      if (direct_child.Index() == root) continue;

      // When this happens, then root must be false, we handle this just after
      // the loop.
      if (direct_child.NegatedIndex() == root) {
        is_marked_.Set(direct_child);
        break;
      }

      MarkDescendants(direct_child);

      // We have a DAG, so direct_child could only be marked first.
      is_marked_.Clear(direct_child);
    }
    DCHECK(!is_marked_[root])
        << "DetectEquivalences() should have removed cycles!";
    is_marked_.Set(root);

    // Also mark all the ones reachable through the root AMOs.
    if (root < at_most_ones_.size()) {
      auto is_marked = is_marked_.BitsetView();
      for (const int start : at_most_ones_[root]) {
        for (const Literal l : AtMostOne(start)) {
          if (l.Index() == root) continue;
          if (!is_marked[l.Negated()] && !is_redundant_[l.Negated()]) {
            is_marked_.SetUnsafe(is_marked, l.Negated());
            MarkDescendants(l.Negated());
          }
        }
      }
    }

    // Failed literal probing. If both x and not(x) are marked then root must be
    // false. Note that because we process "roots" in reverse topological order,
    // we will fix the LCA of x and not(x) first.
    const auto& marked_positions = is_marked_.PositionsSetAtLeastOnce();
    for (; marked_index < marked_positions.size(); ++marked_index) {
      const LiteralIndex i = marked_positions[marked_index];
      if (is_marked_[Literal(i).NegatedIndex()]) {
        // We tested that at the beginning.
        DCHECK(!trail_->Assignment().LiteralIsAssigned(Literal(root)));

        // We propagate right away the binary implications so that we do not
        // need to consider all antecedents of root in the transitive
        // reduction.
        ++num_fixed;
        if (!FixLiteral(Literal(root).Negated())) return false;
        break;
      }
    }

    // Note that direct_implications will be cleared by
    // RemoveFixedVariables() that will need to inspect it to completely
    // remove Literal(root) from all lists.
    if (trail_->Assignment().LiteralIsAssigned(Literal(root))) continue;

    // Only keep the non-marked literal (and the redundant one which are never
    // marked). We mark root to remove it in the corner case where it was
    // there.
    int new_size = 0;
    for (const Literal l : direct_implications) {
      if (!is_marked_[l]) {
        direct_implications[new_size++] = l;
      } else {
        tmp_removed_.push_back({Literal(root), l});
        DCHECK(!is_redundant_[l]);
      }
    }
    const int diff = direct_implications.size() - new_size;
    direct_implications.resize(new_size);
    direct_implications.shrink_to_fit();
    num_new_redundant_implications += diff;

    // Abort if the computation involved is too big.
    if (work_done_in_mark_descendants_ > 1e8) {
      aborted = true;
      break;
    }
  }

  is_marked_.ClearAndResize(size);

  // If we aborted early, we might no longer have both a=>b and not(b)=>not(a).
  // This is not desirable as some algorithms rely on this invariant. We fix
  // this here.
  if (aborted) {
    absl::flat_hash_set<std::pair<LiteralIndex, LiteralIndex>> removed;
    for (const auto [a, b] : tmp_removed_) {
      removed.insert({a.Index(), b.Index()});
    }
    for (LiteralIndex i(0); i < implications_.size(); ++i) {
      int new_size = 0;
      const LiteralIndex negated_i = Literal(i).NegatedIndex();
      auto& implication = implications_[i];
      for (const Literal l : implication) {
        // Drop l if the symmetric arc not(l) => not(i) was removed above.
        if (removed.contains({l.NegatedIndex(), negated_i})) continue;
        implication[new_size++] = l;
      }
      implication.resize(new_size);
    }
  }
  if (num_fixed > 0) {
    // NOTE(review): the body of this if was lost in this extraction —
    // presumably a cleanup of the newly fixed variables; verify upstream.
  }
  DCHECK(InvariantsAreOk());

  gtl::STLClearObject(&tmp_removed_);
  const double dtime = 1e-8 * work_done_in_mark_descendants_;
  time_limit_->AdvanceDeterministicTime(dtime);
  num_redundant_implications_ += num_new_redundant_implications;
  LOG_IF(INFO, log_info) << "Transitive reduction removed "
                         << num_new_redundant_implications << " literals. "
                         << num_fixed << " fixed. "
                         << " implications left. " << implications_.size()
                         << " literals." << " dtime: " << dtime
                         << " wtime: " << wall_timer.Get()
                         << (aborted ? " Aborted." : "");
  return true;
}
1691
1692namespace {
1693
1694int ElementInIntersectionOrMinusOne(absl::Span<const int> a,
1695 absl::Span<const int> b) {
1696 DCHECK(std::is_sorted(a.begin(), a.end()));
1697 DCHECK(std::is_sorted(b.begin(), b.end()));
1698 if (a.empty() || b.empty()) return -1;
1699 int i = 0;
1700 int j = 0;
1701 while (true) {
1702 if (a[i] == b[j]) return a[i];
1703 if (a[i] < b[j]) {
1704 if (++i == a.size()) return -1;
1705 } else {
1706 if (++j == b.size()) return -1;
1707 }
1708 }
1709}
1710
1711} // namespace
1712
1713std::vector<std::pair<int, int>>
1714BinaryImplicationGraph::FilterAndSortAtMostOnes(
1715 absl::Span<std::vector<Literal>> at_most_ones) {
1716 // We starts by processing larger constraints first.
1717 // But we want the output order to be stable.
1718 std::vector<std::pair<int, int>> index_size_vector;
1719 const int num_amos = at_most_ones.size();
1720 index_size_vector.reserve(num_amos);
1721 for (int index = 0; index < num_amos; ++index) {
1722 std::vector<Literal>& clique = at_most_ones[index];
1723 if (clique.size() <= 1) continue;
1724
1725 // Note(user): Because we always use literal with the smallest variable
1726 // indices as representative, this make sure that if possible, we express
1727 // the clique in term of user provided variable (that are always created
1728 // first).
1729 //
1730 // Remap the clique to only use representative.
1731 for (Literal& ref : clique) {
1732 DCHECK_LT(ref.Index(), representative_of_.size());
1733 const LiteralIndex rep = representative_of_[ref];
1734 if (rep == kNoLiteralIndex) continue;
1735 ref = Literal(rep);
1736 }
1737
1738 // We skip anything that can be presolved further as the code below do
1739 // not handle duplicate well.
1740 //
1741 // TODO(user): Shall we presolve it here?
1742 bool skip = false;
1743 std::sort(clique.begin(), clique.end());
1744 for (int i = 1; i < clique.size(); ++i) {
1745 if (clique[i] == clique[i - 1] || clique[i] == clique[i - i].Negated()) {
1746 skip = true;
1747 break;
1748 }
1749 }
1750 if (skip) continue;
1751
1752 index_size_vector.push_back({index, clique.size()});
1753 }
1754 std::stable_sort(
1755 index_size_vector.begin(), index_size_vector.end(),
1756 [](const std::pair<int, int> a, const std::pair<int, int>& b) {
1757 return a.second > b.second;
1758 });
1759 return index_size_vector;
1760}
1761
1763 std::vector<std::vector<Literal>>* at_most_ones,
1764 int64_t max_num_explored_nodes) {
1765 // The code below assumes a DAG.
1766 if (!DetectEquivalences()) return false;
1767 work_done_in_mark_descendants_ = 0;
1768
1769 int num_extended = 0;
1770 int num_removed = 0;
1771 int num_added = 0;
1772
1773 // Data to detect inclusion of base amo into extend amo.
1774 std::vector<int> detector_clique_index;
1776 InclusionDetector detector(storage, time_limit_);
1777 detector.SetWorkLimit(1e9);
1778
1779 std::vector<int> dense_index_to_index;
1781 max_cliques_containing(implications_.size());
1782
1783 const std::vector<std::pair<int, int>> index_size_vector =
1784 FilterAndSortAtMostOnes(absl::MakeSpan(*at_most_ones));
1785
1786 absl::flat_hash_set<int> cannot_be_removed;
1787 std::vector<bool> was_extended(at_most_ones->size(), false);
1788 for (const auto& [index, old_size] : index_size_vector) {
1789 std::vector<Literal>& clique = (*at_most_ones)[index];
1790 if (time_limit_->LimitReached()) break;
1791
1792 // Special case for clique of size 2, we don't expand them if they
1793 // are included in an already added clique.
1794 if (clique.size() == 2) {
1795 DCHECK_NE(clique[0], clique[1]);
1796 const int dense_index = ElementInIntersectionOrMinusOne(
1797 max_cliques_containing[clique[0]], max_cliques_containing[clique[1]]);
1798 if (dense_index >= 0) {
1799 const int superset_index = dense_index_to_index[dense_index];
1800 if (was_extended[superset_index]) {
1801 cannot_be_removed.insert(superset_index);
1802 }
1803 ++num_removed;
1804 clique.clear();
1805 continue;
1806 }
1807 }
1808
1809 // Save the non-extended version as possible subset.
// TODO(user): Detect on the fly if a superset already exists.
1811 detector_clique_index.push_back(index);
1812 detector.AddPotentialSubset(storage.AddLiterals(clique));
1813
1814 // We only expand the clique as long as we didn't spend too much time.
1815 if (work_done_in_mark_descendants_ < max_num_explored_nodes) {
1816 clique = ExpandAtMostOne(clique, max_num_explored_nodes);
1817 }
1818
1819 // Save the extended version as possible superset.
1820 detector_clique_index.push_back(index);
1821 detector.AddPotentialSuperset(storage.AddLiterals(clique));
1822
1823 // Also index clique for size 2 quick lookup.
1824 const int dense_index = dense_index_to_index.size();
1825 dense_index_to_index.push_back(index);
1826 for (const Literal l : clique) {
1827 max_cliques_containing[l].push_back(dense_index);
1828 }
1829
1830 if (clique.size() > old_size) {
1831 was_extended[index] = true;
1832 ++num_extended;
1833 }
1834 ++num_added;
1835 }
1836
1837 // Remove clique (before extension) that are included in an extended one.
1838 detector.DetectInclusions([&](int subset, int superset) {
1839 const int subset_index = detector_clique_index[subset];
1840 const int superset_index = detector_clique_index[superset];
1841 if (subset_index == superset_index) return;
1842
1843 // Abort if one was already deleted.
1844 if ((*at_most_ones)[subset_index].empty()) return;
1845 if ((*at_most_ones)[superset_index].empty()) return;
1846
1847 // If an extended clique already cover a deleted one, we cannot try to
1848 // remove it by looking at its non-extended version.
1849 if (cannot_be_removed.contains(subset_index)) return;
1850
1851 ++num_removed;
1852 (*at_most_ones)[subset_index].clear();
1853 if (was_extended[superset_index]) cannot_be_removed.insert(superset_index);
1854 });
1855
1856 if (num_extended > 0 || num_removed > 0 || num_added > 0) {
1857 VLOG(1) << "Clique Extended: " << num_extended
1858 << " Removed: " << num_removed << " Added: " << num_added
1859 << (work_done_in_mark_descendants_ > max_num_explored_nodes
1860 ? " (Aborted)"
1861 : "");
1862 }
1863 return true;
1864}
1865
1867 absl::Span<std::vector<Literal>> at_most_ones,
1868 int64_t max_num_explored_nodes, double* dtime) {
1869 // The code below assumes a DAG.
1870 if (!DetectEquivalences()) return false;
1871 work_done_in_mark_descendants_ = 0;
1872
1873 const std::vector<std::pair<int, int>> index_size_vector =
1874 FilterAndSortAtMostOnes(at_most_ones);
1875
1876 // Data to detect inclusion of base amo into extend amo.
1877 std::vector<int> detector_clique_index;
1879 for (const auto& [index, old_size] : index_size_vector) {
1880 if (time_limit_->LimitReached()) break;
1881 detector_clique_index.push_back(index);
1882 storage.AddLiterals(at_most_ones[index]);
1883 }
1884
// We use a higher limit here as the code is faster.
1886 SubsetsDetector detector(storage, time_limit_);
1887 detector.SetWorkLimit(10 * max_num_explored_nodes);
1888 detector.IndexAllStorageAsSubsets();
1889
1890 // Now try to expand one by one.
1891 //
1892 // TODO(user): We should process clique with elements in common together so
1893 // that we can reuse MarkDescendants() which is slow. We should be able to
1894 // "cache" a few of the last calls.
1895 std::vector<int> intersection;
1896 const int num_to_consider = index_size_vector.size();
1897 for (int subset_index = 0; subset_index < num_to_consider; ++subset_index) {
1898 const int index = index_size_vector[subset_index].first;
1899 std::vector<Literal>& clique = at_most_ones[index];
1900 if (clique.empty()) continue; // Was deleted.
1901
1902 if (work_done_in_mark_descendants_ > max_num_explored_nodes) break;
1903 if (detector.Stopped()) break;
1904
1905 // We start with the clique in the "intersection".
1906 // This prefix will never change.
1907 int clique_i = 0;
1908 int next_index_to_try = 0;
1909 intersection.clear();
1910 tmp_bitset_.ClearAndResize(LiteralIndex(implications_.size()));
1911 for (const Literal l : clique) {
1912 intersection.push_back(l.Index().value());
1913 tmp_bitset_.Set(l);
1914 }
1915
1916 while (true) {
1917 if (work_done_in_mark_descendants_ > max_num_explored_nodes) break;
1918 if (detector.Stopped()) break;
1919
1920 // Compute the intersection of all the element (or the new ones) of this
1921 // clique.
1922 //
1923 // Optimization: if clique_i > 0 && intersection.size() == clique.size()
1924 // we already know that we performed the max possible extension.
1925 if (clique_i > 0 && intersection.size() == clique.size()) {
1926 clique_i = clique.size();
1927 }
1928 for (; clique_i < clique.size(); ++clique_i) {
1929 const Literal l = clique[clique_i];
1930
1931 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
1932 MarkDescendants(l);
1933
1934 if (clique_i == 0) {
1935 // Initially we have the clique + the negation of everything
1936 // propagated by l.
1937 for (const LiteralIndex index :
1938 is_marked_.PositionsSetAtLeastOnce()) {
1939 const Literal lit = Literal(index).Negated();
1940 if (!tmp_bitset_[lit]) {
1941 intersection.push_back(lit.Index().value());
1942 }
1943 }
1944 } else {
// We intersect with the negation of everything propagated by not(l).
// Note that we always keep the clique in case some implications were
// not added to the graph.
1948 int new_size = 0;
1949 const int old_size = intersection.size();
1950 for (int i = 0; i < old_size; ++i) {
1951 if (i == next_index_to_try) {
1952 next_index_to_try = new_size;
1953 }
1954 const int index = intersection[i];
1955 const Literal lit = Literal(LiteralIndex(index));
1956 if (tmp_bitset_[lit] || is_marked_[lit.Negated()]) {
1957 intersection[new_size++] = index;
1958 }
1959 }
1960 intersection.resize(new_size);
1961 }
1962
1963 // We can abort early as soon as there is no extra literal than the
1964 // initial clique.
1965 if (intersection.size() <= clique.size()) break;
1966 }
1967
1968 // Should contains the original clique. If there are no more entry, then
1969 // we will not extend this clique. However, we still call FindSubsets() in
1970 // order to remove fully included ones.
1971 CHECK_GE(intersection.size(), clique.size());
1972
1973 // Look for element included in the intersection.
1974 // Note that we clear element fully included at the same time.
1975 //
1976 // TODO(user): next_index_to_try help, but we might still rescan most of
1977 // the one-watcher list of intersection[next_index_to_try], we could be
1978 // a bit faster here.
1979 int num_extra = 0;
1980 detector.FindSubsets(intersection, &next_index_to_try, [&](int subset) {
1981 if (subset == subset_index) {
1982 detector.StopProcessingCurrentSubset();
1983 return;
1984 }
1985
1986 num_extra = 0;
1987 for (const int index : storage[subset]) {
1988 const LiteralIndex lit_index = LiteralIndex(index);
1989 if (tmp_bitset_[lit_index]) continue; // In clique.
1990 tmp_bitset_.Set(lit_index);
1991 clique.push_back(Literal(lit_index)); // extend.
1992 ++num_extra;
1993 }
1994 if (num_extra == 0) {
1995 // Fully included -- remove.
1996 at_most_ones[detector_clique_index[subset]].clear();
1997 detector.StopProcessingCurrentSubset();
1998 return;
1999 }
2000
2001 detector.StopProcessingCurrentSuperset(); // Finish.
2002 });
2003
2004 // No extension: end loop.
2005 if (num_extra == 0) break;
2006 }
2007 }
2008 if (dtime != nullptr) {
2009 *dtime +=
2010 1e-8 * work_done_in_mark_descendants_ + 1e-9 * detector.work_done();
2011 }
2012 return true;
2013}
2014
2015template <bool use_weight>
2017 const absl::Span<const Literal> at_most_one,
2018 const util_intops::StrongVector<LiteralIndex, bool>& can_be_included,
2019 const util_intops::StrongVector<LiteralIndex, double>& expanded_lp_values) {
2020 std::vector<Literal> clique(at_most_one.begin(), at_most_one.end());
2021 std::vector<LiteralIndex> intersection;
2022 const int64_t old_work = work_done_in_mark_descendants_;
2023 for (int i = 0; i < clique.size(); ++i) {
2024 // Do not spend too much time here.
2025 if (work_done_in_mark_descendants_ - old_work > 1e8) break;
2026
2027 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
2028 MarkDescendants(clique[i]);
2029 if (i == 0) {
2030 for (const LiteralIndex index : is_marked_.PositionsSetAtLeastOnce()) {
2031 if (can_be_included[Literal(index).NegatedIndex()]) {
2032 intersection.push_back(index);
2033 }
2034 }
2035 for (const Literal l : clique) is_marked_.Clear(l.NegatedIndex());
2036 }
2037
2038 int new_size = 0;
2039 is_marked_.Clear(clique[i]);
2040 is_marked_.Clear(clique[i].NegatedIndex());
2041 for (const LiteralIndex index : intersection) {
2042 if (!is_marked_[index]) continue;
2043 intersection[new_size++] = index;
2044 }
2045 intersection.resize(new_size);
2046 if (intersection.empty()) break;
2047
2048 // Expand? The negation of any literal in the intersection is a valid way
2049 // to extend the clique.
2050 if (i + 1 == clique.size()) {
2051 // Heuristic: use literal with largest lp value. We randomize slightly.
2052 int index = -1;
2053 double max_lp = 0.0;
2054 for (int j = 0; j < intersection.size(); ++j) {
2055 // If we don't use weight, we prefer variable that comes first.
2056 const double lp =
2057 use_weight
2058 ? expanded_lp_values[Literal(intersection[j]).NegatedIndex()] +
2059 absl::Uniform<double>(*random_, 0.0, 1e-4)
2060 : can_be_included.size() - intersection[j].value();
2061 if (index == -1 || lp > max_lp) {
2062 index = j;
2063 max_lp = lp;
2064 }
2065 }
2066 if (index != -1) {
2067 clique.push_back(Literal(intersection[index]).Negated());
2068 std::swap(intersection.back(), intersection[index]);
2069 intersection.pop_back();
2070 }
2071 }
2072 }
2073 return clique;
2074}
2075
2076// Make sure both version are compiled.
2077template std::vector<Literal>
2079 const absl::Span<const Literal> at_most_one,
2080 const util_intops::StrongVector<LiteralIndex, bool>& can_be_included,
2081 const util_intops::StrongVector<LiteralIndex, double>& expanded_lp_values);
2082template std::vector<Literal>
2084 const absl::Span<const Literal> at_most_one,
2085 const util_intops::StrongVector<LiteralIndex, bool>& can_be_included,
2086 const util_intops::StrongVector<LiteralIndex, double>& expanded_lp_values);
2087
2088// This function and the generated cut serves two purpose:
2089// 1/ If a new clause of size 2 was discovered and not included in the LP, we
2090// will add it.
2091// 2/ The more classical clique cut separation algorithm
2092//
// Note that once 1/ is performed, any literal close to 1.0 in the lp shouldn't
2094// be in the same clique as a literal with positive weight. So for step 2, we
2095// only really need to consider fractional variables.
2096const std::vector<std::vector<Literal>>&
2098 absl::Span<const Literal> literals, absl::Span<const double> lp_values,
2099 absl::Span<const double> reduced_costs) {
2100 // We only want to generate a cut with literals from the LP, not extra ones.
2101 const int num_literals = implications_.size();
2102 util_intops::StrongVector<LiteralIndex, bool> can_be_included(num_literals,
2103 false);
2105 num_literals, 0.0);
2107 num_literals, 0.0);
2108 const int size = literals.size();
2109 for (int i = 0; i < size; ++i) {
2110 const Literal l = literals[i];
2111 can_be_included[l] = true;
2112 can_be_included[l.NegatedIndex()] = true;
2113
2114 const double value = lp_values[i];
2115 expanded_lp_values[l] = value;
2116 expanded_lp_values[l.NegatedIndex()] = 1.0 - value;
2117
2118 // This is used for extending clique-cuts.
2119 // In most situation, we will only extend the cuts with literal at zero,
2120 // and we prefer "low" reduced cost first, so we negate it. Variable with
2121 // high reduced costs will likely stay that way and are of less interest in
2122 // a clique cut. At least that is my interpretation.
2123 //
2124 // TODO(user): For large problems or when we base the clique from a newly
2125 // added and violated 2-clique, we might consider only a subset of
2126 // fractional variables, so we might need to include fractional variable
2127 // first, but then their rc should be zero, so it should be already kind of
2128 // doing that.
2129 //
2130 // Remark: This seems to have a huge impact to the cut performance!
2131 const double rc = reduced_costs[i];
2132 heuristic_weights[l] = -rc;
2133 heuristic_weights[l.NegatedIndex()] = rc;
2134 }
2135
2136 // We want highest sum first.
2137 struct Candidate {
2138 Literal a;
2139 Literal b;
2140 double sum;
2141 bool operator<(const Candidate& other) const { return sum > other.sum; }
2142 };
2143 std::vector<Candidate> candidates;
2144
2145 // First heuristic. Currently we only consider violated at most one of size 2,
2146 // and extend them. Right now, the code is a bit slow to try too many at every
2147 // LP node so it is why we are defensive like this. Note also that because we
2148 // currently still statically add the initial implications, this will only add
2149 // cut based on newly learned binary clause. Or the one that were not added
2150 // to the relaxation in the first place.
2151 std::vector<Literal> fractional_literals;
2152 for (int i = 0; i < size; ++i) {
2153 Literal current_literal = literals[i];
2154 double current_value = lp_values[i];
2155 if (trail_->Assignment().LiteralIsAssigned(current_literal)) continue;
2156 if (is_redundant_[current_literal]) continue;
2157
2158 if (current_value < 0.5) {
2159 current_literal = current_literal.Negated();
2160 current_value = 1.0 - current_value;
2161 }
2162
2163 if (current_value < 0.99) {
2164 fractional_literals.push_back(current_literal);
2165 }
2166
2167 // We consider only one candidate for each current_literal.
2168 LiteralIndex best = kNoLiteralIndex;
2169 double best_value = 0.0;
2170 for (const Literal l : implications_[current_literal]) {
2171 if (!can_be_included[l]) continue;
2172 const double activity =
2173 current_value + expanded_lp_values[l.NegatedIndex()];
2174 if (activity <= 1.01) continue;
2175 const double v = activity + absl::Uniform<double>(*random_, 0.0, 1e-4);
2176 if (best == kNoLiteralIndex || v > best_value) {
2177 best_value = v;
2178 best = l.NegatedIndex();
2179 }
2180 }
2181 if (best != kNoLiteralIndex) {
2182 const double activity = current_value + expanded_lp_values[best];
2183 candidates.push_back({current_literal, Literal(best), activity});
2184 }
2185 }
2186
// Do not generate too many cuts at once.
2188 const int kMaxNumberOfCutPerCall = 10;
2189 std::sort(candidates.begin(), candidates.end());
2190 if (candidates.size() > kMaxNumberOfCutPerCall) {
2191 candidates.resize(kMaxNumberOfCutPerCall);
2192 }
2193
2194 // Expand to a maximal at most one each candidates before returning them.
2195 // Note that we only expand using literal from the LP.
2196 tmp_cuts_.clear();
2197 for (const Candidate& candidate : candidates) {
2198 tmp_cuts_.push_back(ExpandAtMostOneWithWeight(
2199 {candidate.a, candidate.b}, can_be_included, heuristic_weights));
2200 }
2201
2202 // Once we processed new implications, also add "proper" clique cuts.
2203 // We can generate a small graph and separate cut efficiently there.
2204 if (fractional_literals.size() > 1) {
2205 // Lets permute this randomly and truncate if we have too many variables.
2206 // Since we use bitset it is good to have a multiple of 64 there.
2207 //
2208 // TODO(user): Prefer more fractional variables.
2209 const int max_graph_size = 1024;
2210 if (fractional_literals.size() > max_graph_size) {
2211 std::shuffle(fractional_literals.begin(), fractional_literals.end(),
2212 *random_);
2213 fractional_literals.resize(max_graph_size);
2214 }
2215
2216 bron_kerbosch_.Initialize(fractional_literals.size() * 2);
2217
2218 // Prepare a dense mapping.
2219 int i = 0;
2220 tmp_mapping_.resize(implications_.size(), -1);
2221 for (const Literal l : fractional_literals) {
2222 bron_kerbosch_.SetWeight(i, expanded_lp_values[l]);
2223 tmp_mapping_[l] = i++;
2224 bron_kerbosch_.SetWeight(i, expanded_lp_values[l.Negated()]);
2225 tmp_mapping_[l.Negated()] = i++;
2226 }
2227
2228 // Copy the implication subgraph and remap it to a dense indexing.
2229 //
2230 // TODO(user): Treat at_most_one more efficiently. We can collect them
2231 // and scan each of them just once.
2232 for (const Literal base : fractional_literals) {
2233 for (const Literal l : {base, base.Negated()}) {
2234 const int from = tmp_mapping_[l];
2235 for (const Literal next : DirectImplications(l)) {
2236 // l => next so (l + not(next) <= 1).
2237 const int to = tmp_mapping_[next.Negated()];
2238 if (to != -1) {
2239 bron_kerbosch_.AddEdge(from, to);
2240 }
2241 }
2242 }
2243 }
2244
2245 // Before running the algo, compute the transitive closure.
2246 // The graph shouldn't be too large, so this should be fast enough.
2247 bron_kerbosch_.TakeTransitiveClosureOfImplicationGraph();
2248
2249 bron_kerbosch_.SetWorkLimit(1e8);
2250 bron_kerbosch_.SetMinimumWeight(1.001);
2251 std::vector<std::vector<int>> cliques = bron_kerbosch_.Run();
2252
2253 // If we have many candidates, we will only expand the first few with
2254 // maximum weights.
2255 const int max_num_per_batch = 5;
2256 std::vector<std::pair<int, double>> with_weight =
2257 bron_kerbosch_.GetMutableIndexAndWeight();
2258 if (with_weight.size() > max_num_per_batch) {
2259 std::sort(
2260 with_weight.begin(), with_weight.end(),
2261 [](const std::pair<int, double>& a, const std::pair<int, double>& b) {
2262 return a.second > b.second;
2263 });
2264 with_weight.resize(max_num_per_batch);
2265 }
2266
2267 std::vector<Literal> at_most_one;
2268 for (const auto [index, weight] : with_weight) {
2269 // Convert.
2270 at_most_one.clear();
2271 for (const int i : cliques[index]) {
2272 const Literal l = fractional_literals[i / 2];
2273 at_most_one.push_back(i % 2 == 1 ? l.Negated() : l);
2274 }
2275
2276 // Expand and add clique.
2277 //
2278 // TODO(user): Expansion is pretty slow. Given that the base clique can
// share literals being part of the same amo, we should be able to speed
2280 // that up, we don't want to scan an amo twice basically.
2281 tmp_cuts_.push_back(ExpandAtMostOneWithWeight(
2282 at_most_one, can_be_included, heuristic_weights));
2283 }
2284
2285 // Clear the dense mapping
2286 for (const Literal l : fractional_literals) {
2287 tmp_mapping_[l] = -1;
2288 tmp_mapping_[l.Negated()] = -1;
2289 }
2290 }
2291
2292 return tmp_cuts_;
2293}
2294
2295// TODO(user): Use deterministic limit.
2296// TODO(user): Explore the graph instead of just looking at full amo, especially
2297// since we expand small ones.
2298std::vector<absl::Span<const Literal>>
2299BinaryImplicationGraph::HeuristicAmoPartition(std::vector<Literal>* literals) {
2300 std::vector<absl::Span<const Literal>> result;
2301
2303 implications_.size(), false);
2304 for (const Literal l : *literals) to_consider[l] = true;
2305
2306 // Priority queue of (intersection_size, start_of_amo).
2307 std::priority_queue<std::pair<int, int>> pq;
2308
// Compute for each at most one that might overlap, its overlapping size and
2310 // stores its start in the at_most_one_buffer_.
2311 //
2312 // This is in O(num_literal in amo).
2313 absl::flat_hash_set<int> explored_amo;
2314 for (const Literal l : *literals) {
2315 if (l.Index() >= at_most_ones_.size()) continue;
2316 for (const int start : at_most_ones_[l]) {
2317 const auto [_, inserted] = explored_amo.insert(start);
2318 if (!inserted) continue;
2319
2320 int intersection_size = 0;
2321 for (const Literal l : AtMostOne(start)) {
2322 if (to_consider[l]) ++intersection_size;
2323 }
2324 if (intersection_size > 1) {
2325 pq.push({intersection_size, start});
2326 }
2327
2328 // Abort early if we are done.
2329 if (intersection_size == literals->size()) break;
2330 }
2331 }
2332
2333 // Consume AMOs, update size.
2334 int num_processed = 0;
2335 while (!pq.empty()) {
2336 const auto [old_size, start] = pq.top();
2337 pq.pop();
2338
2339 // Recompute size.
2340 int intersection_size = 0;
2341 for (const Literal l : AtMostOne(start)) {
2342 if (to_consider[l]) ++intersection_size;
2343 }
2344 if (intersection_size != old_size) {
2345 // re-add with new size.
2346 if (intersection_size > 1) {
2347 pq.push({intersection_size, start});
2348 }
2349 continue;
2350 }
2351
2352 // Mark the literal of that at most one at false.
2353 for (const Literal l : AtMostOne(start)) {
2354 to_consider[l] = false;
2355 }
2356
2357 // Extract the intersection by moving it in
2358 // [num_processed, num_processed + intersection_size).
2359 const int span_start = num_processed;
2360 for (int i = num_processed; i < literals->size(); ++i) {
2361 if (to_consider[(*literals)[i]]) continue;
2362 std::swap((*literals)[num_processed], (*literals)[i]);
2363 ++num_processed;
2364 }
2365 result.push_back(absl::MakeSpan(literals->data() + span_start,
2366 num_processed - span_start));
2367 }
2368 return result;
2369}
2370
2372 Literal root) {
2373 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
2374 return MarkDescendants(root);
2375}
2376
// Visits, in BFS order, every literal reachable from `root` through the
// implication lists and the stored at-most-one constraints, marking each
// visited literal in is_marked_. Returns the visited literals (root first)
// as a span backed by bfs_stack_.
//
// NOTE(review): bfs_stack_ and the bitset views are assumed to be pre-sized
// to implications_.size() by a class invariant — confirm, since the raw
// stack pointer below is written without bounds checks.
absl::Span<const Literal> BinaryImplicationGraph::MarkDescendants(
    Literal root) {
  // Raw pointer/views to avoid per-access overhead in this hot loop.
  auto* const stack = bfs_stack_.data();
  auto is_marked = is_marked_.BitsetView();
  auto is_redundant = is_redundant_.const_view();

  int stack_size = 1;
  stack[0] = root;
  // A redundant literal is not expanded: return the singleton {root},
  // which in this case is left unmarked in is_marked_.
  if (is_redundant[root]) return absl::MakeSpan(stack, 1);
  is_marked_.Set(root);
  const int amo_size = static_cast<int>(at_most_ones_.size());
  auto implies_something = implies_something_.const_view();
  // stack_size grows as newly reached literals are appended, so this loop
  // processes the whole BFS frontier.
  for (int j = 0; j < stack_size; ++j) {
    const Literal current = stack[j];
    if (!implies_something[current]) continue;

    // Direct implications: current => l.
    work_done_in_mark_descendants_ += implications_[current].size();
    for (const Literal l : implications_[current]) {
      if (!is_marked[l] && !is_redundant[l]) {
        is_marked_.SetUnsafe(is_marked, l);
        stack[stack_size++] = l;
      }
    }

    // At-most-ones containing `current`: every other member must be false,
    // so current => not(l) for each other literal l of the constraint.
    if (current.Index() >= amo_size) continue;
    for (const int start : at_most_ones_[current]) {
      work_done_in_mark_descendants_ += AtMostOne(start).size();
      for (const Literal l : AtMostOne(start)) {
        if (l == current) continue;
        if (!is_marked[l.NegatedIndex()] && !is_redundant[l.NegatedIndex()]) {
          is_marked_.SetUnsafe(is_marked, l.NegatedIndex());
          stack[stack_size++] = l.Negated();
        }
      }
    }
  }
  // Account for the visited literals in the deterministic work counter.
  work_done_in_mark_descendants_ += stack_size;
  return absl::MakeSpan(stack, stack_size);
}
2416
2417std::vector<Literal> BinaryImplicationGraph::ExpandAtMostOne(
2418 const absl::Span<const Literal> at_most_one,
2419 int64_t max_num_explored_nodes) {
2420 std::vector<Literal> clique(at_most_one.begin(), at_most_one.end());
2421
2422 // Optim.
2423 for (const Literal l : clique) {
2424 if (!implies_something_[l]) {
2425 return clique;
2426 }
2427 }
2428
2429 // TODO(user): Improve algorithm. If we do a dfs, we can know if a literal
2430 // has no descendant in the current intersection. We can keep such literal
2431 // marked.
2432 std::vector<LiteralIndex> intersection;
2433 for (int i = 0; i < clique.size(); ++i) {
2434 if (work_done_in_mark_descendants_ > max_num_explored_nodes) break;
2435 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
2436 MarkDescendants(clique[i]);
2437
2438 if (i == 0) {
2439 intersection = is_marked_.PositionsSetAtLeastOnce();
2440 for (const Literal l : clique) is_marked_.Clear(l.NegatedIndex());
2441 }
2442
2443 int new_size = 0;
2444 is_marked_.Clear(clique[i].NegatedIndex()); // TODO(user): explain.
2445 for (const LiteralIndex index : intersection) {
2446 if (is_marked_[index]) intersection[new_size++] = index;
2447 }
2448 intersection.resize(new_size);
2449 if (intersection.empty()) break;
2450
2451 // TODO(user): If the intersection is small compared to the members of the
2452 // clique left to explore, we could look at the descendants of the negated
2453 // intersection instead.
2454
2455 // Expand?
2456 if (i + 1 == clique.size()) {
2457 clique.push_back(Literal(intersection.back()).Negated());
2458 intersection.pop_back();
2459 }
2460 }
2461 return clique;
2462}
2463
2464// TODO(user): lazy cleanup the lists on is_removed_?
2465// TODO(user): Mark fixed variable as is_removed_ for faster iteration?
2467 Literal literal) {
2468 DCHECK(!is_removed_[literal]);
2469
2470 // Clear old state.
2471 for (const Literal l : direct_implications_) {
2472 in_direct_implications_[l] = false;
2473 }
2474 direct_implications_.clear();
2475
2476 // Fill new state.
2477 const VariablesAssignment& assignment = trail_->Assignment();
2478 DCHECK(!assignment.LiteralIsAssigned(literal));
2479 for (const Literal l : implications_[literal]) {
2480 if (l == literal) continue;
2481 if (assignment.LiteralIsAssigned(l)) continue;
2482 if (!is_removed_[l] && !in_direct_implications_[l]) {
2483 in_direct_implications_[l] = true;
2484 direct_implications_.push_back(l);
2485 }
2486 }
2487 if (literal.Index() < at_most_ones_.size()) {
2488 if (is_redundant_[literal]) {
2489 DCHECK(at_most_ones_[literal].empty());
2490 }
2491 for (const int start : at_most_ones_[literal]) {
2492 for (const Literal l : AtMostOne(start)) {
2493 if (l == literal) continue;
2494 if (assignment.LiteralIsAssigned(l)) continue;
2495 if (!is_removed_[l] && !in_direct_implications_[l.NegatedIndex()]) {
2496 in_direct_implications_[l.NegatedIndex()] = true;
2497 direct_implications_.push_back(l.Negated());
2498 }
2499 }
2500 }
2501 }
2502 estimated_sizes_[literal] = direct_implications_.size();
2503 return direct_implications_;
2504}
2505
2506absl::Span<const Literal> BinaryImplicationGraph::AtMostOne(int start) const {
2507 const int size = at_most_one_buffer_[start].Index().value();
2508 return absl::MakeSpan(&at_most_one_buffer_[start + 1], size);
2509}
2510
2512 const int size1 = implications_[lhs].size();
2513 const int size2 =
2514 lhs.Index() < at_most_ones_.size() ? at_most_ones_[lhs].size() : 0;
2515 if (size1 + size2 == 0) return kNoLiteralIndex;
2516
2517 const int choice = absl::Uniform<int>(*random_, 0, size1 + size2);
2518 if (choice < size1) {
2519 return implications_[lhs][choice].Index();
2520 }
2521
2522 const absl::Span<const Literal> amo =
2523 AtMostOne(at_most_ones_[lhs][choice - size1]);
2524 CHECK_GE(amo.size(), 2);
2525 const int first_choice = absl::Uniform<int>(*random_, 0, amo.size());
2526 const Literal lit = amo[first_choice];
2527 if (lit != lhs) return lit.NegatedIndex();
2528
2529 // We are unlucky and just picked the wrong literal: take a different one.
2530 int next_choice = absl::Uniform<int>(*random_, 0, amo.size() - 1);
2531 if (next_choice >= first_choice) {
2532 next_choice += 1;
2533 }
2534 CHECK_NE(amo[next_choice], lhs);
2535 return amo[next_choice].NegatedIndex();
2536}
2537
2539 bool* is_unsat) {
2540 const int saved_index = propagation_trail_index_;
2541 DCHECK_EQ(propagation_trail_index_, trail_->Index()); // Propagation done.
2542
2543 const VariablesAssignment& assignment = trail_->Assignment();
2544 if (assignment.VariableIsAssigned(var)) return false;
2545
2546 const Literal literal(var, true);
2547 direct_implications_of_negated_literal_ =
2548 DirectImplications(literal.Negated());
2549 DirectImplications(literal); // Fill in_direct_implications_.
2550 for (const Literal l : direct_implications_of_negated_literal_) {
2551 if (in_direct_implications_[l]) {
2552 // not(l) => literal => l.
2553 if (!FixLiteral(l)) {
2554 *is_unsat = true;
2555 return false;
2556 }
2557 }
2558 }
2559
2560 return propagation_trail_index_ > saved_index;
2561}
2562
2564 BooleanVariable var) {
2565 const Literal literal(var, true);
2566 int64_t result = 0;
2567 direct_implications_of_negated_literal_ =
2568 DirectImplications(literal.Negated());
2569 const int64_t s1 = DirectImplications(literal).size();
2570 for (const Literal l : direct_implications_of_negated_literal_) {
2571 result += s1;
2572
2573 // We should have dealt with that in FindFailedLiteralAroundVar().
2574 DCHECK(!in_direct_implications_[l]);
2575
2576 // l => literal => l: equivalent variable!
2577 if (in_direct_implications_[l.NegatedIndex()]) result--;
2578 }
2579 return result;
2580}
2581
2582// For all possible a => var => b, add a => b.
2584 BooleanVariable var, std::deque<std::vector<Literal>>* postsolve_clauses) {
2585 const Literal literal(var, true);
2586 DCHECK(!is_removed_[literal.Index()]);
2587 DCHECK(!is_redundant_[literal.Index()]);
2588
2589 direct_implications_of_negated_literal_ =
2590 DirectImplications(literal.Negated());
2591 for (const Literal b : DirectImplications(literal)) {
2592 if (is_removed_[b]) continue;
2593 DCHECK(!is_redundant_[b]);
2594 estimated_sizes_[b.NegatedIndex()]--;
2595 for (const Literal a_negated : direct_implications_of_negated_literal_) {
2596 if (a_negated.Negated() == b) continue;
2597 if (is_removed_[a_negated]) continue;
2598 AddImplication(a_negated.Negated(), b);
2599 }
2600 }
2601 for (const Literal a_negated : direct_implications_of_negated_literal_) {
2602 if (is_removed_[a_negated]) continue;
2603 DCHECK(!is_redundant_[a_negated]);
2604 estimated_sizes_[a_negated.NegatedIndex()]--;
2605 }
2606
2607 // Notify the deletion to the proof checker and the postsolve.
2608 // Note that we want var first in these clauses for the postsolve.
2609 for (const Literal b : direct_implications_) {
2610 if (drat_proof_handler_ != nullptr) {
2611 drat_proof_handler_->DeleteClause({Literal(var, false), b});
2612 }
2613 postsolve_clauses->push_back({Literal(var, false), b});
2614 }
2615 for (const Literal a_negated : direct_implications_of_negated_literal_) {
2616 if (drat_proof_handler_ != nullptr) {
2617 drat_proof_handler_->DeleteClause({Literal(var, true), a_negated});
2618 }
2619 postsolve_clauses->push_back({Literal(var, true), a_negated});
2620 }
2621
2622 // We need to remove any occurrence of var in our implication lists, this will
2623 // be delayed to the CleanupAllRemovedVariables() call.
2624 for (const LiteralIndex index : {literal.Index(), literal.NegatedIndex()}) {
2625 is_removed_[index] = true;
2626 implications_[index].clear();
2627 if (!is_redundant_[index]) {
2628 ++num_redundant_literals_;
2629 is_redundant_.Set(index);
2630 }
2631 }
2632}
2633
2635 std::deque<std::vector<Literal>>* postsolve_clauses) {
2636 for (LiteralIndex a(0); a < implications_.size(); ++a) {
2637 if (is_redundant_[a] && !is_removed_[a]) {
2638 postsolve_clauses->push_back(
2640 is_removed_[a] = true;
2641 gtl::STLClearObject(&(implications_[a]));
2642 continue;
2643 }
2644
2645 int new_size = 0;
2646 auto& implication = implications_[a];
2647 for (const Literal l : implication) {
2648 if (!is_redundant_[l]) {
2649 implication[new_size++] = l;
2650 }
2651 }
2652 implication.resize(new_size);
2653 }
2654}
2655
2657 const VariablesAssignment& assignment = trail_->Assignment();
2658 for (LiteralIndex a(0); a < implications_.size(); ++a) {
2659 if (is_removed_[a] || assignment.LiteralIsAssigned(Literal(a))) {
2660 if (DEBUG_MODE && assignment.LiteralIsTrue(Literal(a))) {
2661 // The code assumes that everything is already propagated.
2662 // Otherwise we will remove implications that didn't propagate yet!
2663 for (const Literal lit : implications_[a]) {
2664 DCHECK(trail_->Assignment().LiteralIsTrue(lit));
2665 }
2666 }
2667
2668 gtl::STLClearObject(&(implications_[a]));
2669 continue;
2670 }
2671
2672 int new_size = 0;
2673 auto& implication = implications_[a];
2674 for (const Literal l : implication) {
2675 if (!is_removed_[l] && !assignment.LiteralIsTrue(l)) {
2676 implication[new_size++] = l;
2677 }
2678 }
2679 implication.resize(new_size);
2680 }
2681
2682 // Clean-up at most ones.
2683 at_most_ones_.clear();
2684 CHECK(CleanUpAndAddAtMostOnes(/*base_index=*/0));
2685
2686 // Note that to please the invariant() we also removed fixed literal above.
2687 DCHECK(InvariantsAreOk());
2688}
2689
// Debug checker for the implication-graph invariants. Returns false (after
// LOG(ERROR)) on the first violation found; returns true early (vacuously)
// when the time limit is hit, since a partial check proves nothing.
bool BinaryImplicationGraph::InvariantsAreOk() {
  if (time_limit_->LimitReached()) return true;
  // We check that if a => b then not(b) => not(a).
  absl::flat_hash_set<std::pair<LiteralIndex, LiteralIndex>> seen;
  int num_redundant = 0;
  int num_fixed = 0;
  TimeLimitCheckEveryNCalls time_limit_check(100, time_limit_);
  // First pass: per-literal state checks, and record every arc in `seen`
  // so the second pass can test for the contrapositive arc.
  for (LiteralIndex a_index(0); a_index < implications_.size(); ++a_index) {
    if (time_limit_check.LimitReached()) return true;
    if (trail_->Assignment().LiteralIsAssigned(Literal(a_index))) {
      ++num_fixed;
      // Fixed literals must have had their implication list cleared (see
      // CleanupAllRemovedAndFixedVariables()).
      if (!implications_[a_index].empty()) {
        LOG(ERROR) << "Fixed literal has non-cleared implications";
        return false;
      }
      continue;
    }
    if (is_removed_[a_index]) {
      if (!implications_[a_index].empty()) {
        LOG(ERROR) << "Removed literal has non-cleared implications";
        return false;
      }
      continue;
    }
    if (is_redundant_[a_index]) {
      ++num_redundant;
      // A redundant (equivalent) literal keeps a single arc: to its
      // representative.
      if (implications_[a_index].size() != 1) {
        LOG(ERROR)
            << "Redundant literal should only point to its representative "
            << Literal(a_index) << " => " << implications_[a_index];
        return false;
      }
    }
    for (const Literal b : implications_[a_index]) {
      seen.insert({a_index, b.Index()});
    }
  }

  // Check that reverse topo order is correct.
  util_intops::StrongVector<LiteralIndex, int> lit_to_order;
  if (is_dag_) {
    lit_to_order.assign(implications_.size(), -1);
    for (int i = 0; i < reverse_topological_order_.size(); ++i) {
      lit_to_order[reverse_topological_order_[i]] = i;
    }
  }

  VLOG(2) << "num_redundant " << num_redundant;
  VLOG(2) << "num_fixed " << num_fixed;
  // Second pass: every arc a => b must have its contrapositive not(b) =>
  // not(a), must not target a removed literal, and must respect the
  // reverse topological order when the graph is a DAG.
  for (LiteralIndex a_index(0); a_index < implications_.size(); ++a_index) {
    if (time_limit_check.LimitReached()) return true;
    const LiteralIndex not_a_index = Literal(a_index).NegatedIndex();
    for (const Literal b : implications_[a_index]) {
      if (is_removed_[b]) {
        LOG(ERROR) << "A removed literal still appear! " << Literal(a_index)
                   << " => " << b;
        return false;
      }

      if (!seen.contains({b.NegatedIndex(), not_a_index})) {
        LOG(ERROR) << "We have " << Literal(a_index) << " => " << b
                   << " but not the reverse implication!";
        LOG(ERROR) << "redundant[a]: " << is_redundant_[a_index]
                   << " assigned[a]: "
                   << trail_->Assignment().LiteralIsAssigned(Literal(a_index))
                   << " removed[a]: " << is_removed_[a_index]
                   << " redundant[b]: " << is_redundant_[b] << " assigned[b]: "
                   << trail_->Assignment().LiteralIsAssigned(b)
                   << " removed[b]: " << is_removed_[b];

        return false;
      }

      // Test that if we have a dag, our topo order is correct.
      if (is_dag_ && !is_redundant_[b] && !is_redundant_[a_index]) {
        DCHECK_NE(lit_to_order[b], -1);
        DCHECK_LE(lit_to_order[b], lit_to_order[a_index]);
      }
    }
  }

  // Check the at-most ones: each literal of each at-most-one stored in the
  // buffer must be indexed (via its start offset) in at_most_ones_, and must
  // not be removed.
  absl::flat_hash_set<std::pair<LiteralIndex, int>> lit_to_start;
  for (LiteralIndex i(0); i < at_most_ones_.size(); ++i) {
    for (const int start : at_most_ones_[i]) {
      lit_to_start.insert({i, start});
    }
  }

  // The buffer stores consecutive at-most-ones, each followed by a one-entry
  // terminator (hence the `+ 1` stride).
  for (int start = 0; start < at_most_one_buffer_.size();) {
    const absl::Span<const Literal> amo = AtMostOne(start);
    for (const Literal l : amo) {
      if (is_removed_[l]) {
        LOG(ERROR) << "A removed literal still appear in an amo " << l;
        return false;
      }
      if (!lit_to_start.contains({l, start})) {
        return false;
      }
    }
    start += amo.size() + 1;
  }

  return true;
}
2795
2796absl::Span<const Literal> BinaryImplicationGraph::NextAtMostOne() {
2797 if (at_most_one_iterator_ >= at_most_one_buffer_.size()) {
2798 return absl::Span<const Literal>();
2799 }
2800
2801 const absl::Span<const Literal> result = AtMostOne(at_most_one_iterator_);
2802 at_most_one_iterator_ += result.size() + 1;
2803 return result;
2804}
2805
2806// ----- SatClause -----
2807
2808// static
2809SatClause* SatClause::Create(absl::Span<const Literal> literals) {
2810 DCHECK_GE(literals.size(), 2);
2811 SatClause* clause = reinterpret_cast<SatClause*>(
2812 ::operator new(sizeof(SatClause) + literals.size() * sizeof(Literal)));
2813 clause->size_ = literals.size();
2814 for (int i = 0; i < literals.size(); ++i) {
2815 clause->literals_[i] = literals[i];
2816 }
2817 return clause;
2818}
2819
2820// Note that for an attached clause, removing fixed literal is okay because if
2821// any of the watched literal is assigned, then the clause is necessarily true.
2823 const VariablesAssignment& assignment) {
2824 DCHECK(!IsRemoved());
2825 if (assignment.VariableIsAssigned(literals_[0].Variable()) ||
2826 assignment.VariableIsAssigned(literals_[1].Variable())) {
2827 DCHECK(IsSatisfied(assignment));
2828 return true;
2829 }
2830 int j = 2;
2831 while (j < size_ && !assignment.VariableIsAssigned(literals_[j].Variable())) {
2832 ++j;
2833 }
2834 for (int i = j; i < size_; ++i) {
2835 if (assignment.VariableIsAssigned(literals_[i].Variable())) {
2836 if (assignment.LiteralIsTrue(literals_[i])) return true;
2837 } else {
2838 std::swap(literals_[j], literals_[i]);
2839 ++j;
2840 }
2841 }
2842 size_ = j;
2843 return false;
2844}
2845
2846bool SatClause::IsSatisfied(const VariablesAssignment& assignment) const {
2847 for (const Literal literal : *this) {
2848 if (assignment.LiteralIsTrue(literal)) return true;
2849 }
2850 return false;
2851}
2852
2853std::string SatClause::DebugString() const {
2854 std::string result;
2855 for (const Literal literal : *this) {
2856 if (!result.empty()) result.append(" ");
2857 result.append(literal.DebugString());
2858 }
2859 return result;
2860}
2861
2862} // namespace sat
2863} // namespace operations_research
void FindStronglyConnectedComponents(const NodeIndex num_nodes, const Graph &graph, SccOutput *components)
double Get() const
Definition timer.h:44
void Start()
When Start() is called multiple times, only the most recent is used.
Definition timer.h:30
ConstView const_view() const
Definition bitset.h:459
void Set(IntegerType index)
Definition bitset.h:878
Bitset64< IntegerType >::View BitsetView()
A bit hacky for really hot loop.
Definition bitset.h:886
void SetUnsafe(typename Bitset64< IntegerType >::View view, IntegerType index)
Definition bitset.h:890
bool ComputeTransitiveReduction(bool log_info=false)
Definition clause.cc:1492
Literal RepresentativeOf(Literal l) const
Definition clause.h:626
void Resize(int num_variables)
Resizes the data structure.
Definition clause.cc:541
const std::vector< std::vector< Literal > > & GenerateAtMostOnesWithLargeWeight(absl::Span< const Literal > literals, absl::Span< const double > lp_values, absl::Span< const double > reduced_costs)
Definition clause.cc:2097
bool Propagate(Trail *trail) final
SatPropagator interface.
Definition clause.cc:816
void RemoveDuplicates()
Clean up implications list that might have duplicates.
Definition clause.cc:560
void MinimizeConflictWithReachability(std::vector< Literal > *c)
Definition clause.cc:908
std::vector< Literal > ExpandAtMostOneWithWeight(absl::Span< const Literal > at_most_one, const util_intops::StrongVector< LiteralIndex, bool > &can_be_included, const util_intops::StrongVector< LiteralIndex, double > &expanded_lp_values)
Same as ExpandAtMostOne() but try to maximize the weight in the clique.
Definition clause.cc:2016
bool TransformIntoMaxCliques(std::vector< std::vector< Literal > > *at_most_ones, int64_t max_num_explored_nodes=1e8)
Definition clause.cc:1762
absl::Span< const Literal > GetAllImpliedLiterals(Literal root)
Definition clause.cc:2371
int64_t NumImplicationOnVariableRemoval(BooleanVariable var)
Definition clause.cc:2563
bool IsEmpty() const final
Returns true if no constraints where ever added to this class.
Definition clause.h:528
bool AddBinaryClause(Literal a, Literal b)
Definition clause.cc:572
absl::Span< const Literal > NextAtMostOne()
Returns the next at_most_one, or a span of size 0 when finished.
Definition clause.cc:2796
bool AddImplication(Literal a, Literal b)
Definition clause.h:541
void RemoveBooleanVariable(BooleanVariable var, std::deque< std::vector< Literal > > *postsolve_clauses)
For all possible a => var => b, add a => b.
Definition clause.cc:2583
void MinimizeConflictExperimental(const Trail &trail, std::vector< Literal > *c)
Definition clause.cc:1069
void MinimizeConflictFirstWithTransitiveReduction(const Trail &trail, std::vector< Literal > *c, absl::BitGenRef random)
Definition clause.cc:1007
bool DetectEquivalences(bool log_info=false)
Definition clause.cc:1292
void MinimizeConflictFirst(const Trail &trail, std::vector< Literal > *c, SparseBitset< BooleanVariable > *marked)
Definition clause.cc:985
const std::vector< Literal > & DirectImplications(Literal literal)
Definition clause.cc:2466
absl::Span< const Literal > Reason(const Trail &trail, int trail_index, int64_t conflict_id) const final
Definition clause.cc:896
bool MergeAtMostOnes(absl::Span< std::vector< Literal > > at_most_ones, int64_t max_num_explored_nodes=1e8, double *dtime=nullptr)
Definition clause.cc:1866
void RemoveAllRedundantVariables(std::deque< std::vector< Literal > > *postsolve_clauses)
Definition clause.cc:2634
bool FindFailedLiteralAroundVar(BooleanVariable var, bool *is_unsat)
Definition clause.cc:2538
LiteralIndex RandomImpliedLiteral(Literal lhs)
Definition clause.cc:2511
ABSL_MUST_USE_RESULT bool AddAtMostOne(absl::Span< const Literal > at_most_one)
Definition clause.cc:632
std::vector< absl::Span< const Literal > > HeuristicAmoPartition(std::vector< Literal > *literals)
Definition clause.cc:2299
SatClause * InprocessingAddClause(absl::Span< const Literal > new_clause)
Definition clause.cc:439
void LazyDetach(SatClause *clause)
Definition clause.cc:318
void InprocessingRemoveClause(SatClause *clause)
These must only be called between [Detach/Attach]AllClauses() calls.
Definition clause.cc:382
bool IsRemovable(SatClause *const clause) const
Definition clause.h:225
void Detach(SatClause *clause)
Definition clause.cc:325
absl::Span< const Literal > Reason(const Trail &trail, int trail_index, int64_t conflict_id) const final
Definition clause.cc:213
bool AddClause(absl::Span< const Literal > literals, Trail *trail, int lbd)
Adds a new clause and perform initial propagation for this clause only.
Definition clause.cc:227
SatClause * NextClauseToProbe()
Returns the next clause to probe in round-robin order.
Definition clause.cc:531
ClauseManager(Model *model)
--— ClauseManager --—
Definition clause.cc:75
SatClause * ReasonClause(int trail_index) const
Definition clause.cc:219
SatClause * AddRemovableClause(absl::Span< const Literal > literals, Trail *trail, int lbd)
Definition clause.cc:235
void Resize(int num_variables)
Must be called before adding clauses referring to such variables.
Definition clause.cc:91
ABSL_MUST_USE_RESULT bool InprocessingFixLiteral(Literal true_literal)
This one do not need the clause to be detached.
Definition clause.cc:363
void Attach(SatClause *clause, Trail *trail)
Definition clause.cc:298
ABSL_MUST_USE_RESULT bool InprocessingRewriteClause(SatClause *clause, absl::Span< const Literal > new_clause)
Definition clause.cc:391
bool Propagate(Trail *trail) final
SatPropagator API.
Definition clause.cc:204
int AddLiterals(const std::vector< L > &wrapped_values)
Definition util.h:780
size_t size() const
Size of the "key" space, always in [0, size()).
Definition util.h:841
void reserve(int size)
Reserve memory if it is already known or tightly estimated.
Definition util.h:89
void AddClause(absl::Span< const Literal > clause)
void DeleteClause(absl::Span< const Literal > clause)
void DetectInclusions(const std::function< void(int subset, int superset)> &process)
Definition inclusion.h:282
void SetWorkLimit(uint64_t work_limit)
Definition inclusion.h:86
std::string DebugString() const
Definition sat_base.h:100
LiteralIndex NegatedIndex() const
Definition sat_base.h:92
LiteralIndex Index() const
Definition sat_base.h:91
BooleanVariable Variable() const
Definition sat_base.h:87
static SatClause * Create(absl::Span< const Literal > literals)
--— SatClause --—
Definition clause.cc:2809
absl::Span< const Literal > AsSpan() const
Returns a Span<> representation of the clause.
Definition clause.h:99
const Literal * begin() const
Allows for range based iteration: for (Literal literal : clause) {}.
Definition clause.h:78
bool IsSatisfied(const VariablesAssignment &assignment) const
Definition clause.cc:2846
int size() const
Number of literals in the clause.
Definition clause.h:69
std::string DebugString() const
Definition clause.cc:2853
bool RemoveFixedLiteralsAndTestIfTrue(const VariablesAssignment &assignment)
Definition clause.cc:2822
SatPropagator(const std::string &name)
Definition sat_base.h:535
util_intops::StrongVector< LiteralIndex, absl::InlinedVector< int32_t, 6 > > AtMostOnes
Definition clause.cc:1187
SccGraph(SccFinder *finder, Implications *graph, AtMostOnes *at_most_ones, std::vector< Literal > *at_most_one_buffer)
Definition clause.cc:1193
int64_t work_done_
For the deterministic time.
Definition clause.cc:1277
util_intops::StrongVector< LiteralIndex, absl::InlinedVector< Literal, 6 > > Implications
Definition clause.cc:1185
StronglyConnectedComponentsFinder< int32_t, SccGraph, CompactVectorVector< int32_t, int32_t > > SccFinder
Definition clause.cc:1189
std::vector< Literal > to_fix_
All these literals where detected to be true during the SCC computation.
Definition clause.cc:1274
const std::vector< int32_t > & operator[](int32_t node) const
Definition clause.cc:1201
void FindSubsets(absl::Span< const int > superset, int *next_index_to_try, const std::function< void(int subset)> &process)
Definition inclusion.h:480
void SetWorkLimit(uint64_t work_limit)
Definition inclusion.h:176
std::vector< Literal > * MutableConflict()
Definition sat_base.h:433
void Enqueue(Literal true_literal, int propagator_id)
Definition sat_base.h:317
void EnqueueWithUnitReason(Literal true_literal)
Specific Enqueue() version for a fixed variable.
Definition sat_base.h:328
const AssignmentInfo & Info(BooleanVariable var) const
Definition sat_base.h:463
void SetCurrentPropagatorId(int propagator_id)
Definition sat_base.h:307
void FastEnqueue(Literal true_literal)
Definition sat_base.h:310
const VariablesAssignment & Assignment() const
Definition sat_base.h:462
bool VariableIsAssigned(BooleanVariable var) const
Returns true iff the given variable is assigned.
Definition sat_base.h:196
bool LiteralIsAssigned(Literal literal) const
Definition sat_base.h:191
bool LiteralIsFalse(Literal literal) const
Definition sat_base.h:185
bool LiteralIsTrue(Literal literal) const
Definition sat_base.h:188
void assign(size_type n, const value_type &val)
void push_back(const value_type &val)
int RemoveIf(RepeatedPtrField< T > *array, const Pred &pr)
void STLSortAndRemoveDuplicates(T *v, const LessFunc &less_func)
Definition stl_util.h:55
void STLDeleteElements(T *container)
Definition stl_util.h:369
void STLClearObject(T *obj)
Definition stl_util.h:120
const LiteralIndex kNoLiteralIndex(-1)
In SWIG mode, we don't want anything besides these top-level includes.
ClosedInterval::Iterator end(ClosedInterval interval)
const bool DEBUG_MODE
Definition radix_sort.h:266
trees with all degrees equal to
#define IF_STATS_ENABLED(instructions)
Definition stats.h:414
#define SCOPED_TIME_STAT(stats)
Definition stats.h:421