Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
clause.cc
Go to the documentation of this file.
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/clause.h"
15
16#include <stddef.h>
17
18#include <algorithm>
19#include <cstdint>
20#include <deque>
21#include <new>
22#include <queue>
23#include <string>
24#include <utility>
25#include <vector>
26
27#include "absl/container/flat_hash_map.h"
28#include "absl/container/flat_hash_set.h"
29#include "absl/container/inlined_vector.h"
30#include "absl/log/check.h"
31#include "absl/random/bit_gen_ref.h"
32#include "absl/random/distributions.h"
33#include "absl/types/span.h"
37#include "ortools/base/timer.h"
41#include "ortools/sat/model.h"
43#include "ortools/sat/util.h"
44#include "ortools/util/bitset.h"
45#include "ortools/util/stats.h"
48namespace operations_research {
49namespace sat {
50
51namespace {
52
// Returns true if the given watcher list contains a watcher pointing to the
// given clause. Only used in DCHECKs, so the linear scan is fine.
//
// The candidate type is templated (instead of hard-coding SatClause) so the
// helper works with any clause type and can be unit-tested in isolation;
// callers are unchanged thanks to template argument deduction.
template <typename Watcher, typename Clause>
bool WatcherListContains(const std::vector<Watcher>& list,
                         const Clause& candidate) {
  for (const Watcher& watcher : list) {
    if (watcher.clause == &candidate) return true;
  }
  return false;
}
62
// Erases from *container every element matching the predicate, hiding the
// usual erase(std::remove_if()) dance behind a single call.
template <typename Container, typename Predicate>
void RemoveIf(Container container, Predicate predicate) {
  const auto new_end =
      std::remove_if(container->begin(), container->end(), predicate);
  container->erase(new_end, container->end());
}
68
69} // namespace
70
71// ----- ClauseManager -----
72
    // NOTE(review): the doxygen extraction dropped the signature line here;
    // this is the member-initializer list and body of the ClauseManager
    // constructor (it visibly takes a Model* used below) — confirm against
    // the original clause.cc.
    : SatPropagator("ClauseManager"),
      implication_graph_(model->GetOrCreate<BinaryImplicationGraph>()),
      trail_(model->GetOrCreate<Trail>()),
      num_inspected_clauses_(0),
      num_inspected_clause_literals_(0),
      num_watched_clauses_(0),
      stats_("ClauseManager") {
  // Register so the trail can call back into this propagator.
  trail_->RegisterPropagator(this);
}
83
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of the ClauseManager destructor.
  // clauses_ owns its raw SatClause pointers; free them all.
  gtl::STLDeleteElements(&clauses_);
  IF_STATS_ENABLED(LOG(INFO) << stats_.StatString());
}
88
89void ClauseManager::Resize(int num_variables) {
90 DCHECK(is_clean_);
91 watchers_on_false_.resize(num_variables << 1);
92 reasons_.resize(num_variables);
93 needs_cleaning_.Resize(LiteralIndex(num_variables << 1));
94}
95
96// Note that this is the only place where we add Watcher so the DCHECK
97// guarantees that there are no duplicates.
98void ClauseManager::AttachOnFalse(Literal literal, Literal blocking_literal,
99 SatClause* clause) {
100 SCOPED_TIME_STAT(&stats_);
101 DCHECK(is_clean_);
102 DCHECK(!WatcherListContains(watchers_on_false_[literal], *clause));
103 watchers_on_false_[literal].push_back(Watcher(clause, blocking_literal));
104}
105
// Propagates all the clauses watching `false_literal` (which just became
// false). Returns false on conflict, in which case the trail conflict is
// filled. The watcher list is compacted in place: kept watchers are moved to
// the front through `new_it`; watchers whose clause found a new literal to
// watch are moved to another list and simply not copied here.
bool ClauseManager::PropagateOnFalse(Literal false_literal, Trail* trail) {
  SCOPED_TIME_STAT(&stats_);
  DCHECK(is_clean_);
  std::vector<Watcher>& watchers = watchers_on_false_[false_literal];
  const auto assignment = AssignmentView(trail->Assignment());

  // Note(user): It sounds better to inspect the list in order, this is because
  // small clauses like binary or ternary clauses will often propagate and thus
  // stay at the beginning of the list.
  auto new_it = watchers.begin();
  const auto end = watchers.end();
  // Fast-forward over the prefix of watchers whose blocking literal is already
  // true: they are kept as-is and their clauses need not be inspected.
  while (new_it != end && assignment.LiteralIsTrue(new_it->blocking_literal)) {
    ++new_it;
  }
  for (auto it = new_it; it != end; ++it) {
    // Don't even look at the clause memory if the blocking literal is true.
    if (assignment.LiteralIsTrue(it->blocking_literal)) {
      *new_it++ = *it;
      continue;
    }
    ++num_inspected_clauses_;

    // If the other watched literal is true, just change the blocking literal.
    // Note that we use the fact that the first two literals of the clause are
    // the ones currently watched.
    Literal* literals = it->clause->literals();
    // XOR trick: false_literal is one of literals[0]/literals[1], so XOR-ing
    // the three indices yields the other watched literal without a branch.
    const Literal other_watched_literal(
        LiteralIndex(literals[0].Index().value() ^ literals[1].Index().value() ^
                     false_literal.Index().value()));
    if (assignment.LiteralIsTrue(other_watched_literal)) {
      *new_it = *it;
      new_it->blocking_literal = other_watched_literal;
      ++new_it;
      ++num_inspected_clause_literals_;
      continue;
    }

    // Look for another literal to watch. We go through the list in a cyclic
    // fashion from start. The first two literals can be ignored as they are the
    // watched ones.
    {
      const int start = it->start_index;
      const int size = it->clause->size();
      DCHECK_GE(start, 2);

      // First scan [start, size), then wrap around to [2, start).
      int i = start;
      while (i < size && assignment.LiteralIsFalse(literals[i])) ++i;
      num_inspected_clause_literals_ += i - start + 2;
      if (i >= size) {
        i = 2;
        while (i < start && assignment.LiteralIsFalse(literals[i])) ++i;
        num_inspected_clause_literals_ += i - 2;
        if (i >= start) i = size;
      }
      if (i < size) {
        // literal[i] is unassigned or true, it's now the new literal to watch.
        // Note that by convention, we always keep the two watched literals at
        // the beginning of the clause.
        literals[0] = other_watched_literal;
        literals[1] = literals[i];
        literals[i] = false_literal;
        // The watcher migrates to the new watched literal's list; note that we
        // do NOT copy it to new_it, which removes it from this list.
        watchers_on_false_[literals[1]].emplace_back(
            it->clause, other_watched_literal, i + 1);
        continue;
      }
    }

    // At this point other_watched_literal is either false or unassigned, all
    // other literals are false.
    if (assignment.LiteralIsFalse(other_watched_literal)) {
      // Conflict: All literals of it->clause are false.
      //
      // Note(user): we could avoid a copy here, but the conflict analysis
      // complexity will be a lot higher than this anyway.
      trail->MutableConflict()->assign(it->clause->begin(), it->clause->end());
      trail->SetFailingSatClause(it->clause);
      num_inspected_clause_literals_ += it - watchers.begin() + 1;
      // Close the compaction gap [new_it, it); the remaining watchers
      // (it included) are kept untouched.
      watchers.erase(new_it, it);
      return false;
    } else {
      // Propagation: other_watched_literal is unassigned, set it to true and
      // put it at position 0. Note that the position 0 is important because
      // we will need later to recover the literal that was propagated from the
      // clause using this convention.
      literals[0] = other_watched_literal;
      literals[1] = false_literal;
      reasons_[trail->Index()] = it->clause;
      trail->Enqueue(other_watched_literal, propagator_id_);
      *new_it++ = *it;
    }
  }
  num_inspected_clause_literals_ += watchers.size();  // The blocking ones.
  watchers.erase(new_it, end);
  return true;
}
201
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of ClauseManager::Propagate(Trail* trail).
  const int old_index = trail->Index();
  // Stop as soon as a propagation pushed something on the trail (the index
  // changed): the solver's fixed-point loop will call us again.
  while (trail->Index() == old_index && propagation_trail_index_ < old_index) {
    const Literal literal = (*trail)[propagation_trail_index_++];
    if (!PropagateOnFalse(literal.Negated(), trail)) return false;
  }
  return true;
}
210
// SatPropagator interface: the reason for the propagation at `trail_index`
// is derived from the clause stored in reasons_ at enqueue time.
absl::Span<const Literal> ClauseManager::Reason(const Trail& /*trail*/,
                                                int trail_index,
                                                int64_t /*conflict_id*/) const {
  return reasons_[trail_index]->PropagationReason();
}
216
// Returns the clause stored as the reason at the given trail index. Only
// meaningful for indices where this propagator enqueued the literal.
SatClause* ClauseManager::ReasonClause(int trail_index) const {
  return reasons_[trail_index];
}
220
// Convenience overload: adds a clause with no LBD information (-1) using the
// manager's own trail.
bool ClauseManager::AddClause(absl::Span<const Literal> literals) {
  return AddClause(literals, trail_, -1);
}
224
225bool ClauseManager::AddClause(absl::Span<const Literal> literals, Trail* trail,
226 int lbd) {
227 SatClause* clause = SatClause::Create(literals);
228 clauses_.push_back(clause);
229 if (add_clause_callback_ != nullptr) add_clause_callback_(lbd, literals);
230 return AttachAndPropagate(clause, trail);
231}
232
233SatClause* ClauseManager::AddRemovableClause(absl::Span<const Literal> literals,
234 Trail* trail, int lbd) {
235 SatClause* clause = SatClause::Create(literals);
236 clauses_.push_back(clause);
237 if (add_clause_callback_ != nullptr) add_clause_callback_(lbd, literals);
238 CHECK(AttachAndPropagate(clause, trail));
239 return clause;
240}
241
// Sets up the 2-watchers data structure. It selects two non-false literals
// and attaches the clause to the event: one of the watched literals become
// false. It returns false if the clause only contains literals assigned to
// false. If only one literals is not false, it propagates it to true if it
// is not already assigned.
bool ClauseManager::AttachAndPropagate(SatClause* clause, Trail* trail) {
  SCOPED_TIME_STAT(&stats_);

  const int size = clause->size();
  Literal* literals = clause->literals();

  // Select the first two literals that are not assigned to false and put them
  // on position 0 and 1.
  int num_literal_not_false = 0;
  for (int i = 0; i < size; ++i) {
    if (!trail->Assignment().LiteralIsFalse(literals[i])) {
      std::swap(literals[i], literals[num_literal_not_false]);
      ++num_literal_not_false;
      if (num_literal_not_false == 2) {
        break;
      }
    }
  }

  // Returns false if all the literals were false.
  // This should only happen on an UNSAT problem, and there is no need to attach
  // the clause in this case.
  if (num_literal_not_false == 0) return false;

  if (num_literal_not_false == 1) {
    // To maintain the validity of the 2-watcher algorithm, we need to watch
    // the false literal with the highest decision level.
    int max_level = trail->Info(literals[1].Variable()).level;
    for (int i = 2; i < size; ++i) {
      const int level = trail->Info(literals[i].Variable()).level;
      if (level > max_level) {
        max_level = level;
        std::swap(literals[1], literals[i]);
      }
    }

    // Propagates literals[0] if it is unassigned.
    if (!trail->Assignment().LiteralIsTrue(literals[0])) {
      reasons_[trail->Index()] = clause;
      trail->Enqueue(literals[0], propagator_id_);
    }
  }

  // Watch the first two literals, each blocking on the other.
  ++num_watched_clauses_;
  AttachOnFalse(literals[0], literals[1], clause);
  AttachOnFalse(literals[1], literals[0], clause);
  return true;
}
295
296void ClauseManager::Attach(SatClause* clause, Trail* trail) {
297 Literal* literals = clause->literals();
298 DCHECK(!trail->Assignment().LiteralIsAssigned(literals[0]));
299 DCHECK(!trail->Assignment().LiteralIsAssigned(literals[1]));
300
301 ++num_watched_clauses_;
302 AttachOnFalse(literals[0], literals[1], clause);
303 AttachOnFalse(literals[1], literals[0], clause);
304}
305
// Common detach logic shared by Detach()/LazyDetach(): updates the watched
// count, records the deletion in the DRAT proof (only for size > 2; smaller
// clauses are presumably handled elsewhere — confirm), drops any learned
// clause info, and empties the clause.
void ClauseManager::InternalDetach(SatClause* clause) {
  --num_watched_clauses_;
  const size_t size = clause->size();
  if (drat_proof_handler_ != nullptr && size > 2) {
    drat_proof_handler_->DeleteClause({clause->begin(), size});
  }
  clauses_info_.erase(clause);
  // Clearing the clause is presumably what makes IsRemoved() true for the
  // watcher-cleanup passes below — confirm against SatClause::Clear().
  clause->Clear();
}
315
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of ClauseManager::LazyDetach(SatClause* clause). The watcher lists are not
  // touched here; they are compacted later by CleanUpWatchers() using the
  // needs_cleaning_ bitset.
  InternalDetach(clause);
  is_clean_ = false;
  needs_cleaning_.Set(clause->FirstLiteral());
  needs_cleaning_.Set(clause->SecondLiteral());
}
322
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of the eager ClauseManager::Detach(SatClause* clause), which removes the
  // clause from both of its watcher lists right away.
  InternalDetach(clause);
  for (const Literal l : {clause->FirstLiteral(), clause->SecondLiteral()}) {
    needs_cleaning_.Clear(l);
    // Erase every watcher whose clause has been cleared/removed.
    RemoveIf(&(watchers_on_false_[l]), [](const Watcher& watcher) {
      return watcher.clause->IsRemoved();
    });
  }
}
332
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of ClauseManager::DetachAllClauses().
  if (!all_clauses_are_attached_) return;
  all_clauses_are_attached_ = false;

  // This is easy, and this allows to reset memory if some watcher lists where
  // really long at some point.
  is_clean_ = true;
  num_watched_clauses_ = 0;
  watchers_on_false_.clear();
}
343
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of ClauseManager::AttachAllClauses(). At least one more line appears to
  // have been dropped just before the loop below — confirm against the
  // original clause.cc.
  if (all_clauses_are_attached_) return;
  all_clauses_are_attached_ = true;

  needs_cleaning_.ClearAll();  // This doesn't resize it.
  // watchers_on_false_ was cleared by DetachAllClauses(); re-grow it to two
  // entries per variable (needs_cleaning_ keeps the literal count).
  watchers_on_false_.resize(needs_cleaning_.size().value());

  for (SatClause* clause : clauses_) {
    ++num_watched_clauses_;
    DCHECK_GE(clause->size(), 2);
    AttachOnFalse(clause->FirstLiteral(), clause->SecondLiteral(), clause);
    AttachOnFalse(clause->SecondLiteral(), clause->FirstLiteral(), clause);
  }
}
359
// This one does not need the clauses to be detached.
// NOTE(review): the extraction dropped the signature line; this is the body
// of ClauseManager::InprocessingFixLiteral(Literal true_literal). Returns
// false if fixing the literal leads to a conflict.
  DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  if (drat_proof_handler_ != nullptr) {
    drat_proof_handler_->AddClause({true_literal});
  }
  // TODO(user): remove the test when the DRAT issue with fixed literal is
  // resolved.
  if (!trail_->Assignment().LiteralIsTrue(true_literal)) {
    trail_->EnqueueWithUnitReason(true_literal);

    // Even when all clauses are detached, we can propagate the implication
    // graph and we do that right away.
    return implication_graph_->Propagate(trail_);
  }
  return true;
}
377
// TODO(user): We could do something slower if the clauses are attached like
// we do for InprocessingRewriteClause().
// NOTE(review): the extraction dropped the signature line; this is the body
// of ClauseManager::InprocessingRemoveClause(SatClause* clause).
  DCHECK(!all_clauses_are_attached_);
  if (drat_proof_handler_ != nullptr) {
    drat_proof_handler_->DeleteClause(clause->AsSpan());
  }
  clauses_info_.erase(clause);
  // Marks the clause as removed; presumably freed later by
  // DeleteRemovedClauses().
  clause->Clear();
}
388
    // NOTE(review): the extraction dropped the first signature line; this is
    // bool ClauseManager::InprocessingRewriteClause(
    SatClause* clause, absl::Span<const Literal> new_clause) {
  if (new_clause.empty()) return false;  // UNSAT.

  if (DEBUG_MODE) {
    for (const Literal l : new_clause) {
      DCHECK(!trail_->Assignment().LiteralIsAssigned(l));
    }
  }

  if (new_clause.size() == 1) {
    if (!InprocessingFixLiteral(new_clause[0])) return false;
    // NOTE(review): one line was dropped by the extraction here (presumably
    // the removal of the now-subsumed old clause) — confirm.
    return true;
  }

  if (new_clause.size() == 2) {
    // Size-two clauses live in the implication graph instead.
    CHECK(implication_graph_->AddBinaryClause(new_clause[0], new_clause[1]));
    // NOTE(review): one line was dropped by the extraction here as well.
    return true;
  }

  if (drat_proof_handler_ != nullptr) {
    // We must write the new clause before we delete the old one.
    drat_proof_handler_->AddClause(new_clause);
    drat_proof_handler_->DeleteClause(clause->AsSpan());
  }

  if (all_clauses_are_attached_) {
    // We can still rewrite the clause, but it is inefficient. We first
    // detach it in a non-lazy way.
    --num_watched_clauses_;
    // NOTE(review): Clear() is called before reading First/SecondLiteral();
    // this presumably relies on Clear() keeping the literal storage intact —
    // confirm against SatClause.
    clause->Clear();
    for (const Literal l : {clause->FirstLiteral(), clause->SecondLiteral()}) {
      needs_cleaning_.Clear(l);
      RemoveIf(&(watchers_on_false_[l]), [](const Watcher& watcher) {
        return watcher.clause->IsRemoved();
      });
    }
  }

  clause->Rewrite(new_clause);

  // And we reattach it.
  if (all_clauses_are_attached_) Attach(clause, trail_);
  return true;
}
436
    // NOTE(review): the extraction dropped the first signature line; this is
    // SatClause* ClauseManager::InprocessingAddClause(
    absl::Span<const Literal> new_clause) {
  DCHECK(!new_clause.empty());
  DCHECK(!all_clauses_are_attached_);
  if (DEBUG_MODE) {
    for (const Literal l : new_clause) {
      DCHECK(!trail_->Assignment().LiteralIsAssigned(l));
    }
  }

  if (new_clause.size() == 1) {
    // TODO(user): We should return false...
    if (!InprocessingFixLiteral(new_clause[0])) return nullptr;
    return nullptr;
  }

  if (new_clause.size() == 2) {
    // Binary clauses are delegated to the implication graph; no SatClause is
    // created for them.
    implication_graph_->AddBinaryClause(new_clause[0], new_clause[1]);
    return nullptr;
  }

  SatClause* clause = SatClause::Create(new_clause);
  clauses_.push_back(clause);
  return clause;
}
462
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of ClauseManager::CleanUpWatchers(), which lazily compacts the watcher
  // lists flagged by LazyDetach().
  SCOPED_TIME_STAT(&stats_);
  for (const LiteralIndex index : needs_cleaning_.PositionsSetAtLeastOnce()) {
    DCHECK(needs_cleaning_[index]);
    RemoveIf(&(watchers_on_false_[index]), [](const Watcher& watcher) {
      return watcher.clause->IsRemoved();
    });
    needs_cleaning_.Clear(index);
  }
  needs_cleaning_.NotifyAllClear();
  is_clean_ = true;
}
475
// We also update to_minimize_index_/to_probe_index_ correctly.
//
// TODO(user): If more indices are needed, generalize the code to a vector of
// indices.
// NOTE(review): the extraction dropped the signature line; this is the body
// of ClauseManager::DeleteRemovedClauses().
  DCHECK(is_clean_);

  // Compact clauses_ in place, freeing the removed clauses and remapping the
  // three saved round-robin indices to their new positions.
  int new_size = 0;
  const int old_size = clauses_.size();
  for (int i = 0; i < old_size; ++i) {
    if (i == to_minimize_index_) to_minimize_index_ = new_size;
    if (i == to_first_minimize_index_) to_first_minimize_index_ = new_size;
    if (i == to_probe_index_) to_probe_index_ = new_size;
    if (clauses_[i]->IsRemoved()) {
      delete clauses_[i];
    } else {
      clauses_[new_size++] = clauses_[i];
    }
  }
  clauses_.resize(new_size);

  // Clamp the indices in case they pointed past the last kept clause.
  if (to_minimize_index_ > new_size) to_minimize_index_ = new_size;
  if (to_first_minimize_index_ > new_size) to_first_minimize_index_ = new_size;
  if (to_probe_index_ > new_size) to_probe_index_ = new_size;
}
501
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of ClauseManager::NextNewClauseToMinimize(). Returns the next
  // not-yet-visited, non-removable clause, or nullptr when exhausted.
  for (; to_first_minimize_index_ < clauses_.size();
       ++to_first_minimize_index_) {
    if (clauses_[to_first_minimize_index_]->IsRemoved()) continue;
    if (!IsRemovable(clauses_[to_first_minimize_index_])) {
      // If the round-robin is in-sync with the new clauses, we may as well
      // count this minimization as part of the round-robin and advance both
      // indexes.
      if (to_minimize_index_ == to_first_minimize_index_) {
        ++to_minimize_index_;
      }
      return clauses_[to_first_minimize_index_++];
    }
  }
  return nullptr;
}
518
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of ClauseManager::NextClauseToMinimize(). Round-robin over the
  // non-removable clauses; returns nullptr once the end is reached.
  for (; to_minimize_index_ < clauses_.size(); ++to_minimize_index_) {
    if (clauses_[to_minimize_index_]->IsRemoved()) continue;
    if (!IsRemovable(clauses_[to_minimize_index_])) {
      return clauses_[to_minimize_index_++];
    }
  }
  return nullptr;
}
528
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of ClauseManager::NextClauseToProbe(). Returns the next live clause, or
  // nullptr when exhausted.
  for (; to_probe_index_ < clauses_.size(); ++to_probe_index_) {
    if (clauses_[to_probe_index_]->IsRemoved()) continue;
    return clauses_[to_probe_index_++];
  }
  return nullptr;
}
536
537// ----- BinaryImplicationGraph -----
538
539void BinaryImplicationGraph::Resize(int num_variables) {
540 SCOPED_TIME_STAT(&stats_);
541 bfs_stack_.resize(num_variables << 1);
542 implications_.resize(num_variables << 1);
543 implies_something_.resize(num_variables << 1);
544 might_have_dups_.resize(num_variables << 1);
545 is_redundant_.resize(implications_.size());
546 is_removed_.resize(implications_.size(), false);
547 estimated_sizes_.resize(implications_.size(), 0);
548 in_direct_implications_.resize(implications_.size(), false);
549 reasons_.resize(num_variables);
550}
551
552void BinaryImplicationGraph::NotifyPossibleDuplicate(Literal a) {
553 if (might_have_dups_[a.Index()]) return;
554 might_have_dups_[a.Index()] = true;
555 to_clean_.push_back(a);
556}
557
  // NOTE(review): the extraction dropped the signature line; this is the body
  // of BinaryImplicationGraph::RemoveDuplicates(). Deduplicates the
  // implication lists of every literal flagged by NotifyPossibleDuplicate().
  for (const Literal l : to_clean_) {
    might_have_dups_[l.Index()] = false;
    gtl::STLSortAndRemoveDuplicates(&implications_[l.Index()]);
  }
  to_clean_.clear();
}
565
// TODO(user): Not all of the solver knows about representative literal, do
// use them here to maintains invariant? Explore this when we start cleaning our
// clauses using equivalence during search. We can easily do it for every
// conflict we learn instead of here.
// NOTE(review): the extraction dropped the signature line; this is the body
// of BinaryImplicationGraph::AddBinaryClause(Literal a, Literal b).
  SCOPED_TIME_STAT(&stats_);

  // Tricky: If this is the first clause, the propagator will be added and
  // assumed to be in a "propagated" state. This makes sure this is the case.
  if (IsEmpty()) propagation_trail_index_ = trail_->Index();

  if (drat_proof_handler_ != nullptr) {
    // TODO(user): Like this we will duplicate all binary clause from the
    // problem. However this leads to a simpler API (since we don't need to
    // special case the loading of the original clauses) and we mainly use drat
    // proof for testing anyway.
    drat_proof_handler_->AddClause({a, b});
  }

  // Remap redundant literals to the representative of their class.
  if (is_redundant_[a.Index()]) a = Literal(representative_of_[a.Index()]);
  if (is_redundant_[b.Index()]) b = Literal(representative_of_[b.Index()]);
  if (a == b.Negated()) return true;  // (a OR not(a)): trivially true.
  if (a == b) return FixLiteral(a);   // (a OR a): fixes a.

  DCHECK(!is_removed_[a]);
  DCHECK(!is_removed_[b]);
  // The clause (a OR b) is stored as not(a) => b and not(b) => a.
  estimated_sizes_[a.NegatedIndex()]++;
  estimated_sizes_[b.NegatedIndex()]++;
  implications_[a.NegatedIndex()].push_back(b);
  implications_[b.NegatedIndex()].push_back(a);
  implies_something_.Set(a.NegatedIndex());
  implies_something_.Set(b.NegatedIndex());
  NotifyPossibleDuplicate(a);
  NotifyPossibleDuplicate(b);
  is_dag_ = false;
  num_implications_ += 2;

  if (enable_sharing_ && add_binary_callback_ != nullptr) {
    add_binary_callback_(a, b);
  }

  const auto& assignment = trail_->Assignment();
  if (trail_->CurrentDecisionLevel() == 0) {
    DCHECK(!assignment.LiteralIsAssigned(a));
    DCHECK(!assignment.LiteralIsAssigned(b));
  } else {
    // At a positive level, immediately detect conflict / propagate if one of
    // the two literals is already false.
    if (assignment.LiteralIsFalse(a)) {
      if (assignment.LiteralIsAssigned(b)) {
        if (assignment.LiteralIsFalse(b)) return false;
      } else {
        reasons_[trail_->Index()] = a;
        trail_->Enqueue(b, propagator_id_);
      }
    } else if (assignment.LiteralIsFalse(b)) {
      if (!assignment.LiteralIsAssigned(a)) {
        reasons_[trail_->Index()] = b;
        trail_->Enqueue(a, propagator_id_);
      }
    }
  }

  return true;
}
629
    // NOTE(review): the extraction dropped the first signature line; this is
    // bool BinaryImplicationGraph::AddAtMostOne(
    absl::Span<const Literal> at_most_one) {
  DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  if (at_most_one.size() <= 1) return true;

  // Temporarily copy the at_most_one constraint at the end of
  // at_most_one_buffer_. It will be cleaned up and added by
  // CleanUpAndAddAtMostOnes().
  const int base_index = at_most_one_buffer_.size();
  // The first entry encodes the size, followed by the literals themselves.
  at_most_one_buffer_.push_back(Literal(LiteralIndex(at_most_one.size())));
  at_most_one_buffer_.insert(at_most_one_buffer_.end(), at_most_one.begin(),
                             at_most_one.end());

  is_dag_ = false;
  return CleanUpAndAddAtMostOnes(base_index);
}
646
647// TODO(user): remove dupl with ClauseManager::InprocessingFixLiteral().
648//
649// Note that we currently do not support calling this at a positive level since
650// we might loose the fixing on backtrack otherwise.
651bool BinaryImplicationGraph::FixLiteral(Literal true_literal) {
652 CHECK_EQ(trail_->CurrentDecisionLevel(), 0);
653 if (trail_->Assignment().LiteralIsTrue(true_literal)) return true;
654 if (trail_->Assignment().LiteralIsFalse(true_literal)) return false;
655
656 if (drat_proof_handler_ != nullptr) {
657 drat_proof_handler_->AddClause({true_literal});
658 }
659
660 trail_->EnqueueWithUnitReason(true_literal);
661 return Propagate(trail_);
662}
663
// This works by doing a linear scan on the at_most_one_buffer_ and
// cleaning/copying the at most ones on the fly to the beginning of the same
// buffer.
//
// Buffer layout: each at-most-one is stored as one "size" entry (a Literal
// whose index encodes the length) followed by that many literals. Returns
// false if a conflict is detected while fixing literals.
bool BinaryImplicationGraph::CleanUpAndAddAtMostOnes(int base_index) {
  const VariablesAssignment& assignment = trail_->Assignment();
  int local_end = base_index;
  const int buffer_size = at_most_one_buffer_.size();
  for (int i = base_index; i < buffer_size;) {
    // Process a new at most one.
    // It will be copied into buffer[local_start + 1, local_end].
    // With its size at buffer[local_start].
    const int local_start = local_end;

    // Update the iterator now. Even if the current at_most_one is reduced away,
    // local_start will still point to the next one, or to the end of the
    // buffer.
    if (i == at_most_one_iterator_) {
      at_most_one_iterator_ = local_start;
    }

    // We have an at_most_one starting at i, and we increment i to the next one.
    const absl::Span<const Literal> initial_amo = AtMostOne(i);
    i += initial_amo.size() + 1;

    // Reserve space for size.
    local_end++;
    // First pass: drop false/removed literals; if one is true, all the
    // remaining ones must become false.
    bool set_all_left_to_false = false;
    for (const Literal l : initial_amo) {
      if (assignment.LiteralIsFalse(l)) continue;
      if (is_removed_[l]) continue;
      if (!set_all_left_to_false && assignment.LiteralIsTrue(l)) {
        set_all_left_to_false = true;
        continue;
      }
      at_most_one_buffer_[local_end++] = RepresentativeOf(l);
    }
    at_most_one_buffer_[local_start] =
        Literal(LiteralIndex(local_end - local_start - 1));

    // Deal with duplicates.
    // Any duplicate in an "at most one" must be false.
    bool some_duplicates = false;
    if (!set_all_left_to_false) {
      // Sort the copied amo.
      // We only want to expose a const AtMostOne() so we sort directly here.
      Literal* pt = &at_most_one_buffer_[local_start + 1];
      std::sort(pt, pt + AtMostOne(local_start).size());

      LiteralIndex previous = kNoLiteralIndex;
      bool remove_previous = false;
      int new_local_end = local_start + 1;
      for (const Literal l : AtMostOne(local_start)) {
        if (l.Index() == previous) {
          // l appears twice in the amo, so it must be false.
          if (assignment.LiteralIsTrue(l)) return false;
          if (!assignment.LiteralIsFalse(l)) {
            if (!FixLiteral(l.Negated())) return false;
          }
          remove_previous = true;
          some_duplicates = true;
          continue;
        }

        // We need to pay attention to triplet or more of equal elements, so
        // it is why we need this boolean and can't just remove it right away.
        if (remove_previous) {
          --new_local_end;
          remove_previous = false;
        }
        previous = l.Index();
        at_most_one_buffer_[new_local_end++] = l;
      }
      if (remove_previous) --new_local_end;

      // Update local end and the amo size.
      local_end = new_local_end;
      at_most_one_buffer_[local_start] =
          Literal(LiteralIndex(local_end - local_start - 1));
    }

    // If there was some duplicates, we need to rescan to see if a literal
    // didn't become true because its negation was appearing twice!
    if (some_duplicates) {
      int new_local_end = local_start + 1;
      for (const Literal l : AtMostOne(local_start)) {
        if (assignment.LiteralIsFalse(l)) continue;
        if (!set_all_left_to_false && assignment.LiteralIsTrue(l)) {
          set_all_left_to_false = true;
          continue;
        }
        at_most_one_buffer_[new_local_end++] = l;
      }

      // Update local end and the amo size.
      local_end = new_local_end;
      at_most_one_buffer_[local_start] =
          Literal(LiteralIndex(local_end - local_start - 1));
    }

    // Deal with all false.
    if (set_all_left_to_false) {
      for (const Literal l : AtMostOne(local_start)) {
        if (assignment.LiteralIsFalse(l)) continue;
        if (assignment.LiteralIsTrue(l)) return false;
        if (!FixLiteral(l.Negated())) return false;
      }

      // Erase this at_most_one.
      local_end = local_start;
      continue;
    }

    // Create a Span<> to simplify the code below.
    const absl::Span<const Literal> at_most_one = AtMostOne(local_start);

    // We expand small sizes into implications.
    // TODO(user): Investigate what the best threshold is.
    if (at_most_one.size() <= std::max(2, at_most_one_max_expansion_size_)) {
      // Each literal implies the negation of every other one.
      for (const Literal a : at_most_one) {
        for (const Literal b : at_most_one) {
          if (a == b) continue;
          implications_[a].push_back(b.Negated());
          implies_something_.Set(a);
          NotifyPossibleDuplicate(a);
        }
      }
      num_implications_ += at_most_one.size() * (at_most_one.size() - 1);

      // This will erase the at_most_one from the buffer.
      local_end = local_start;
      continue;
    }

    // Index the new at most one.
    for (const Literal l : at_most_one) {
      if (l.Index() >= at_most_ones_.size()) {
        at_most_ones_.resize(l.Index().value() + 1);
      }
      DCHECK(!is_redundant_[l]);
      at_most_ones_[l].push_back(local_start);
      implies_something_.Set(l);
    }
  }

  // Drop everything after the last kept constraint.
  at_most_one_buffer_.resize(local_end);
  return true;
}
810
  // NOTE(review): the extraction dropped several lines of
  // BinaryImplicationGraph::Propagate(Trail* trail) below: its signature, at
  // least one statement plus the closing brace of the IsEmpty() early return,
  // and the main loop header iterating over newly assigned trail literals
  // (which must advance propagation_trail_index_, given the increment visible
  // below). Confirm against the original clause.cc before editing.
  SCOPED_TIME_STAT(&stats_);

  if (IsEmpty()) {
    // NOTE(review): dropped line(s) and the closing '}' of this block.
    return true;

  const auto assignment = AssignmentView(trail->Assignment());
  const auto implies_something = implies_something_.view();
  auto* implications = implications_.data();

    // NOTE(review): dropped loop header above this line.
    const Literal true_literal = (*trail)[propagation_trail_index_++];
    DCHECK(assignment.LiteralIsTrue(true_literal));
    if (!implies_something[true_literal]) continue;

    // Note(user): This update is not exactly correct because in case of
    // conflict we don't inspect that much clauses. But doing ++num_inspections_
    // inside the loop does slow down the code by a few percent.
    const absl::Span<const Literal> implied =
        implications[true_literal.Index().value()];
    num_inspections_ += implied.size();
    for (const Literal literal : implied) {
      if (assignment.LiteralIsTrue(literal)) {
        // Note(user): I tried to update the reason here if the literal was
        // enqueued after the true_literal on the trail. This property is
        // important for ComputeFirstUIPConflict() to work since it needs the
        // trail order to be a topological order for the deduction graph.
        // But the performance was not too good...
        continue;
      }

      ++num_propagations_;
      if (assignment.LiteralIsFalse(literal)) {
        // Conflict.
        *(trail->MutableConflict()) = {true_literal.Negated(), literal};
        return false;
      } else {
        // Propagation.
        reasons_[trail->Index()] = true_literal.Negated();
        trail->FastEnqueue(literal);
      }
    }

    // Propagate the at_most_one constraints.
    if (true_literal.Index() < at_most_ones_.size()) {
      for (const int start : at_most_ones_[true_literal]) {
        bool seen = false;
        for (const Literal literal : AtMostOne(start)) {
          ++num_inspections_;
          if (literal == true_literal) {
            if (DEBUG_MODE) {
              DCHECK(!seen);
              seen = true;
            }
            continue;
          }
          if (assignment.LiteralIsFalse(literal)) continue;

          ++num_propagations_;
          if (assignment.LiteralIsTrue(literal)) {
            // Conflict: two literals of the at-most-one are true.
            *(trail->MutableConflict()) = {true_literal.Negated(),
                                           literal.Negated()};
            return false;
          } else {
            // Propagation: all other literals of the amo must be false.
            reasons_[trail->Index()] = true_literal.Negated();
            trail->FastEnqueue(literal.Negated());
          }
        }
      }
    }
  }

  return true;
}
890
// SatPropagator interface: the reason of a binary propagation is the single
// other literal, stored (negated) in reasons_ at enqueue time.
absl::Span<const Literal> BinaryImplicationGraph::Reason(
    const Trail& /*trail*/, int trail_index, int64_t /*conflict_id*/) const {
  return {&reasons_[trail_index], 1};
}
895
// Here, we remove all the literal whose negation are implied by the negation of
// the 1-UIP literal (which always appear first in the given conflict). Note
// that this algorithm is "optimal" in the sense that it leads to a minimized
// conflict with a backjump level as low as possible. However, not all possible
// literals are removed.
//
// TODO(user): Also consider at most one?
// NOTE(review): the extraction dropped the first signature line; this is
// void BinaryImplicationGraph::MinimizeConflictWithReachability(
    std::vector<Literal>* conflict) {
  SCOPED_TIME_STAT(&stats_);
  dfs_stack_.clear();

  // Compute the reachability from the literal "not(conflict->front())" using
  // an iterative dfs.
  const LiteralIndex root_literal_index = conflict->front().NegatedIndex();
  is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
  is_marked_.Set(root_literal_index);

  // TODO(user): This sounds like a good idea, but somehow it seems better not
  // to do that even though it is almost for free. Investigate more.
  //
  // The idea here is that since we already compute the reachability from the
  // root literal, we can use this computation to remove any implication
  // root_literal => b if there is already root_literal => a and b is reachable
  // from a.
  const bool also_prune_direct_implication_list = false;

  // We treat the direct implications differently so we can also remove the
  // redundant implications from this list at the same time.
  auto& direct_implications = implications_[root_literal_index];
  for (const Literal l : direct_implications) {
    if (is_marked_[l]) continue;
    dfs_stack_.push_back(l);
    while (!dfs_stack_.empty()) {
      const LiteralIndex index = dfs_stack_.back().Index();
      dfs_stack_.pop_back();
      if (!is_marked_[index]) {
        is_marked_.Set(index);
        for (const Literal implied : implications_[index]) {
          if (!is_marked_[implied]) dfs_stack_.push_back(implied);
        }
      }
    }

    // The "trick" is to unmark 'l'. This way, if we explore it twice, it means
    // that this l is reachable from some other 'l' from the direct implication
    // list. Remarks:
    // - We don't loose too much complexity when this happen since a literal
    //   can be unmarked only once, so in the worst case we loop twice over its
    //   children. Moreover, this literal will be pruned for later calls.
    // - This is correct, i.e. we can't prune too many literals because of a
    //   strongly connected component. Proof by contradiction: If we take the
    //   first (in direct_implications) literal from a removed SCC, it must
    //   have marked all the others. But because they are marked, they will not
    //   be explored again and so can't mark the first literal.
    if (also_prune_direct_implication_list) {
      is_marked_.Clear(l);
    }
  }

  // Now we can prune the direct implications list and make sure are the
  // literals there are marked.
  if (also_prune_direct_implication_list) {
    int new_size = 0;
    for (const Literal l : direct_implications) {
      if (!is_marked_[l]) {
        is_marked_.Set(l);
        direct_implications[new_size] = l;
        ++new_size;
      }
    }
    if (new_size < direct_implications.size()) {
      num_redundant_implications_ += direct_implications.size() - new_size;
      direct_implications.resize(new_size);
    }
  }

  // Drop from the conflict every literal whose negation was marked reachable.
  RemoveRedundantLiterals(conflict);
}
975
976// Same as MinimizeConflictWithReachability() but also mark (in the given
977// SparseBitset) the reachable literal already assigned to false. These literals
978// will be implied if the 1-UIP literal is assigned to false, and the classic
979// minimization algorithm can take advantage of that.
// NOTE(review): the signature lines (original 980 and 982) are missing from
// this listing; the following function's header comment names this one
// MinimizeConflictFirst() -- confirm against the original clause.cc.
981 const Trail& trail, std::vector<Literal>* conflict,
983 SCOPED_TIME_STAT(&stats_);
984 DCHECK(!conflict->empty());
 // Mark everything reachable from not(1-UIP literal) in one pass.
985 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
986 MarkDescendants(conflict->front().Negated());
987 for (const LiteralIndex i : is_marked_.PositionsSetAtLeastOnce()) {
988 // TODO(user): if this is false, then we actually have a conflict of size 2.
989 // This can only happen if the binary clause was not propagated properly
990 // if for instance we do chronological backtracking without re-enqueuing the
991 // consequence of a binary clause.
992 if (trail.Assignment().LiteralIsTrue(Literal(i))) {
993 marked->Set(Literal(i).Variable());
994 }
995 }
996 RemoveRedundantLiterals(conflict);
997}
998
999// Same as MinimizeConflictFirst() but take advantage of this reachability
1000// computation to remove redundant implication in the implication list of the
1001// first UIP conflict.
// NOTE(review): the defining signature (original line 1002) is missing from
// this rendered listing -- confirm the function name against clause.cc.
1003 const Trail& /*trail*/, std::vector<Literal>* conflict,
1004 absl::BitGenRef random) {
1005 SCOPED_TIME_STAT(&stats_);
1006 const LiteralIndex root_literal_index = conflict->front().NegatedIndex();
1007 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
1008 is_marked_.Set(root_literal_index);
1009
1010 int new_size = 0;
1011 auto& direct_implications = implications_[root_literal_index];
1012
1013 // The randomization allow to find more redundant implication since to find
1014 // a => b and remove b, a must be before b in direct_implications. Note that
1015 // a std::reverse() could work too. But randomization seems to work better.
1016 // Probably because it has other impact on the search tree.
1017 std::shuffle(direct_implications.begin(), direct_implications.end(), random);
1018 dfs_stack_.clear();
1019 for (const Literal l : direct_implications) {
1020 if (is_marked_[l]) {
1021 // The literal is already marked! so it must be implied by one of the
1022 // previous literal in the direct_implications list. We can safely remove
1023 // it.
1024 continue;
1025 }
 // Keep 'l' (compacting in place) and mark its whole descendant set.
1026 direct_implications[new_size++] = l;
1027 dfs_stack_.push_back(l);
1028 while (!dfs_stack_.empty()) {
1029 const LiteralIndex index = dfs_stack_.back().Index();
1030 dfs_stack_.pop_back();
1031 if (!is_marked_[index]) {
1032 is_marked_.Set(index);
1033 for (const Literal implied : implications_[index]) {
1034 if (!is_marked_[implied]) dfs_stack_.push_back(implied);
1035 }
1036 }
1037 }
1038 }
1039 if (new_size < direct_implications.size()) {
1040 num_redundant_implications_ += direct_implications.size() - new_size;
1041 direct_implications.resize(new_size);
1042 }
1043 RemoveRedundantLiterals(conflict);
1044}
1045
1046void BinaryImplicationGraph::RemoveRedundantLiterals(
1047 std::vector<Literal>* conflict) {
1048 SCOPED_TIME_STAT(&stats_);
1049 int new_index = 1;
1050 for (int i = 1; i < conflict->size(); ++i) {
1051 if (!is_marked_[(*conflict)[i].NegatedIndex()]) {
1052 (*conflict)[new_index] = (*conflict)[i];
1053 ++new_index;
1054 }
1055 }
1056 if (new_index < conflict->size()) {
1057 ++num_minimization_;
1058 num_literals_removed_ += conflict->size() - new_index;
1059 conflict->resize(new_index);
1060 }
1061}
1062
1063// TODO(user): Also consider at most one?
// NOTE(review): the defining signature (original line 1064) is missing from
// this rendered listing -- confirm the function name against clause.cc.
1065 const Trail& trail, std::vector<Literal>* conflict) {
1066 SCOPED_TIME_STAT(&stats_);
1067 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
1068 is_simplified_.ClearAndResize(LiteralIndex(implications_.size()));
 // Mark all literals currently in the conflict.
1069 for (const Literal lit : *conflict) {
1070 is_marked_.Set(lit);
1071 }
1072
1073 // Identify and remove the redundant literals from the given conflict.
1074 // 1/ If a -> b then a can be removed from the conflict clause.
1075 // This is because not b -> not a.
1076 // 2/ a -> b can only happen if level(a) <= level(b).
1077 // 3/ Because of 2/, cycles can appear only at the same level.
1078 // The vector is_simplified_ is used to avoid removing all elements of a
1079 // cycle. Note that this is not optimal in the sense that we may not remove
1080 // a literal that can be removed.
1081 //
1082 // Note that there is no need to explore the unique literal of the highest
1083 // decision level since it can't be removed. Because this is a conflict, such
1084 // literal is always at position 0, so we start directly at 1.
1085 int index = 1;
1086 for (int i = 1; i < conflict->size(); ++i) {
1087 const Literal lit = (*conflict)[i];
1088 const int lit_level = trail.Info(lit.Variable()).level;
1089 bool keep_literal = true;
1090 for (const Literal implied : implications_[lit]) {
1091 if (is_marked_[implied]) {
1092 DCHECK_LE(lit_level, trail.Info(implied.Variable()).level);
 // Same-level implied literal already removed: skip it so we do not
 // delete an entire same-level cycle.
1093 if (lit_level == trail.Info(implied.Variable()).level &&
1094 is_simplified_[implied]) {
1095 continue;
1096 }
1097 keep_literal = false;
1098 break;
1099 }
1100 }
1101 if (keep_literal) {
1102 (*conflict)[index] = lit;
1103 ++index;
1104 } else {
1105 is_simplified_.Set(lit);
1106 }
1107 }
1108 if (index < conflict->size()) {
1109 ++num_minimization_;
1110 num_literals_removed_ += conflict->size() - index;
1111 conflict->erase(conflict->begin() + index, conflict->end());
1112 }
1113}
1114
// NOTE(review): the defining signature (original line 1115) is missing from
// this rendered listing; a later comment in this file refers to this cleanup
// as RemoveFixedVariables() -- confirm against the original clause.cc.
// Removes all newly fixed literals (since the last call) from the implication
// lists and the at-most-one structures. Must be called at decision level 0
// with everything already propagated.
1116 SCOPED_TIME_STAT(&stats_);
1117 DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
1118 if (IsEmpty()) return;
1119
1120 // Nothing to do if nothing changed since last call.
1121 const int new_num_fixed = trail_->Index();
1122 DCHECK_EQ(propagation_trail_index_, new_num_fixed);
1123 if (num_processed_fixed_variables_ == new_num_fixed) return;
1124
1125 const VariablesAssignment& assignment = trail_->Assignment();
1126 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
1127 for (; num_processed_fixed_variables_ < new_num_fixed;
1128 ++num_processed_fixed_variables_) {
1129 const Literal true_literal = (*trail_)[num_processed_fixed_variables_];
1130 if (DEBUG_MODE) {
1131 // The code assumes that everything is already propagated.
1132 // Otherwise we will remove implications that didn't propagate yet!
1133 for (const Literal lit : implications_[true_literal]) {
1134 DCHECK(trail_->Assignment().LiteralIsTrue(lit));
1135 }
1136 }
1137
1138 // If b is true and a -> b then because not b -> not a, all the
1139 // implications list that contains b will be marked by this process.
1140 // And the ones that contains not(b) should correspond to a false literal!
1141 //
1142 // TODO(user): This might not be true if we remove implication by
1143 // transitive reduction and the process was aborted due to the computation
1144 // limit. I think it will be good to maintain that invariant though,
1145 // otherwise fixed literals might never be removed from these lists...
1146 for (const Literal lit : implications_[true_literal.NegatedIndex()]) {
1147 if (lit.NegatedIndex() < representative_of_.size() &&
1148 representative_of_[lit.Negated()] != kNoLiteralIndex) {
1149 // We mark its representative instead.
1150 is_marked_.Set(representative_of_[lit.Negated()]);
1151 } else {
1152 is_marked_.Set(lit.NegatedIndex());
1153 }
1154 }
1155 gtl::STLClearObject(&(implications_[true_literal]));
1156 gtl::STLClearObject(&(implications_[true_literal.NegatedIndex()]));
1157
1158 if (true_literal.Index() < at_most_ones_.size()) {
1159 gtl::STLClearObject(&(at_most_ones_[true_literal]));
1160 }
1161 if (true_literal.NegatedIndex() < at_most_ones_.size()) {
1162 gtl::STLClearObject(&(at_most_ones_[true_literal.NegatedIndex()]));
1163 }
1164 }
 // Only the marked lists can contain a true literal; filter them out.
1165 for (const LiteralIndex i : is_marked_.PositionsSetAtLeastOnce()) {
1166 RemoveIf(&implications_[i], [&assignment](const Literal& lit) {
1167 return assignment.LiteralIsTrue(lit);
1168 });
1169 }
1170
1171 // TODO(user): This might be a bit slow. Do not call all the time if needed,
1172 // this shouldn't change the correctness of the code.
1173 at_most_ones_.clear();
1174 CleanUpAndAddAtMostOnes(/*base_index=*/0);
1175 DCHECK(InvariantsAreOk());
1176}
1177
// NOTE(review): the class header (original lines 1178-1186, including the
// class name and the SccFinder/Implications/AtMostOnes type aliases) is
// missing from this rendered listing. DetectEquivalences() below uses it as
// "SccGraph" -- confirm the declarations against the original clause.cc.
// Graph adaptor feeding the SCC finder: operator[] lazily builds the
// out-neighbor list of a literal node from the binary implications and from
// the (implicitly expanded) at-most-one constraints.
1179 public:
1187
1188 explicit SccGraph(SccFinder* finder, Implications* graph,
1189 AtMostOnes* at_most_ones,
1190 std::vector<Literal>* at_most_one_buffer)
1191 : finder_(*finder),
1192 implications_(*graph),
1193 at_most_ones_(*at_most_ones),
1194 at_most_one_buffer_(*at_most_one_buffer) {}
1195
1196 const std::vector<int32_t>& operator[](int32_t node) const {
1197 tmp_.clear();
1198 for (const Literal l : implications_[LiteralIndex(node)]) {
1199 tmp_.push_back(l.Index().value());
 // node => l while not(l) is on the DFS path means l must be true.
1200 if (finder_.NodeIsInCurrentDfsPath(l.NegatedIndex().value())) {
1201 to_fix_.push_back(l);
1202 }
1203 }
1204 if (node < at_most_ones_.size()) {
1205 for (const int start : at_most_ones_[LiteralIndex(node)]) {
1206 if (start >= at_most_one_already_explored_.size()) {
1207 at_most_one_already_explored_.resize(start + 1, false);
1208 previous_node_to_explore_at_most_one_.resize(start + 1);
1209 }
1210
1211 // In the presence of at_most_ones_ constraints, expanding them
1212 // implicitly to implications in the SCC computation can result in a
1213 // quadratic complexity rather than a linear one in term of the input
1214 // data structure size. So this test here is critical on problem with
1215 // large at_most ones like the "ivu06-big.mps.gz" where without it, the
1216 // full FindStronglyConnectedComponents() take more than on hour instead
1217 // of less than a second!
1218 if (at_most_one_already_explored_[start]) {
1219 // We never expand a node twice.
1220 const int first_node = previous_node_to_explore_at_most_one_[start];
1221 DCHECK_NE(node, first_node);
1222
1223 if (finder_.NodeIsInCurrentDfsPath(first_node)) {
1224 // If the first node is not settled, then we do explore the
1225 // at_most_one constraint again. In "Mixed-Integer-Programming:
1226 // Analyzing 12 years of progress", Tobias Achterberg and Roland
1227 // Wunderling explains that an at most one need to be looped over at
1228 // most twice. I am not sure exactly how that works, so for now we
1229 // are not fully linear, but on actual instances, we only rarely
1230 // run into this case.
1231 //
1232 // Note that we change the previous node to explore at most one
1233 // since the current node will be settled before the old ones.
1234 //
1235 // TODO(user): avoid looping more than twice on the same at most one
1236 // constraints? Note that the second time we loop we have x => y =>
1237 // not(x), so we can already detect that x must be false which we
1238 // detect below.
1239 previous_node_to_explore_at_most_one_[start] = node;
1240 } else {
1241 // The first node is already settled and so are all its child. Only
1242 // not(first_node) might still need exploring.
1243 tmp_.push_back(
1244 Literal(LiteralIndex(first_node)).NegatedIndex().value());
1245 continue;
1246 }
1247 } else {
1248 at_most_one_already_explored_[start] = true;
1249 previous_node_to_explore_at_most_one_[start] = node;
1250 }
1251
 // The buffer stores, at 'start', the length of the at-most-one
 // followed by its literals.
1252 const absl::Span<const Literal> amo =
1253 absl::MakeSpan(&at_most_one_buffer_[start + 1],
1254 at_most_one_buffer_[start].Index().value());
1255 for (const Literal l : amo) {
1256 if (l.Index() == node) continue;
1257 tmp_.push_back(l.NegatedIndex().value());
1258 if (finder_.NodeIsInCurrentDfsPath(l.Index().value())) {
1259 to_fix_.push_back(l.Negated());
1260 }
1261 }
1262 }
1263 }
1264 work_done_ += tmp_.size();
1265 return tmp_;
1266 }
1267
1268 // All these literals where detected to be true during the SCC computation.
1269 mutable std::vector<Literal> to_fix_;
1270
1271 // For the deterministic time.
1272 mutable int64_t work_done_ = 0;
1273
1274 private:
1275 const SccFinder& finder_;
1276 const Implications& implications_;
1277 const AtMostOnes& at_most_ones_;
1278 const std::vector<Literal>& at_most_one_buffer_;
1279
 // Scratch neighbor list returned by operator[]; valid until the next call.
1280 mutable std::vector<int32_t> tmp_;
1281
1282 // Used to get a non-quadratic complexity in the presence of at most ones.
1283 mutable std::vector<bool> at_most_one_already_explored_;
1284 mutable std::vector<int> previous_node_to_explore_at_most_one_;
1285};
1286
// NOTE(review): this is BinaryImplicationGraph::DetectEquivalences() per the
// log message below; its signature (original line 1287) and the declaration
// of 'scc' (original line 1303) are missing from this rendered listing --
// confirm against the original clause.cc.
1288 // This was already called, and no new constraint where added. Note that new
1289 // fixed variable cannot create new equivalence, only new binary clauses do.
1290 if (is_dag_) return true;
1291 WallTimer wall_timer;
1292 wall_timer.Start();
1293 log_info |= VLOG_IS_ON(1);
1294
1295 // Lets remove all fixed variables first.
1296 if (!Propagate(trail_)) return false;
1298 const VariablesAssignment& assignment = trail_->Assignment();
1299 DCHECK(InvariantsAreOk());
1300
1301 // TODO(user): We could just do it directly though.
1302 const int32_t size(implications_.size());
1304 scc.reserve(size);
1305 double dtime = 0.0;
1306 {
1307 SccGraph::SccFinder finder;
1308 SccGraph graph(&finder, &implications_, &at_most_ones_,
1309 &at_most_one_buffer_);
1310 finder.FindStronglyConnectedComponents(size, graph, &scc);
1311 dtime += 4e-8 * graph.work_done_;
1312
 // Literals detected as necessarily true during the SCC traversal.
1313 for (const Literal l : graph.to_fix_) {
1314 if (assignment.LiteralIsFalse(l)) return false;
1315 if (assignment.LiteralIsTrue(l)) continue;
1316 if (!FixLiteral(l)) return false;
1317 }
1318 }
1319
1320 // The old values will still be valid.
1321 representative_of_.resize(size, kNoLiteralIndex);
1322 is_redundant_.resize(size);
1323
1324 int num_equivalences = 0;
1325 reverse_topological_order_.clear();
1326 for (int index = 0; index < scc.size(); ++index) {
1327 const absl::Span<int32_t> component = scc[index];
1328
1329 // If one is fixed then all must be fixed. Note that the reason why the
1330 // propagation didn't already do that and we don't always get fixed
1331 // component of size 1 is because of the potential newly fixed literals
1332 // above.
1333 //
1334 // In any case, all fixed literals are marked as redundant.
1335 {
1336 bool all_fixed = false;
1337 bool all_true = false;
1338 for (const int32_t i : component) {
1339 const Literal l = Literal(LiteralIndex(i));
1340 if (trail_->Assignment().LiteralIsAssigned(l)) {
1341 all_fixed = true;
1342 all_true = trail_->Assignment().LiteralIsTrue(l);
1343 break;
1344 }
1345 }
1346 if (all_fixed) {
1347 for (const int32_t i : component) {
1348 const Literal l = Literal(LiteralIndex(i));
1349 if (!is_redundant_[l]) {
1350 ++num_redundant_literals_;
1351 is_redundant_.Set(l);
1352 representative_of_[l] = l.Index();
1353 }
1354 const Literal to_fix = all_true ? l : l.Negated();
1355 if (assignment.LiteralIsFalse(to_fix)) return false;
1356 if (assignment.LiteralIsTrue(to_fix)) continue;
 // NOTE(review): 'to_fix' is computed above but 'l' is fixed here;
 // when all_true is false this fixes l instead of its negation.
 // Looks suspicious -- confirm whether FixLiteral(to_fix) was
 // intended against the original clause.cc.
1357 if (!FixLiteral(l)) return false;
1358 }
1359
1360 // Next component.
1361 continue;
1362 }
1363 }
1364
1365 // We ignore variable that appear in no constraints.
1366 if (component.size() == 1 && is_removed_[LiteralIndex(component[0])]) {
1367 continue;
1368 }
1369
1370 // We always take the smallest literal index (which also corresponds to the
1371 // smallest BooleanVariable index) as a representative. This make sure that
1372 // the representative of a literal l and the one of not(l) will be the
1373 // negation of each other. There is also reason to think that it is
1374 // heuristically better to use a BooleanVariable that was created first.
1375 std::sort(component.begin(), component.end());
1376 const LiteralIndex representative(component[0]);
1377 reverse_topological_order_.push_back(representative);
1378
1379 if (component.size() == 1) {
1380 // Note that because we process list in reverse topological order, this
1381 // is only needed if there is any equivalence before this point.
1382 if (num_equivalences > 0) {
1383 auto& representative_list = implications_[representative];
1384 for (Literal& ref : representative_list) {
1385 const LiteralIndex rep = representative_of_[ref];
1386 if (rep == representative) continue;
1387 if (rep == kNoLiteralIndex) continue;
1388 ref = Literal(rep);
1389 }
1390 gtl::STLSortAndRemoveDuplicates(&representative_list);
1391 }
1392 continue;
1393 }
1394
1395 // Sets the representative.
1396 for (int i = 1; i < component.size(); ++i) {
1397 const Literal literal = Literal(LiteralIndex(component[i]));
1398 if (!is_redundant_[literal]) {
1399 ++num_redundant_literals_;
1400 is_redundant_.Set(literal);
1401 }
1402 representative_of_[literal] = representative;
1403
1404 // Detect if x <=> not(x) which means unsat. Note that we rely on the
1405 // fact that when sorted, they will both be consecutive in the list.
1406 if (Literal(LiteralIndex(component[i - 1])).Negated() == literal) {
1407 LOG_IF(INFO, log_info) << "Trivially UNSAT in DetectEquivalences()";
1408 return false;
1409 }
1410 }
1411
1412 // Merge all the lists in implications_[representative].
1413 // Note that we do not want representative in its own list.
1414 auto& representative_list = implications_[representative];
1415 int new_size = 0;
1416 for (const Literal l : representative_list) {
1417 const Literal rep = RepresentativeOf(l);
1418 if (rep.Index() == representative) continue;
1419 representative_list[new_size++] = rep;
1420 }
1421 representative_list.resize(new_size);
1422 for (int i = 1; i < component.size(); ++i) {
1423 const Literal literal = Literal(LiteralIndex(component[i]));
1424 auto& ref = implications_[literal];
1425 for (const Literal l : ref) {
1426 const Literal rep = RepresentativeOf(l);
1427 if (rep.Index() != representative) representative_list.push_back(rep);
1428 }
1429
1430 // Add representative <=> literal.
1431 //
1432 // Remark: this relation do not need to be added to a DRAT proof since
1433 // the redundant variables should never be used again for a pure SAT
1434 // problem.
1435 representative_list.push_back(literal);
1436 ref.clear();
1437 ref.push_back(Literal(representative));
1438 }
1439 gtl::STLSortAndRemoveDuplicates(&representative_list);
1440 num_equivalences += component.size() - 1;
1441 }
1442
1443 is_dag_ = true;
1444 if (num_equivalences != 0) {
1445 // Remap all at most ones. Remove fixed variables, process duplicates. Note
1446 // that this might result in more implications when we expand small at most
1447 // one.
1448 at_most_ones_.clear();
1449 int saved_trail_index = propagation_trail_index_;
1450 CleanUpAndAddAtMostOnes(/*base_index=*/0);
1451 // This might have run the propagation on a few variables without taking
1452 // into account the AMOs. Propagate again.
1453 //
1454 // TODO(user): Maybe a better alternative is to not propagate when we fix
1455 // variables inside CleanUpAndAddAtMostOnes().
1456 if (propagation_trail_index_ != saved_trail_index) {
1457 propagation_trail_index_ = saved_trail_index;
1458 Propagate(trail_);
1459 }
1460
1461 num_implications_ = 0;
1462 for (LiteralIndex i(0); i < size; ++i) {
1463 num_implications_ += implications_[i].size();
1464 }
1465 dtime += 2e-8 * num_implications_;
1466 }
1467
1468 time_limit_->AdvanceDeterministicTime(dtime);
1469 const int num_fixed_during_scc =
1470 trail_->Index() - num_processed_fixed_variables_;
 // NOTE(review): original line 1471 is missing from this listing (likely a
 // cleanup of the newly fixed variables) -- confirm against clause.cc.
1472 DCHECK(InvariantsAreOk());
1473 LOG_IF(INFO, log_info) << "SCC. " << num_equivalences
1474 << " redundant equivalent literals. "
1475 << num_fixed_during_scc << " fixed. "
1476 << num_implications_ << " implications left. "
1477 << implications_.size() << " literals."
1478 << " size of at_most_one buffer = "
1479 << at_most_one_buffer_.size() << "."
1480 << " dtime: " << dtime
1481 << " wtime: " << wall_timer.Get();
1482 return true;
1483}
1484
1485// Note that as a side effect this also do a full "failed literal probing"
1486// using the binary implication graph only.
1487//
1488// TODO(user): Track which literal have new implications, and only process
1489// the antecedents of these.
// NOTE(review): this is the transitive-reduction pass per the log message at
// the end; its signature (original line 1490) and original lines 1500/1672
// are missing from this rendered listing -- confirm against clause.cc.
1491 DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
1492 if (time_limit_->LimitReached()) return true;
1493 if (!DetectEquivalences()) return false;
1494
1495 // TODO(user): the situation with fixed variable is not really "clean".
1496 // Simplify the code so we are sure we don't run into issue or have to deal
1497 // with any of that here.
1498 if (time_limit_->LimitReached()) return true;
1499 if (!Propagate(trail_)) return false;
1501 DCHECK(InvariantsAreOk());
1502 if (time_limit_->LimitReached()) return true;
1503
1504 log_info |= VLOG_IS_ON(1);
1505 WallTimer wall_timer;
1506 wall_timer.Start();
1507
1508 int64_t num_fixed = 0;
1509 int64_t num_new_redundant_implications = 0;
1510 bool aborted = false;
1511 tmp_removed_.clear();
1512 work_done_in_mark_descendants_ = 0;
1513 int marked_index = 0;
1514
1515 // For each node we do a graph traversal and only keep the literals
1516 // at maximum distance 1. This only works because we have a DAG when ignoring
1517 // the "redundant" literal marked by DetectEquivalences(). Note that we also
1518 // need no duplicates in the implications list for correctness which is also
1519 // guaranteed by DetectEquivalences().
1520 //
1521 // TODO(user): We should be able to reuse some propagation like it is done for
1522 // tree-look. Once a node is processed, we just need to process a node that
1523 // implies it. Test if we can make this faster. Alternatively, only clear
1524 // a part of is_marked_ (after the first child of root in reverse topo order).
1525 //
1526 // TODO(user): Can we exploit the fact that the implication graph is a
1527 // skew-symmetric graph (isomorphic to its transposed) so that we do less
1528 // work?
1529 const LiteralIndex size(implications_.size());
1530 LiteralIndex previous = kNoLiteralIndex;
1531 for (const LiteralIndex root : reverse_topological_order_) {
1532 // In most situation reverse_topological_order_ contains no redundant, fixed
1533 // or removed variables. But the reverse_topological_order_ is only
1534 // recomputed when new binary are added to the graph, not when new variable
1535 // are fixed.
1536 if (is_redundant_[root]) continue;
1537 if (trail_->Assignment().LiteralIsAssigned(Literal(root))) continue;
1538
1539 auto& direct_implications = implications_[root];
1540 if (direct_implications.empty()) continue;
1541
1542 // This is a "poor" version of the tree look stuff, but it does show good
1543 // improvement. If we just processed one of the child of root, we don't
1544 // need to re-explore it.
1545 //
1546 // TODO(user): Another optim we can do is that we never need to expand
1547 // any node with a reverse topo order smaller or equal to the min of the
1548 // ones in this list.
1549 bool clear_previous_reachability = true;
1550 for (const Literal direct_child : direct_implications) {
1551 if (direct_child.Index() == previous) {
1552 clear_previous_reachability = false;
1553 is_marked_.Clear(previous);
1554 break;
1555 }
1556 }
1557 if (clear_previous_reachability) {
1558 is_marked_.ClearAndResize(size);
1559 marked_index = 0;
1560 }
1561 previous = root;
1562
1563 for (const Literal direct_child : direct_implications) {
1564 if (is_redundant_[direct_child]) continue;
1565 if (is_marked_[direct_child]) continue;
1566
1567 // This is a corner case where because of equivalent literal, root
1568 // appear in implications_[root], we will remove it below.
1569 if (direct_child.Index() == root) continue;
1570
1571 // When this happens, then root must be false, we handle this just after
1572 // the loop.
1573 if (direct_child.NegatedIndex() == root) {
1574 is_marked_.Set(direct_child);
1575 break;
1576 }
1577
1578 MarkDescendants(direct_child);
1579
1580 // We have a DAG, so direct_child could only be marked first.
1581 is_marked_.Clear(direct_child);
1582 }
1583 DCHECK(!is_marked_[root])
1584 << "DetectEquivalences() should have removed cycles!";
1585 is_marked_.Set(root);
1586
1587 // Also mark all the ones reachable through the root AMOs.
1588 if (root < at_most_ones_.size()) {
1589 auto is_marked = is_marked_.BitsetView();
1590 for (const int start : at_most_ones_[root]) {
1591 for (const Literal l : AtMostOne(start)) {
1592 if (l.Index() == root) continue;
1593 if (!is_marked[l.Negated()] && !is_redundant_[l.Negated()]) {
1594 is_marked_.SetUnsafe(is_marked, l.Negated());
1595 MarkDescendants(l.Negated());
1596 }
1597 }
1598 }
1599 }
1600
1601 // Failed literal probing. If both x and not(x) are marked then root must be
1602 // false. Note that because we process "roots" in reverse topological order,
1603 // we will fix the LCA of x and not(x) first.
1604 const auto& marked_positions = is_marked_.PositionsSetAtLeastOnce();
1605 for (; marked_index < marked_positions.size(); ++marked_index) {
1606 const LiteralIndex i = marked_positions[marked_index];
1607 if (is_marked_[Literal(i).NegatedIndex()]) {
1608 // We tested that at the beginning.
1609 DCHECK(!trail_->Assignment().LiteralIsAssigned(Literal(root)));
1610
1611 // We propagate right away the binary implications so that we do not
1612 // need to consider all antecedents of root in the transitive
1613 // reduction.
1614 ++num_fixed;
1615 if (!FixLiteral(Literal(root).Negated())) return false;
1616 break;
1617 }
1618 }
1619
1620 // Note that direct_implications will be cleared by
1621 // RemoveFixedVariables() that will need to inspect it to completely
1622 // remove Literal(root) from all lists.
1623 if (trail_->Assignment().LiteralIsAssigned(Literal(root))) continue;
1624
1625 // Only keep the non-marked literal (and the redundant one which are never
1626 // marked). We mark root to remove it in the corner case where it was
1627 // there.
1628 int new_size = 0;
1629 for (const Literal l : direct_implications) {
1630 if (!is_marked_[l]) {
1631 direct_implications[new_size++] = l;
1632 } else {
1633 tmp_removed_.push_back({Literal(root), l});
1634 DCHECK(!is_redundant_[l]);
1635 }
1636 }
1637 const int diff = direct_implications.size() - new_size;
1638 direct_implications.resize(new_size);
1639 direct_implications.shrink_to_fit();
1640 num_new_redundant_implications += diff;
1641 num_implications_ -= diff;
1642
1643 // Abort if the computation involved is too big.
1644 if (work_done_in_mark_descendants_ > 1e8) {
1645 aborted = true;
1646 break;
1647 }
1648 }
1649
1650 is_marked_.ClearAndResize(size);
1651
1652 // If we aborted early, we might no longer have both a=>b and not(b)=>not(a).
1653 // This is not desirable has some algo relies on this invariant. We fix this
1654 // here.
1655 if (aborted) {
1656 absl::flat_hash_set<std::pair<LiteralIndex, LiteralIndex>> removed;
1657 for (const auto [a, b] : tmp_removed_) {
1658 removed.insert({a.Index(), b.Index()});
1659 }
1660 for (LiteralIndex i(0); i < implications_.size(); ++i) {
1661 int new_size = 0;
1662 const LiteralIndex negated_i = Literal(i).NegatedIndex();
1663 auto& implication = implications_[i];
1664 for (const Literal l : implication) {
1665 if (removed.contains({l.NegatedIndex(), negated_i})) continue;
1666 implication[new_size++] = l;
1667 }
1668 implication.resize(new_size);
1669 }
1670 }
1671 if (num_fixed > 0) {
 // NOTE(review): the body of this if (original line 1672) is missing from
 // this rendered listing -- presumably the fixed-variable cleanup call;
 // confirm against clause.cc.
1673 }
1674 DCHECK(InvariantsAreOk());
1675
1676 gtl::STLClearObject(&tmp_removed_);
1677 const double dtime = 1e-8 * work_done_in_mark_descendants_;
1678 time_limit_->AdvanceDeterministicTime(dtime);
1679 num_redundant_implications_ += num_new_redundant_implications;
1680 LOG_IF(INFO, log_info) << "Transitive reduction removed "
1681 << num_new_redundant_implications << " literals. "
1682 << num_fixed << " fixed. " << num_implications_
1683 << " implications left. " << implications_.size()
1684 << " literals." << " dtime: " << dtime
1685 << " wtime: " << wall_timer.Get()
1686 << (aborted ? " Aborted." : "");
1687 return true;
1688}
1689
1690namespace {
1691
1692int ElementInIntersectionOrMinusOne(absl::Span<const int> a,
1693 absl::Span<const int> b) {
1694 DCHECK(std::is_sorted(a.begin(), a.end()));
1695 DCHECK(std::is_sorted(b.begin(), b.end()));
1696 if (a.empty() || b.empty()) return -1;
1697 int i = 0;
1698 int j = 0;
1699 while (true) {
1700 if (a[i] == b[j]) return a[i];
1701 if (a[i] < b[j]) {
1702 if (++i == a.size()) return -1;
1703 } else {
1704 if (++j == b.size()) return -1;
1705 }
1706 }
1707}
1708
1709} // namespace
1710
1711std::vector<std::pair<int, int>>
1712BinaryImplicationGraph::FilterAndSortAtMostOnes(
1713 absl::Span<std::vector<Literal>> at_most_ones) {
1714 // We starts by processing larger constraints first.
1715 // But we want the output order to be stable.
1716 std::vector<std::pair<int, int>> index_size_vector;
1717 const int num_amos = at_most_ones.size();
1718 index_size_vector.reserve(num_amos);
1719 for (int index = 0; index < num_amos; ++index) {
1720 std::vector<Literal>& clique = at_most_ones[index];
1721 if (clique.size() <= 1) continue;
1722
1723 // Note(user): Because we always use literal with the smallest variable
1724 // indices as representative, this make sure that if possible, we express
1725 // the clique in term of user provided variable (that are always created
1726 // first).
1727 //
1728 // Remap the clique to only use representative.
1729 for (Literal& ref : clique) {
1730 DCHECK_LT(ref.Index(), representative_of_.size());
1731 const LiteralIndex rep = representative_of_[ref];
1732 if (rep == kNoLiteralIndex) continue;
1733 ref = Literal(rep);
1734 }
1735
1736 // We skip anything that can be presolved further as the code below do
1737 // not handle duplicate well.
1738 //
1739 // TODO(user): Shall we presolve it here?
1740 bool skip = false;
1741 std::sort(clique.begin(), clique.end());
1742 for (int i = 1; i < clique.size(); ++i) {
1743 if (clique[i] == clique[i - 1] || clique[i] == clique[i - i].Negated()) {
1744 skip = true;
1745 break;
1746 }
1747 }
1748 if (skip) continue;
1749
1750 index_size_vector.push_back({index, clique.size()});
1751 }
1752 std::stable_sort(
1753 index_size_vector.begin(), index_size_vector.end(),
1754 [](const std::pair<int, int> a, const std::pair<int, int>& b) {
1755 return a.second > b.second;
1756 });
1757 return index_size_vector;
1758}
1759
    std::vector<std::vector<Literal>>* at_most_ones,
    int64_t max_num_explored_nodes) {
  // Expands each at-most-one into a (hopefully maximal) clique of the
  // implication graph and clears any at-most-one that becomes included in an
  // extended one.
  //
  // The code below assumes a DAG.
  if (!DetectEquivalences()) return false;
  work_done_in_mark_descendants_ = 0;

  // Statistics reported via VLOG(1) at the end.
  int num_extended = 0;
  int num_removed = 0;
  int num_added = 0;

  // Data to detect inclusion of base amo into extend amo.
  std::vector<int> detector_clique_index;
  InclusionDetector detector(storage, time_limit_);
  detector.SetWorkLimit(1e9);

  std::vector<int> dense_index_to_index;
      max_cliques_containing(implications_.size());

  // Process the at-most-ones by decreasing size; duplicates and entries that
  // can still be presolved were filtered out.
  const std::vector<std::pair<int, int>> index_size_vector =
      FilterAndSortAtMostOnes(absl::MakeSpan(*at_most_ones));

  absl::flat_hash_set<int> cannot_be_removed;
  std::vector<bool> was_extended(at_most_ones->size(), false);
  for (const auto& [index, old_size] : index_size_vector) {
    std::vector<Literal>& clique = (*at_most_ones)[index];
    if (time_limit_->LimitReached()) break;

    // Special case for clique of size 2, we don't expand them if they
    // are included in an already added clique.
    if (clique.size() == 2) {
      DCHECK_NE(clique[0], clique[1]);
      const int dense_index = ElementInIntersectionOrMinusOne(
          max_cliques_containing[clique[0]], max_cliques_containing[clique[1]]);
      if (dense_index >= 0) {
        const int superset_index = dense_index_to_index[dense_index];
        if (was_extended[superset_index]) {
          cannot_be_removed.insert(superset_index);
        }
        ++num_removed;
        clique.clear();
        continue;
      }
    }

    // Save the non-extended version as possible subset.
    // TODO(user): Detect on the fly is superset already exist.
    detector_clique_index.push_back(index);
    detector.AddPotentialSubset(storage.AddLiterals(clique));

    // We only expand the clique as long as we didn't spend too much time.
    if (work_done_in_mark_descendants_ < max_num_explored_nodes) {
      clique = ExpandAtMostOne(clique, max_num_explored_nodes);
    }

    // Save the extended version as possible superset.
    detector_clique_index.push_back(index);
    detector.AddPotentialSuperset(storage.AddLiterals(clique));

    // Also index clique for size 2 quick lookup.
    const int dense_index = dense_index_to_index.size();
    dense_index_to_index.push_back(index);
    for (const Literal l : clique) {
      max_cliques_containing[l].push_back(dense_index);
    }

    if (clique.size() > old_size) {
      was_extended[index] = true;
      ++num_extended;
    }
    ++num_added;
  }

  // Remove clique (before extension) that are included in an extended one.
  detector.DetectInclusions([&](int subset, int superset) {
    const int subset_index = detector_clique_index[subset];
    const int superset_index = detector_clique_index[superset];
    if (subset_index == superset_index) return;

    // Abort if one was already deleted.
    if ((*at_most_ones)[subset_index].empty()) return;
    if ((*at_most_ones)[superset_index].empty()) return;

    // If an extended clique already cover a deleted one, we cannot try to
    // remove it by looking at its non-extended version.
    if (cannot_be_removed.contains(subset_index)) return;

    ++num_removed;
    (*at_most_ones)[subset_index].clear();
    if (was_extended[superset_index]) cannot_be_removed.insert(superset_index);
  });

  if (num_extended > 0 || num_removed > 0 || num_added > 0) {
    VLOG(1) << "Clique Extended: " << num_extended
            << " Removed: " << num_removed << " Added: " << num_added
            << (work_done_in_mark_descendants_ > max_num_explored_nodes
                    ? " (Aborted)"
                    : "");
  }
  return true;
}
1863
    absl::Span<std::vector<Literal>> at_most_ones,
    int64_t max_num_explored_nodes, double* dtime) {
  // Expands each at-most-one (largest first) using the implication graph and
  // clears any at-most-one that becomes fully included in an extended one.
  // If dtime != nullptr, the deterministic time spent is accumulated there.
  //
  // The code below assumes a DAG.
  if (!DetectEquivalences()) return false;
  work_done_in_mark_descendants_ = 0;

  const std::vector<std::pair<int, int>> index_size_vector =
      FilterAndSortAtMostOnes(at_most_ones);

  // Data to detect inclusion of base amo into extend amo.
  std::vector<int> detector_clique_index;
  for (const auto& [index, old_size] : index_size_vector) {
    if (time_limit_->LimitReached()) break;
    detector_clique_index.push_back(index);
    storage.AddLiterals(at_most_ones[index]);
  }

  // We use an higher limit here as the code is faster.
  SubsetsDetector detector(storage, time_limit_);
  detector.SetWorkLimit(10 * max_num_explored_nodes);
  detector.IndexAllStorageAsSubsets();

  // Now try to expand one by one.
  //
  // TODO(user): We should process clique with elements in common together so
  // that we can reuse MarkDescendants() which is slow. We should be able to
  // "cache" a few of the last calls.
  std::vector<int> intersection;
  const int num_to_consider = index_size_vector.size();
  for (int subset_index = 0; subset_index < num_to_consider; ++subset_index) {
    const int index = index_size_vector[subset_index].first;
    std::vector<Literal>& clique = at_most_ones[index];
    if (clique.empty()) continue;  // Was deleted.

    if (work_done_in_mark_descendants_ > max_num_explored_nodes) break;
    if (detector.Stopped()) break;

    // We start with the clique in the "intersection".
    // This prefix will never change.
    int clique_i = 0;
    int next_index_to_try = 0;
    intersection.clear();
    // tmp_bitset_ mirrors the current clique content for O(1) membership
    // tests; intersection stores literal indices as plain ints.
    tmp_bitset_.ClearAndResize(LiteralIndex(implications_.size()));
    for (const Literal l : clique) {
      intersection.push_back(l.Index().value());
      tmp_bitset_.Set(l);
    }

    while (true) {
      if (work_done_in_mark_descendants_ > max_num_explored_nodes) break;
      if (detector.Stopped()) break;

      // Compute the intersection over all the elements (or only the new
      // ones) of this clique.
      //
      // Optimization: if clique_i > 0 && intersection.size() == clique.size()
      // we already know that we performed the max possible extension.
      if (clique_i > 0 && intersection.size() == clique.size()) {
        clique_i = clique.size();
      }
      for (; clique_i < clique.size(); ++clique_i) {
        const Literal l = clique[clique_i];

        is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
        MarkDescendants(l);

        if (clique_i == 0) {
          // Initially we have the clique + the negation of everything
          // propagated by l.
          for (const LiteralIndex index :
               is_marked_.PositionsSetAtLeastOnce()) {
            const Literal lit = Literal(index).Negated();
            if (!tmp_bitset_[lit]) {
              intersection.push_back(lit.Index().value());
            }
          }
        } else {
          // We intersect with the negation of everything propagated by l.
          // Note that we always keep the clique in case some implications
          // were not added to the graph.
          int new_size = 0;
          const int old_size = intersection.size();
          for (int i = 0; i < old_size; ++i) {
            if (i == next_index_to_try) {
              next_index_to_try = new_size;
            }
            const int index = intersection[i];
            const Literal lit = Literal(LiteralIndex(index));
            if (tmp_bitset_[lit] || is_marked_[lit.Negated()]) {
              intersection[new_size++] = index;
            }
          }
          intersection.resize(new_size);
        }

        // We can abort early as soon as there is no extra literal than the
        // initial clique.
        if (intersection.size() <= clique.size()) break;
      }

      // Should contain the original clique. If there are no more entries,
      // then we will not extend this clique. However, we still call
      // FindSubsets() in order to remove fully included ones.
      CHECK_GE(intersection.size(), clique.size());

      // Look for elements included in the intersection.
      // Note that we clear elements fully included at the same time.
      //
      // TODO(user): next_index_to_try help, but we might still rescan most of
      // the one-watcher list of intersection[next_index_to_try], we could be
      // a bit faster here.
      int num_extra = 0;
      detector.FindSubsets(intersection, &next_index_to_try, [&](int subset) {
        if (subset == subset_index) {
          detector.StopProcessingCurrentSubset();
          return;
        }

        num_extra = 0;
        for (const int index : storage[subset]) {
          const LiteralIndex lit_index = LiteralIndex(index);
          if (tmp_bitset_[lit_index]) continue;  // In clique.
          tmp_bitset_.Set(lit_index);
          clique.push_back(Literal(lit_index));  // extend.
          ++num_extra;
        }
        if (num_extra == 0) {
          // Fully included -- remove.
          at_most_ones[detector_clique_index[subset]].clear();
          detector.StopProcessingCurrentSubset();
          return;
        }

        detector.StopProcessingCurrentSuperset();  // Finish.
      });

      // No extension: end loop.
      if (num_extra == 0) break;
    }
  }
  if (dtime != nullptr) {
    *dtime +=
        1e-8 * work_done_in_mark_descendants_ + 1e-9 * detector.work_done();
  }
  return true;
}
2012
// Expands the given at-most-one into a larger one by repeatedly adding the
// negation of a literal implied by all current members (restricted to
// literals allowed by `can_be_included`). When `use_weight` is true, the
// candidate with the largest (slightly randomized) lp value is preferred;
// otherwise candidates with a smaller literal index come first.
template <bool use_weight>
    const absl::Span<const Literal> at_most_one,
    const util_intops::StrongVector<LiteralIndex, bool>& can_be_included,
    const util_intops::StrongVector<LiteralIndex, double>& expanded_lp_values) {
  std::vector<Literal> clique(at_most_one.begin(), at_most_one.end());
  std::vector<LiteralIndex> intersection;
  const int64_t old_work = work_done_in_mark_descendants_;
  for (int i = 0; i < clique.size(); ++i) {
    // Do not spend too much time here.
    if (work_done_in_mark_descendants_ - old_work > 1e8) break;

    is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
    MarkDescendants(clique[i]);
    if (i == 0) {
      // Seed the intersection with everything implied by the first member
      // whose negation is allowed to enter the clique.
      for (const LiteralIndex index : is_marked_.PositionsSetAtLeastOnce()) {
        if (can_be_included[Literal(index).NegatedIndex()]) {
          intersection.push_back(index);
        }
      }
      for (const Literal l : clique) is_marked_.Clear(l.NegatedIndex());
    }

    // Keep only the candidates also implied by clique[i].
    int new_size = 0;
    is_marked_.Clear(clique[i]);
    is_marked_.Clear(clique[i].NegatedIndex());
    for (const LiteralIndex index : intersection) {
      if (!is_marked_[index]) continue;
      intersection[new_size++] = index;
    }
    intersection.resize(new_size);
    if (intersection.empty()) break;

    // Expand? The negation of any literal in the intersection is a valid way
    // to extend the clique.
    if (i + 1 == clique.size()) {
      // Heuristic: use literal with largest lp value. We randomize slightly.
      int index = -1;
      double max_lp = 0.0;
      for (int j = 0; j < intersection.size(); ++j) {
        // If we don't use weight, we prefer variable that comes first.
        const double lp =
            use_weight
                ? expanded_lp_values[Literal(intersection[j]).NegatedIndex()] +
                      absl::Uniform<double>(*random_, 0.0, 1e-4)
                : can_be_included.size() - intersection[j].value();
        if (index == -1 || lp > max_lp) {
          index = j;
          max_lp = lp;
        }
      }
      if (index != -1) {
        clique.push_back(Literal(intersection[index]).Negated());
        std::swap(intersection.back(), intersection[index]);
        intersection.pop_back();
      }
    }
  }
  return clique;
}
2073
// Explicitly instantiate both variants (use_weight = true / false) so they
// are compiled here and usable from other translation units.
template std::vector<Literal>
    const absl::Span<const Literal> at_most_one,
    const util_intops::StrongVector<LiteralIndex, bool>& can_be_included,
    const util_intops::StrongVector<LiteralIndex, double>& expanded_lp_values);
template std::vector<Literal>
    const absl::Span<const Literal> at_most_one,
    const util_intops::StrongVector<LiteralIndex, bool>& can_be_included,
    const util_intops::StrongVector<LiteralIndex, double>& expanded_lp_values);
2085
// This function and the generated cuts serve two purposes:
// 1/ If a new clause of size 2 was discovered and not included in the LP, we
// will add it.
// 2/ The more classical clique cut separation algorithm.
//
// Note that once 1/ is performed, any literal close to 1.0 in the lp shouldn't
// be in the same clique as a literal with positive weight. So for step 2, we
// only really need to consider fractional variables.
const std::vector<std::vector<Literal>>&
    absl::Span<const Literal> literals, absl::Span<const double> lp_values,
    absl::Span<const double> reduced_costs) {
  // We only want to generate a cut with literals from the LP, not extra ones.
  const int num_literals = implications_.size();
  util_intops::StrongVector<LiteralIndex, bool> can_be_included(num_literals,
                                                                false);
      num_literals, 0.0);
      num_literals, 0.0);
  const int size = literals.size();
  for (int i = 0; i < size; ++i) {
    const Literal l = literals[i];
    can_be_included[l] = true;
    can_be_included[l.NegatedIndex()] = true;

    // Expand lp values to both polarities: value(not(l)) = 1 - value(l).
    const double value = lp_values[i];
    expanded_lp_values[l] = value;
    expanded_lp_values[l.NegatedIndex()] = 1.0 - value;

    // This is used for extending clique-cuts.
    // In most situations, we will only extend the cuts with literal at zero,
    // and we prefer "low" reduced cost first, so we negate it. Variable with
    // high reduced costs will likely stay that way and are of less interest in
    // a clique cut. At least that is my interpretation.
    //
    // TODO(user): For large problems or when we base the clique from a newly
    // added and violated 2-clique, we might consider only a subset of
    // fractional variables, so we might need to include fractional variable
    // first, but then their rc should be zero, so it should be already kind of
    // doing that.
    //
    // Remark: This seems to have a huge impact to the cut performance!
    const double rc = reduced_costs[i];
    heuristic_weights[l] = -rc;
    heuristic_weights[l.NegatedIndex()] = rc;
  }

  // We want highest sum first.
  struct Candidate {
    Literal a;
    Literal b;
    double sum;
    bool operator<(const Candidate& other) const { return sum > other.sum; }
  };
  std::vector<Candidate> candidates;

  // First heuristic. Currently we only consider violated at most one of size 2,
  // and extend them. Right now, the code is a bit slow to try too many at every
  // LP node so it is why we are defensive like this. Note also that because we
  // currently still statically add the initial implications, this will only add
  // cut based on newly learned binary clause. Or the one that were not added
  // to the relaxation in the first place.
  std::vector<Literal> fractional_literals;
  for (int i = 0; i < size; ++i) {
    Literal current_literal = literals[i];
    double current_value = lp_values[i];
    if (trail_->Assignment().LiteralIsAssigned(current_literal)) continue;
    if (is_redundant_[current_literal]) continue;

    // Normalize to the polarity whose lp value is >= 0.5.
    if (current_value < 0.5) {
      current_literal = current_literal.Negated();
      current_value = 1.0 - current_value;
    }

    if (current_value < 0.99) {
      fractional_literals.push_back(current_literal);
    }

    // We consider only one candidate for each current_literal.
    LiteralIndex best = kNoLiteralIndex;
    double best_value = 0.0;
    for (const Literal l : implications_[current_literal]) {
      if (!can_be_included[l]) continue;
      // An implication current_literal => l gives the at-most-one
      // (current_literal, not(l)); it is violated when the lp activity
      // exceeds 1.
      const double activity =
          current_value + expanded_lp_values[l.NegatedIndex()];
      if (activity <= 1.01) continue;
      const double v = activity + absl::Uniform<double>(*random_, 0.0, 1e-4);
      if (best == kNoLiteralIndex || v > best_value) {
        best_value = v;
        best = l.NegatedIndex();
      }
    }
    if (best != kNoLiteralIndex) {
      const double activity = current_value + expanded_lp_values[best];
      candidates.push_back({current_literal, Literal(best), activity});
    }
  }

  // Do not generate too many cuts at once.
  const int kMaxNumberOfCutPerCall = 10;
  std::sort(candidates.begin(), candidates.end());
  if (candidates.size() > kMaxNumberOfCutPerCall) {
    candidates.resize(kMaxNumberOfCutPerCall);
  }

  // Expand to a maximal at most one each candidates before returning them.
  // Note that we only expand using literal from the LP.
  tmp_cuts_.clear();
  for (const Candidate& candidate : candidates) {
    tmp_cuts_.push_back(ExpandAtMostOneWithWeight(
        {candidate.a, candidate.b}, can_be_included, heuristic_weights));
  }

  // Once we processed new implications, also add "proper" clique cuts.
  // We can generate a small graph and separate cut efficiently there.
  if (fractional_literals.size() > 1) {
    // Lets permute this randomly and truncate if we have too many variables.
    // Since we use bitset it is good to have a multiple of 64 there.
    //
    // TODO(user): Prefer more fractional variables.
    const int max_graph_size = 1024;
    if (fractional_literals.size() > max_graph_size) {
      std::shuffle(fractional_literals.begin(), fractional_literals.end(),
                   *random_);
      fractional_literals.resize(max_graph_size);
    }

    // Each fractional literal and its negation get two consecutive dense
    // node indices (2k and 2k + 1).
    bron_kerbosch_.Initialize(fractional_literals.size() * 2);

    // Prepare a dense mapping.
    int i = 0;
    tmp_mapping_.resize(implications_.size(), -1);
    for (const Literal l : fractional_literals) {
      bron_kerbosch_.SetWeight(i, expanded_lp_values[l]);
      tmp_mapping_[l] = i++;
      bron_kerbosch_.SetWeight(i, expanded_lp_values[l.Negated()]);
      tmp_mapping_[l.Negated()] = i++;
    }

    // Copy the implication subgraph and remap it to a dense indexing.
    //
    // TODO(user): Treat at_most_one more efficiently. We can collect them
    // and scan each of them just once.
    for (const Literal base : fractional_literals) {
      for (const Literal l : {base, base.Negated()}) {
        const int from = tmp_mapping_[l];
        for (const Literal next : DirectImplications(l)) {
          // l => next so (l + not(next) <= 1).
          const int to = tmp_mapping_[next.Negated()];
          if (to != -1) {
            bron_kerbosch_.AddEdge(from, to);
          }
        }
      }
    }

    // Before running the algo, compute the transitive closure.
    // The graph shouldn't be too large, so this should be fast enough.
    bron_kerbosch_.TakeTransitiveClosureOfImplicationGraph();

    bron_kerbosch_.SetWorkLimit(1e8);
    bron_kerbosch_.SetMinimumWeight(1.001);
    std::vector<std::vector<int>> cliques = bron_kerbosch_.Run();

    // If we have many candidates, we will only expand the first few with
    // maximum weights.
    const int max_num_per_batch = 5;
    std::vector<std::pair<int, double>> with_weight =
        bron_kerbosch_.GetMutableIndexAndWeight();
    if (with_weight.size() > max_num_per_batch) {
      std::sort(
          with_weight.begin(), with_weight.end(),
          [](const std::pair<int, double>& a, const std::pair<int, double>& b) {
            return a.second > b.second;
          });
      with_weight.resize(max_num_per_batch);
    }

    std::vector<Literal> at_most_one;
    for (const auto [index, weight] : with_weight) {
      // Convert dense node indices back to literals (2k -> l, 2k+1 -> not(l)).
      at_most_one.clear();
      for (const int i : cliques[index]) {
        const Literal l = fractional_literals[i / 2];
        at_most_one.push_back(i % 2 == 1 ? l.Negated() : l);
      }

      // Expand and add clique.
      //
      // TODO(user): Expansion is pretty slow. Given that the base clique can
      // share literal being part of the same amo, we should be able to speed
      // that up, we don't want to scan an amo twice basically.
      tmp_cuts_.push_back(ExpandAtMostOneWithWeight(
          at_most_one, can_be_included, heuristic_weights));
    }

    // Clear the dense mapping.
    for (const Literal l : fractional_literals) {
      tmp_mapping_[l] = -1;
      tmp_mapping_[l.Negated()] = -1;
    }
  }

  return tmp_cuts_;
}
2292
// TODO(user): Use deterministic limit.
// TODO(user): Explore the graph instead of just looking at full amo, especially
// since we expand small ones.
//
// Greedily covers `literals` with the stored at-most-ones: we repeatedly take
// the at-most-one having the largest intersection with the still-uncovered
// literals, and move that intersection to a contiguous range of *literals so
// that it can be returned as a span.
std::vector<absl::Span<const Literal>>
BinaryImplicationGraph::HeuristicAmoPartition(std::vector<Literal>* literals) {
  std::vector<absl::Span<const Literal>> result;

      implications_.size(), false);
  for (const Literal l : *literals) to_consider[l] = true;

  // Priority queue of (intersection_size, start_of_amo).
  std::priority_queue<std::pair<int, int>> pq;

  // Compute, for each at-most-one that might overlap, its overlapping size
  // and store its start in the at_most_one_buffer_.
  //
  // This is in O(num_literal in amo).
  absl::flat_hash_set<int> explored_amo;
  for (const Literal l : *literals) {
    if (l.Index() >= at_most_ones_.size()) continue;
    for (const int start : at_most_ones_[l]) {
      // Each at-most-one is processed at most once.
      const auto [_, inserted] = explored_amo.insert(start);
      if (!inserted) continue;

      int intersection_size = 0;
      for (const Literal l : AtMostOne(start)) {
        if (to_consider[l]) ++intersection_size;
      }
      if (intersection_size > 1) {
        pq.push({intersection_size, start});
      }

      // Abort early if we are done.
      if (intersection_size == literals->size()) break;
    }
  }

  // Consume AMOs, update size.
  int num_processed = 0;
  while (!pq.empty()) {
    const auto [old_size, start] = pq.top();
    pq.pop();

    // Recompute size. The queue entries can be stale since sizes shrink as
    // literals get covered; stale entries are lazily re-inserted.
    int intersection_size = 0;
    for (const Literal l : AtMostOne(start)) {
      if (to_consider[l]) ++intersection_size;
    }
    if (intersection_size != old_size) {
      // re-add with new size.
      if (intersection_size > 1) {
        pq.push({intersection_size, start});
      }
      continue;
    }

    // Mark the literal of that at most one at false.
    for (const Literal l : AtMostOne(start)) {
      to_consider[l] = false;
    }

    // Extract the intersection by moving it in
    // [num_processed, num_processed + intersection_size).
    const int span_start = num_processed;
    for (int i = num_processed; i < literals->size(); ++i) {
      if (to_consider[(*literals)[i]]) continue;
      std::swap((*literals)[num_processed], (*literals)[i]);
      ++num_processed;
    }
    result.push_back(absl::MakeSpan(literals->data() + span_start,
                                    num_processed - span_start));
  }
  return result;
}
2368
2369void BinaryImplicationGraph::MarkDescendants(Literal root) {
2370 auto* const stack = bfs_stack_.data();
2371 auto is_marked = is_marked_.BitsetView();
2372 auto is_redundant = is_redundant_.const_view();
2373 if (is_redundant[root]) return;
2374
2375 int stack_size = 1;
2376 stack[0] = root;
2377 is_marked_.Set(root);
2378 const int amo_size = static_cast<int>(at_most_ones_.size());
2379 auto implies_something = implies_something_.const_view();
2380 for (int j = 0; j < stack_size; ++j) {
2381 const Literal current = stack[j];
2382 if (!implies_something[current]) continue;
2383
2384 work_done_in_mark_descendants_ += implications_[current].size();
2385 for (const Literal l : implications_[current]) {
2386 if (!is_marked[l] && !is_redundant[l]) {
2387 is_marked_.SetUnsafe(is_marked, l);
2388 stack[stack_size++] = l;
2389 }
2390 }
2391
2392 if (current.Index() >= amo_size) continue;
2393 for (const int start : at_most_ones_[current]) {
2394 work_done_in_mark_descendants_ += AtMostOne(start).size();
2395 for (const Literal l : AtMostOne(start)) {
2396 if (l == current) continue;
2397 if (!is_marked[l.NegatedIndex()] && !is_redundant[l.NegatedIndex()]) {
2398 is_marked_.SetUnsafe(is_marked, l.NegatedIndex());
2399 stack[stack_size++] = l.Negated();
2400 }
2401 }
2402 }
2403 }
2404 work_done_in_mark_descendants_ += stack_size;
2405}
2406
2407std::vector<Literal> BinaryImplicationGraph::ExpandAtMostOne(
2408 const absl::Span<const Literal> at_most_one,
2409 int64_t max_num_explored_nodes) {
2410 std::vector<Literal> clique(at_most_one.begin(), at_most_one.end());
2411
2412 // Optim.
2413 for (const Literal l : clique) {
2414 if (!implies_something_[l]) {
2415 return clique;
2416 }
2417 }
2418
2419 // TODO(user): Improve algorithm. If we do a dfs, we can know if a literal
2420 // has no descendant in the current intersection. We can keep such literal
2421 // marked.
2422 std::vector<LiteralIndex> intersection;
2423 for (int i = 0; i < clique.size(); ++i) {
2424 if (work_done_in_mark_descendants_ > max_num_explored_nodes) break;
2425 is_marked_.ClearAndResize(LiteralIndex(implications_.size()));
2426 MarkDescendants(clique[i]);
2427
2428 if (i == 0) {
2429 intersection = is_marked_.PositionsSetAtLeastOnce();
2430 for (const Literal l : clique) is_marked_.Clear(l.NegatedIndex());
2431 }
2432
2433 int new_size = 0;
2434 is_marked_.Clear(clique[i].NegatedIndex()); // TODO(user): explain.
2435 for (const LiteralIndex index : intersection) {
2436 if (is_marked_[index]) intersection[new_size++] = index;
2437 }
2438 intersection.resize(new_size);
2439 if (intersection.empty()) break;
2440
2441 // TODO(user): If the intersection is small compared to the members of the
2442 // clique left to explore, we could look at the descendants of the negated
2443 // intersection instead.
2444
2445 // Expand?
2446 if (i + 1 == clique.size()) {
2447 clique.push_back(Literal(intersection.back()).Negated());
2448 intersection.pop_back();
2449 }
2450 }
2451 return clique;
2452}
2453
2454// TODO(user): lazy cleanup the lists on is_removed_?
2455// TODO(user): Mark fixed variable as is_removed_ for faster iteration?
    Literal literal) {
  // Computes the unassigned, non-removed literals directly implied by
  // `literal`, both via binary implications and via the at-most-ones
  // containing it (there, literal true forces other members to false).
  // The result is stored in the member direct_implications_ (deduplicated
  // with the in_direct_implications_ scratch bitset) and is invalidated by
  // the next call.
  DCHECK(!is_removed_[literal]);

  // Clear old state.
  for (const Literal l : direct_implications_) {
    in_direct_implications_[l] = false;
  }
  direct_implications_.clear();

  // Fill new state.
  const VariablesAssignment& assignment = trail_->Assignment();
  DCHECK(!assignment.LiteralIsAssigned(literal));
  for (const Literal l : implications_[literal]) {
    if (l == literal) continue;
    if (assignment.LiteralIsAssigned(l)) continue;
    if (!is_removed_[l] && !in_direct_implications_[l]) {
      in_direct_implications_[l] = true;
      direct_implications_.push_back(l);
    }
  }
  if (literal.Index() < at_most_ones_.size()) {
    if (is_redundant_[literal]) {
      DCHECK(at_most_ones_[literal].empty());
    }
    for (const int start : at_most_ones_[literal]) {
      for (const Literal l : AtMostOne(start)) {
        if (l == literal) continue;
        if (assignment.LiteralIsAssigned(l)) continue;
        if (!is_removed_[l] && !in_direct_implications_[l.NegatedIndex()]) {
          in_direct_implications_[l.NegatedIndex()] = true;
          direct_implications_.push_back(l.Negated());
        }
      }
    }
  }
  // Keep the cached size estimate in sync with the freshly computed set.
  estimated_sizes_[literal] = direct_implications_.size();
  return direct_implications_;
}
2495
2496absl::Span<const Literal> BinaryImplicationGraph::AtMostOne(int start) const {
2497 const int size = at_most_one_buffer_[start].Index().value();
2498 return absl::MakeSpan(&at_most_one_buffer_[start + 1], size);
2499}
2500
  // Returns a randomly chosen literal implied by `lhs`, or kNoLiteralIndex if
  // there is none. We first choose uniformly among the binary implication
  // list and the at-most-one lists containing lhs, then within the chosen
  // at-most-one (so the overall distribution is not necessarily uniform over
  // all implied literals).
  const int size1 = implications_[lhs].size();
  const int size2 =
      lhs.Index() < at_most_ones_.size() ? at_most_ones_[lhs].size() : 0;
  if (size1 + size2 == 0) return kNoLiteralIndex;

  const int choice = absl::Uniform<int>(*random_, 0, size1 + size2);
  if (choice < size1) {
    return implications_[lhs][choice].Index();
  }

  // In an at-most-one containing lhs, every other member must be false when
  // lhs is true, so its negation is implied.
  const absl::Span<const Literal> amo =
      AtMostOne(at_most_ones_[lhs][choice - size1]);
  CHECK_GE(amo.size(), 2);
  const int first_choice = absl::Uniform<int>(*random_, 0, amo.size());
  const Literal lit = amo[first_choice];
  if (lit != lhs) return lit.NegatedIndex();

  // We are unlucky and just picked the wrong literal: take a different one.
  // Drawing in [0, size - 1) and shifting keeps the choice uniform over the
  // remaining entries.
  int next_choice = absl::Uniform<int>(*random_, 0, amo.size() - 1);
  if (next_choice >= first_choice) {
    next_choice += 1;
  }
  CHECK_NE(amo[next_choice], lhs);
  return amo[next_choice].NegatedIndex();
}
2527
    bool* is_unsat) {
  // Looks for a literal l implied both by literal(var) and by its negation:
  // such an l must be true, so it is fixed here. Returns true iff the
  // propagation trail advanced. *is_unsat is set if fixing a literal leads
  // to a conflict.
  const int saved_index = propagation_trail_index_;
  DCHECK_EQ(propagation_trail_index_, trail_->Index());  // Propagation done.

  const VariablesAssignment& assignment = trail_->Assignment();
  if (assignment.VariableIsAssigned(var)) return false;

  const Literal literal(var, true);
  direct_implications_of_negated_literal_ =
      DirectImplications(literal.Negated());
  DirectImplications(literal);  // Fill in_direct_implications_.
  for (const Literal l : direct_implications_of_negated_literal_) {
    if (in_direct_implications_[l]) {
      // not(l) => literal => l.
      if (!FixLiteral(l)) {
        *is_unsat = true;
        return false;
      }
    }
  }

  return propagation_trail_index_ > saved_index;
}
2552
    BooleanVariable var) {
  // Estimates how many implications removing `var` would create: one per
  // pair (a => var, var => b), minus one per pair corresponding to an
  // equivalent variable.
  const Literal literal(var, true);
  int64_t result = 0;
  direct_implications_of_negated_literal_ =
      DirectImplications(literal.Negated());
  const int64_t s1 = DirectImplications(literal).size();
  for (const Literal l : direct_implications_of_negated_literal_) {
    result += s1;

    // We should have dealt with that in FindFailedLiteralAroundVar().
    DCHECK(!in_direct_implications_[l]);

    // l => literal => l: equivalent variable!
    if (in_direct_implications_[l.NegatedIndex()]) result--;
  }
  return result;
}
2571
// For all possible a => var => b, add a => b.
//
// The binary clauses involving var are exported to `postsolve_clauses` (with
// var's literal first) and deleted from the DRAT proof, then both literals of
// var are marked as removed and redundant.
    BooleanVariable var, std::deque<std::vector<Literal>>* postsolve_clauses) {
  const Literal literal(var, true);
  DCHECK(!is_removed_[literal.Index()]);
  DCHECK(!is_redundant_[literal.Index()]);

  direct_implications_of_negated_literal_ =
      DirectImplications(literal.Negated());
  for (const Literal b : DirectImplications(literal)) {
    if (is_removed_[b]) continue;
    DCHECK(!is_redundant_[b]);
    estimated_sizes_[b.NegatedIndex()]--;
    for (const Literal a_negated : direct_implications_of_negated_literal_) {
      if (a_negated.Negated() == b) continue;
      if (is_removed_[a_negated]) continue;
      AddImplication(a_negated.Negated(), b);
    }
  }
  // Also decrement the cached sizes on the negated side.
  for (const Literal a_negated : direct_implications_of_negated_literal_) {
    if (is_removed_[a_negated]) continue;
    DCHECK(!is_redundant_[a_negated]);
    estimated_sizes_[a_negated.NegatedIndex()]--;
  }

  // Notify the deletion to the proof checker and the postsolve.
  // Note that we want var first in these clauses for the postsolve.
  for (const Literal b : direct_implications_) {
    if (drat_proof_handler_ != nullptr) {
      drat_proof_handler_->DeleteClause({Literal(var, false), b});
    }
    postsolve_clauses->push_back({Literal(var, false), b});
  }
  for (const Literal a_negated : direct_implications_of_negated_literal_) {
    if (drat_proof_handler_ != nullptr) {
      drat_proof_handler_->DeleteClause({Literal(var, true), a_negated});
    }
    postsolve_clauses->push_back({Literal(var, true), a_negated});
  }

  // We need to remove any occurrence of var in our implication lists, this will
  // be delayed to the CleanupAllRemovedVariables() call.
  for (const LiteralIndex index : {literal.Index(), literal.NegatedIndex()}) {
    is_removed_[index] = true;
    implications_[index].clear();
    if (!is_redundant_[index]) {
      ++num_redundant_literals_;
      is_redundant_.Set(index);
    }
  }
}
2623
    std::deque<std::vector<Literal>>* postsolve_clauses) {
  // Frees the implication list of every redundant (i.e. equivalent) literal
  // after exporting its equivalence to the postsolve, and filters references
  // to redundant literals out of the remaining lists.
  for (LiteralIndex a(0); a < implications_.size(); ++a) {
    if (is_redundant_[a] && !is_removed_[a]) {
      postsolve_clauses->push_back(
      is_removed_[a] = true;
      gtl::STLClearObject(&(implications_[a]));
      continue;
    }

    // Compact in place, keeping only implications towards non-redundant
    // literals.
    int new_size = 0;
    auto& implication = implications_[a];
    for (const Literal l : implication) {
      if (!is_redundant_[l]) {
        implication[new_size++] = l;
      }
    }
    implication.resize(new_size);
  }
}
2645
  // Drops the implication lists of removed or assigned literals entirely,
  // filters removed / fixed-to-true targets out of the remaining lists, and
  // rebuilds the at-most-one index from scratch.
  const VariablesAssignment& assignment = trail_->Assignment();
  for (LiteralIndex a(0); a < implications_.size(); ++a) {
    if (is_removed_[a] || assignment.LiteralIsAssigned(Literal(a))) {
      if (DEBUG_MODE && assignment.LiteralIsTrue(Literal(a))) {
        // The code assumes that everything is already propagated.
        // Otherwise we will remove implications that didn't propagate yet!
        for (const Literal lit : implications_[a]) {
          DCHECK(trail_->Assignment().LiteralIsTrue(lit));
        }
      }

      gtl::STLClearObject(&(implications_[a]));
      continue;
    }

    int new_size = 0;
    auto& implication = implications_[a];
    for (const Literal l : implication) {
      // Drop removed targets and targets fixed to true (an implication
      // towards a true literal is trivially satisfied).
      if (!is_removed_[l] && !assignment.LiteralIsTrue(l)) {
        implication[new_size++] = l;
      }
    }
    implication.resize(new_size);
  }

  // Clean-up at most ones.
  at_most_ones_.clear();
  CleanUpAndAddAtMostOnes(/*base_index=*/0);

  // Note that to please the invariant() we also removed fixed literal above.
  DCHECK(InvariantsAreOk());
}
2679
2680bool BinaryImplicationGraph::InvariantsAreOk() {
2681 if (time_limit_->LimitReached()) return true;
2682 // We check that if a => b then not(b) => not(a).
2683 absl::flat_hash_set<std::pair<LiteralIndex, LiteralIndex>> seen;
2684 int num_redundant = 0;
2685 int num_fixed = 0;
2686 for (LiteralIndex a_index(0); a_index < implications_.size(); ++a_index) {
2687 if (trail_->Assignment().LiteralIsAssigned(Literal(a_index))) {
2688 ++num_fixed;
2689 if (!implications_[a_index].empty()) {
2690 LOG(ERROR) << "Fixed literal has non-cleared implications";
2691 return false;
2692 }
2693 continue;
2694 }
2695 if (is_removed_[a_index]) {
2696 if (!implications_[a_index].empty()) {
2697 LOG(ERROR) << "Removed literal has non-cleared implications";
2698 return false;
2699 }
2700 continue;
2701 }
2702 if (is_redundant_[a_index]) {
2703 ++num_redundant;
2704 if (implications_[a_index].size() != 1) {
2705 LOG(ERROR)
2706 << "Redundant literal should only point to its representative "
2707 << Literal(a_index) << " => " << implications_[a_index];
2708 return false;
2709 }
2710 }
2711 for (const Literal b : implications_[a_index]) {
2712 seen.insert({a_index, b.Index()});
2713 }
2714 }
2715
2716 // Check that reverse topo order is correct.
2717 util_intops::StrongVector<LiteralIndex, int> lit_to_order;
2718 if (is_dag_) {
2719 lit_to_order.assign(implications_.size(), -1);
2720 for (int i = 0; i < reverse_topological_order_.size(); ++i) {
2721 lit_to_order[reverse_topological_order_[i]] = i;
2722 }
2723 }
2724
2725 VLOG(2) << "num_redundant " << num_redundant;
2726 VLOG(2) << "num_fixed " << num_fixed;
2727 for (LiteralIndex a_index(0); a_index < implications_.size(); ++a_index) {
2728 const LiteralIndex not_a_index = Literal(a_index).NegatedIndex();
2729 for (const Literal b : implications_[a_index]) {
2730 if (is_removed_[b]) {
2731 LOG(ERROR) << "A removed literal still appear! " << Literal(a_index)
2732 << " => " << b;
2733 return false;
2734 }
2735
2736 if (!seen.contains({b.NegatedIndex(), not_a_index})) {
2737 LOG(ERROR) << "We have " << Literal(a_index) << " => " << b
2738 << " but not the reverse implication!";
2739 LOG(ERROR) << "redundant[a]: " << is_redundant_[a_index]
2740 << " assigned[a]: "
2741 << trail_->Assignment().LiteralIsAssigned(Literal(a_index))
2742 << " removed[a]: " << is_removed_[a_index]
2743 << " redundant[b]: " << is_redundant_[b] << " assigned[b]: "
2744 << trail_->Assignment().LiteralIsAssigned(b)
2745 << " removed[b]: " << is_removed_[b];
2746
2747 return false;
2748 }
2749
2750 // Test that if we have a dag, our topo order is correct.
2751 if (is_dag_ && !is_redundant_[b] && !is_redundant_[a_index]) {
2752 DCHECK_NE(lit_to_order[b], -1);
2753 DCHECK_LE(lit_to_order[b], lit_to_order[a_index]);
2754 }
2755 }
2756 }
2757
2758 // Check the at-most ones.
2759 absl::flat_hash_set<std::pair<LiteralIndex, int>> lit_to_start;
2760 for (LiteralIndex i(0); i < at_most_ones_.size(); ++i) {
2761 for (const int start : at_most_ones_[i]) {
2762 lit_to_start.insert({i, start});
2763 }
2764 }
2765
2766 for (int start = 0; start < at_most_one_buffer_.size();) {
2767 const absl::Span<const Literal> amo = AtMostOne(start);
2768 for (const Literal l : amo) {
2769 if (is_removed_[l]) {
2770 LOG(ERROR) << "A removed literal still appear in an amo " << l;
2771 return false;
2772 }
2773 if (!lit_to_start.contains({l, start})) {
2774 return false;
2775 }
2776 }
2777 start += amo.size() + 1;
2778 }
2779
2780 return true;
2781}
2782
2783absl::Span<const Literal> BinaryImplicationGraph::NextAtMostOne() {
2784 if (at_most_one_iterator_ >= at_most_one_buffer_.size()) {
2785 return absl::Span<const Literal>();
2786 }
2787
2788 const absl::Span<const Literal> result = AtMostOne(at_most_one_iterator_);
2789 at_most_one_iterator_ += result.size() + 1;
2790 return result;
2791}
2792
2793// ----- SatClause -----
2794
2795// static
2796SatClause* SatClause::Create(absl::Span<const Literal> literals) {
2797 DCHECK_GE(literals.size(), 2);
2798 SatClause* clause = reinterpret_cast<SatClause*>(
2799 ::operator new(sizeof(SatClause) + literals.size() * sizeof(Literal)));
2800 clause->size_ = literals.size();
2801 for (int i = 0; i < literals.size(); ++i) {
2802 clause->literals_[i] = literals[i];
2803 }
2804 return clause;
2805}
2806
2807// Note that for an attached clause, removing fixed literal is okay because if
2808// any of the watched literal is assigned, then the clause is necessarily true.
2810 const VariablesAssignment& assignment) {
2811 DCHECK(!IsRemoved());
2812 if (assignment.VariableIsAssigned(literals_[0].Variable()) ||
2813 assignment.VariableIsAssigned(literals_[1].Variable())) {
2814 DCHECK(IsSatisfied(assignment));
2815 return true;
2816 }
2817 int j = 2;
2818 while (j < size_ && !assignment.VariableIsAssigned(literals_[j].Variable())) {
2819 ++j;
2820 }
2821 for (int i = j; i < size_; ++i) {
2822 if (assignment.VariableIsAssigned(literals_[i].Variable())) {
2823 if (assignment.LiteralIsTrue(literals_[i])) return true;
2824 } else {
2825 std::swap(literals_[j], literals_[i]);
2826 ++j;
2827 }
2828 }
2829 size_ = j;
2830 return false;
2831}
2832
2833bool SatClause::IsSatisfied(const VariablesAssignment& assignment) const {
2834 for (const Literal literal : *this) {
2835 if (assignment.LiteralIsTrue(literal)) return true;
2836 }
2837 return false;
2838}
2839
2840std::string SatClause::DebugString() const {
2841 std::string result;
2842 for (const Literal literal : *this) {
2843 if (!result.empty()) result.append(" ");
2844 result.append(literal.DebugString());
2845 }
2846 return result;
2847}
2848
2849} // namespace sat
2850} // namespace operations_research
void FindStronglyConnectedComponents(const NodeIndex num_nodes, const Graph &graph, SccOutput *components)
double Get() const
Definition timer.h:46
void Start()
When Start() is called multiple times, only the most recent is used.
Definition timer.h:32
ConstView const_view() const
Definition bitset.h:460
void Set(IntegerType index)
Definition bitset.h:885
Bitset64< IntegerType >::View BitsetView()
A bit hacky for really hot loop.
Definition bitset.h:893
void SetUnsafe(typename Bitset64< IntegerType >::View view, IntegerType index)
Definition bitset.h:894
bool ComputeTransitiveReduction(bool log_info=false)
Definition clause.cc:1490
Literal RepresentativeOf(Literal l) const
Definition clause.h:624
void Resize(int num_variables)
Resizes the data structure.
Definition clause.cc:539
const std::vector< std::vector< Literal > > & GenerateAtMostOnesWithLargeWeight(absl::Span< const Literal > literals, absl::Span< const double > lp_values, absl::Span< const double > reduced_costs)
Definition clause.cc:2095
bool Propagate(Trail *trail) final
SatPropagator interface.
Definition clause.cc:811
void RemoveDuplicates()
Clean up implications list that might have duplicates.
Definition clause.cc:558
void MinimizeConflictWithReachability(std::vector< Literal > *c)
Definition clause.cc:903
std::vector< Literal > ExpandAtMostOneWithWeight(absl::Span< const Literal > at_most_one, const util_intops::StrongVector< LiteralIndex, bool > &can_be_included, const util_intops::StrongVector< LiteralIndex, double > &expanded_lp_values)
Same as ExpandAtMostOne() but try to maximize the weight in the clique.
Definition clause.cc:2014
bool TransformIntoMaxCliques(std::vector< std::vector< Literal > > *at_most_ones, int64_t max_num_explored_nodes=1e8)
Definition clause.cc:1760
int64_t NumImplicationOnVariableRemoval(BooleanVariable var)
Definition clause.cc:2553
bool IsEmpty() const final
Returns true if there is no constraints in this class.
Definition clause.h:524
bool AddBinaryClause(Literal a, Literal b)
Definition clause.cc:570
absl::Span< const Literal > NextAtMostOne()
Returns the next at_most_one, or a span of size 0 when finished.
Definition clause.cc:2783
bool AddImplication(Literal a, Literal b)
Definition clause.h:539
void RemoveBooleanVariable(BooleanVariable var, std::deque< std::vector< Literal > > *postsolve_clauses)
For all possible a => var => b, add a => b.
Definition clause.cc:2573
void MinimizeConflictExperimental(const Trail &trail, std::vector< Literal > *c)
Definition clause.cc:1064
void MinimizeConflictFirstWithTransitiveReduction(const Trail &trail, std::vector< Literal > *c, absl::BitGenRef random)
Definition clause.cc:1002
bool DetectEquivalences(bool log_info=false)
Definition clause.cc:1287
void MinimizeConflictFirst(const Trail &trail, std::vector< Literal > *c, SparseBitset< BooleanVariable > *marked)
Definition clause.cc:980
const std::vector< Literal > & DirectImplications(Literal literal)
Definition clause.cc:2456
absl::Span< const Literal > Reason(const Trail &trail, int trail_index, int64_t conflict_id) const final
Definition clause.cc:891
bool MergeAtMostOnes(absl::Span< std::vector< Literal > > at_most_ones, int64_t max_num_explored_nodes=1e8, double *dtime=nullptr)
Definition clause.cc:1864
void RemoveAllRedundantVariables(std::deque< std::vector< Literal > > *postsolve_clauses)
Definition clause.cc:2624
bool FindFailedLiteralAroundVar(BooleanVariable var, bool *is_unsat)
Definition clause.cc:2528
LiteralIndex RandomImpliedLiteral(Literal lhs)
Definition clause.cc:2501
ABSL_MUST_USE_RESULT bool AddAtMostOne(absl::Span< const Literal > at_most_one)
Definition clause.cc:630
std::vector< absl::Span< const Literal > > HeuristicAmoPartition(std::vector< Literal > *literals)
Definition clause.cc:2297
SatClause * InprocessingAddClause(absl::Span< const Literal > new_clause)
Definition clause.cc:437
void LazyDetach(SatClause *clause)
Definition clause.cc:316
void InprocessingRemoveClause(SatClause *clause)
These must only be called between [Detach/Attach]AllClauses() calls.
Definition clause.cc:380
bool IsRemovable(SatClause *const clause) const
Definition clause.h:225
void Detach(SatClause *clause)
Definition clause.cc:323
absl::Span< const Literal > Reason(const Trail &trail, int trail_index, int64_t conflict_id) const final
Definition clause.cc:211
bool AddClause(absl::Span< const Literal > literals, Trail *trail, int lbd)
Adds a new clause and perform initial propagation for this clause only.
Definition clause.cc:225
SatClause * NextClauseToProbe()
Returns the next clause to probe in round-robin order.
Definition clause.cc:529
ClauseManager(Model *model)
----- ClauseManager -----
Definition clause.cc:73
SatClause * ReasonClause(int trail_index) const
Definition clause.cc:217
SatClause * AddRemovableClause(absl::Span< const Literal > literals, Trail *trail, int lbd)
Definition clause.cc:233
void Resize(int num_variables)
Must be called before adding clauses referring to such variables.
Definition clause.cc:89
ABSL_MUST_USE_RESULT bool InprocessingFixLiteral(Literal true_literal)
This one do not need the clause to be detached.
Definition clause.cc:361
void Attach(SatClause *clause, Trail *trail)
Definition clause.cc:296
ABSL_MUST_USE_RESULT bool InprocessingRewriteClause(SatClause *clause, absl::Span< const Literal > new_clause)
Definition clause.cc:389
bool Propagate(Trail *trail) final
SatPropagator API.
Definition clause.cc:202
int AddLiterals(const std::vector< L > &wrapped_values)
Definition util.h:771
size_t size() const
Size of the "key" space, always in [0, size()).
Definition util.h:832
void reserve(int size)
Reserve memory if it is already known or tightly estimated.
Definition util.h:87
void AddClause(absl::Span< const Literal > clause)
void DeleteClause(absl::Span< const Literal > clause)
void DetectInclusions(const std::function< void(int subset, int superset)> &process)
Definition inclusion.h:282
void SetWorkLimit(uint64_t work_limit)
Definition inclusion.h:86
std::string DebugString() const
Definition sat_base.h:100
LiteralIndex NegatedIndex() const
Definition sat_base.h:92
LiteralIndex Index() const
Definition sat_base.h:91
BooleanVariable Variable() const
Definition sat_base.h:87
static SatClause * Create(absl::Span< const Literal > literals)
----- SatClause -----
Definition clause.cc:2796
absl::Span< const Literal > AsSpan() const
Returns a Span<> representation of the clause.
Definition clause.h:99
const Literal * begin() const
Allows for range based iteration: for (Literal literal : clause) {}.
Definition clause.h:78
bool IsSatisfied(const VariablesAssignment &assignment) const
Definition clause.cc:2833
int size() const
Number of literals in the clause.
Definition clause.h:69
std::string DebugString() const
Definition clause.cc:2840
bool RemoveFixedLiteralsAndTestIfTrue(const VariablesAssignment &assignment)
Definition clause.cc:2809
SatPropagator(const std::string &name)
Definition sat_base.h:535
util_intops::StrongVector< LiteralIndex, absl::InlinedVector< int32_t, 6 > > AtMostOnes
Definition clause.cc:1182
SccGraph(SccFinder *finder, Implications *graph, AtMostOnes *at_most_ones, std::vector< Literal > *at_most_one_buffer)
Definition clause.cc:1188
int64_t work_done_
For the deterministic time.
Definition clause.cc:1272
util_intops::StrongVector< LiteralIndex, absl::InlinedVector< Literal, 6 > > Implications
Definition clause.cc:1180
StronglyConnectedComponentsFinder< int32_t, SccGraph, CompactVectorVector< int32_t, int32_t > > SccFinder
Definition clause.cc:1184
std::vector< Literal > to_fix_
All these literals where detected to be true during the SCC computation.
Definition clause.cc:1269
const std::vector< int32_t > & operator[](int32_t node) const
Definition clause.cc:1196
void FindSubsets(absl::Span< const int > superset, int *next_index_to_try, const std::function< void(int subset)> &process)
Definition inclusion.h:480
void SetWorkLimit(uint64_t work_limit)
Definition inclusion.h:176
std::vector< Literal > * MutableConflict()
Definition sat_base.h:433
void Enqueue(Literal true_literal, int propagator_id)
Definition sat_base.h:317
void EnqueueWithUnitReason(Literal true_literal)
Specific Enqueue() version for a fixed variable.
Definition sat_base.h:328
const AssignmentInfo & Info(BooleanVariable var) const
Definition sat_base.h:463
void SetCurrentPropagatorId(int propagator_id)
Definition sat_base.h:307
void FastEnqueue(Literal true_literal)
Definition sat_base.h:310
const VariablesAssignment & Assignment() const
Definition sat_base.h:462
bool VariableIsAssigned(BooleanVariable var) const
Returns true iff the given variable is assigned.
Definition sat_base.h:196
bool LiteralIsAssigned(Literal literal) const
Definition sat_base.h:191
bool LiteralIsFalse(Literal literal) const
Definition sat_base.h:185
bool LiteralIsTrue(Literal literal) const
Definition sat_base.h:188
void assign(size_type n, const value_type &val)
void push_back(const value_type &val)
const bool DEBUG_MODE
Definition macros.h:26
int RemoveIf(RepeatedPtrField< T > *array, const Pred &pr)
void STLSortAndRemoveDuplicates(T *v, const LessFunc &less_func)
Definition stl_util.h:58
void STLDeleteElements(T *container)
Definition stl_util.h:372
void STLClearObject(T *obj)
Definition stl_util.h:123
const LiteralIndex kNoLiteralIndex(-1)
In SWIG mode, we don't want anything besides these top-level includes.
trees with all degrees equal to
#define IF_STATS_ENABLED(instructions)
Definition stats.h:417
#define SCOPED_TIME_STAT(stats)
Definition stats.h:418