old_precedences_propagator.cc
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdint.h>

#include <algorithm>
#include <deque>
#include <string>
#include <utility>
#include <vector>

#include "absl/cleanup/cleanup.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/log/vlog_is_on.h"
#include "absl/types/span.h"
#include "ortools/sat/integer.h"
#include "ortools/util/bitset.h"

namespace operations_research {
namespace sat {

namespace {

void AppendLowerBoundReasonIfValid(IntegerVariable var,
                                   const IntegerTrail& i_trail,
                                   std::vector<IntegerLiteral>* reason) {
  if (var != kNoIntegerVariable) {
    reason->push_back(i_trail.LowerBoundAsLiteral(var));
  }
}

}  // namespace

PrecedencesPropagator::~PrecedencesPropagator() {
  if (!VLOG_IS_ON(1)) return;
  if (shared_stats_ == nullptr) return;
  std::vector<std::pair<std::string, int64_t>> stats;
  stats.push_back({"precedences/num_cycles", num_cycles_});
  stats.push_back({"precedences/num_pushes", num_pushes_});
  stats.push_back(
      {"precedences/num_enforcement_pushes", num_enforcement_pushes_});
  shared_stats_->AddStats(stats);
}

bool PrecedencesPropagator::Propagate() {
  while (propagation_trail_index_ < trail_->Index()) {
    const Literal literal = (*trail_)[propagation_trail_index_++];
    if (literal.Index() >= literal_to_new_impacted_arcs_.size()) continue;

    // IMPORTANT: Because of the way Untrail() works, we need to add all the
    // potential arcs before we can abort. This is why we iterate twice here.
    for (const ArcIndex arc_index :
         literal_to_new_impacted_arcs_[literal.Index()]) {
      if (--arc_counts_[arc_index] == 0) {
        const ArcInfo& arc = arcs_[arc_index];
        PushConditionalRelations(arc);
        impacted_arcs_[arc.tail_var].push_back(arc_index);
      }
    }

    // Iterate again to check for a propagation and indirectly update
    // modified_vars_.
    for (const ArcIndex arc_index :
         literal_to_new_impacted_arcs_[literal.Index()]) {
      if (arc_counts_[arc_index] > 0) continue;
      const ArcInfo& arc = arcs_[arc_index];
      const IntegerValue new_head_lb =
          integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc);
      if (new_head_lb > integer_trail_->LowerBound(arc.head_var)) {
        if (!EnqueueAndCheck(arc, new_head_lb, trail_)) return false;
      }
    }
  }

  // Do the actual propagation of the IntegerVariable bounds.
  InitializeBFQueueWithModifiedNodes();
  if (!BellmanFordTarjan(trail_)) return false;

  // We can only test that no propagation is left if we didn't enqueue new
  // literals in the presence of optional variables.
  //
  // TODO(user): Because of our code to deal with InPropagationLoop(), this is
  // not always true. Find a cleaner way to DCHECK() while not failing in this
  // corner case.
  if (/*DISABLES CODE*/ (false) &&
      propagation_trail_index_ == trail_->Index()) {
    DCHECK(NoPropagationLeft(*trail_));
  }

  // Propagate the presence literals of the arcs that can't be added.
  if (!PropagateOptionalArcs(trail_)) return false;

  // Clean up modified_vars_ to do as little as possible on the next call.
  modified_vars_.ClearAndResize(integer_trail_->NumIntegerVariables());
  return true;
}

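// An arc stores the relation tail + offset (+ offset_var) <= head, so
// propagating it raises the head's lower bound to lb(tail) + ArcOffset(arc).
// For example (illustrative numbers only), lb(tail) = 3 with offset = 2
// pushes lb(head) to at least 5.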
bool PrecedencesPropagator::PropagateOutgoingArcs(IntegerVariable var) {
  CHECK_NE(var, kNoIntegerVariable);
  if (var >= impacted_arcs_.size()) return true;
  for (const ArcIndex arc_index : impacted_arcs_[var]) {
    const ArcInfo& arc = arcs_[arc_index];
    const IntegerValue new_head_lb =
        integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc);
    if (new_head_lb > integer_trail_->LowerBound(arc.head_var)) {
      if (!EnqueueAndCheck(arc, new_head_lb, trail_)) return false;
    }
  }
  return true;
}

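// When an arc becomes present, the function below exports the conditional
// relation tail_var - head_var <= -ArcOffset(arc), i.e.
// tail + ArcOffset(arc) <= head, valid while all presence literals hold.
// For example (illustrative), an arc with offset 4 and no offset_var pushes
// tail_var - head_var <= -4.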
// TODO(user): Remove literals fixed at level zero from here.
void PrecedencesPropagator::PushConditionalRelations(const ArcInfo& arc) {
  // We currently do not handle variable size in the reasons.
  // TODO(user): we could easily take a level zero ArcOffset() instead, or
  // add this to the reason though.
  if (arc.offset_var != kNoIntegerVariable) return;
  const IntegerValue offset = ArcOffset(arc);
  relations_->PushConditionalRelation(
      arc.presence_literals,
      LinearExpression2::Difference(arc.tail_var, arc.head_var), -offset);
}

void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) {
  if (propagation_trail_index_ > trail_index) {
    // This means that we already propagated all there is to propagate
    // at the level trail_index, so we can safely clear modified_vars_ in case
    // it wasn't already done.
    modified_vars_.ClearAndResize(integer_trail_->NumIntegerVariables());
  }
  while (propagation_trail_index_ > trail_index) {
    const Literal literal = trail[--propagation_trail_index_];
    if (literal.Index() >= literal_to_new_impacted_arcs_.size()) continue;
    for (const ArcIndex arc_index :
         literal_to_new_impacted_arcs_[literal.Index()]) {
      if (arc_counts_[arc_index]++ == 0) {
        const ArcInfo& arc = arcs_[arc_index];
        impacted_arcs_[arc.tail_var].pop_back();
      }
    }
  }
}

void PrecedencesPropagator::AdjustSizeFor(IntegerVariable i) {
  const int index = std::max(i.value(), NegationOf(i).value());
  if (index >= impacted_arcs_.size()) {
    // TODO(user): only watch the lower bound of the relevant variable instead
    // of watching everything in [0, max_index_of_variable_used_in_this_class).
    for (IntegerVariable var(impacted_arcs_.size()); var <= index; ++var) {
      watcher_->WatchLowerBound(var, watcher_id_);
    }
    impacted_arcs_.resize(index + 1);
    impacted_potential_arcs_.resize(index + 1);
  }
}

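// Registers the precedence tail + offset <= head (or, with an offset_var,
// tail + offset + offset_var <= head), enforced when all of the
// presence_literals are true. The function expands it into the equivalent
// internal arcs used by the propagation.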
void PrecedencesPropagator::AddArc(
    IntegerVariable tail, IntegerVariable head, IntegerValue offset,
    IntegerVariable offset_var, absl::Span<const Literal> presence_literals) {
  AdjustSizeFor(tail);
  AdjustSizeFor(head);
  if (offset_var != kNoIntegerVariable) AdjustSizeFor(offset_var);

  // This arc is present iff all the literals here are true.
  absl::InlinedVector<Literal, 6> enforcement_literals;
  {
    for (const Literal l : presence_literals) {
      enforcement_literals.push_back(l);
    }
    gtl::STLSortAndRemoveDuplicates(&enforcement_literals);

    if (trail_->CurrentDecisionLevel() == 0) {
      int new_size = 0;
      for (const Literal l : enforcement_literals) {
        if (trail_->Assignment().LiteralIsTrue(Literal(l))) {
          continue;  // At true, ignore this literal.
        } else if (trail_->Assignment().LiteralIsFalse(Literal(l))) {
          return;  // At false, completely ignore this arc.
        }
        enforcement_literals[new_size++] = l;
      }
      enforcement_literals.resize(new_size);
    }
  }

  if (head == tail) {
    // A self-arc is either plain SAT or plain UNSAT, or it forces something on
    // the given offset_var or presence_literal_index. In any case it could be
    // presolved into something more efficient.
    VLOG(1) << "Self arc! This could be presolved. "
            << "var:" << tail << " offset:" << offset
            << " offset_var:" << offset_var
            << " conditioned_by:" << presence_literals;
  }

  // Remove the offset_var if it is fixed.
  // TODO(user): We should also handle the case where tail or head is fixed.
  if (offset_var != kNoIntegerVariable) {
    const IntegerValue lb = integer_trail_->LevelZeroLowerBound(offset_var);
    if (lb == integer_trail_->LevelZeroUpperBound(offset_var)) {
      offset += lb;
      offset_var = kNoIntegerVariable;
    }
  }

  // Deal first with impacted_potential_arcs_/potential_arcs_.
  if (!enforcement_literals.empty()) {
    const OptionalArcIndex arc_index(potential_arcs_.size());
    potential_arcs_.push_back(
        {tail, head, offset, offset_var, enforcement_literals});
    impacted_potential_arcs_[tail].push_back(arc_index);
    impacted_potential_arcs_[NegationOf(head)].push_back(arc_index);
    if (offset_var != kNoIntegerVariable) {
      impacted_potential_arcs_[offset_var].push_back(arc_index);
    }
  }

  // Now deal with impacted_arcs_/arcs_.
  struct InternalArc {
    IntegerVariable tail_var;
    IntegerVariable head_var;
    IntegerVariable offset_var;
  };
  std::vector<InternalArc> to_add;
  if (offset_var == kNoIntegerVariable) {
    // a + offset <= b and -b + offset <= -a
    to_add.push_back({tail, head, kNoIntegerVariable});
    to_add.push_back({NegationOf(head), NegationOf(tail), kNoIntegerVariable});
  } else {
    // tail (a) and offset_var (b) are symmetric, so we add:
    //   - a + b + offset <= c
    to_add.push_back({tail, head, offset_var});
    to_add.push_back({offset_var, head, tail});
    //   - a - c + offset <= -b
    to_add.push_back({tail, NegationOf(offset_var), NegationOf(head)});
    to_add.push_back({NegationOf(head), NegationOf(offset_var), tail});
    //   - b - c + offset <= -a
    to_add.push_back({offset_var, NegationOf(tail), NegationOf(head)});
    to_add.push_back({NegationOf(head), NegationOf(tail), offset_var});
  }
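  // Reading key (illustrative): an InternalArc {t, h, o} encodes
  // t + offset (+ o) <= h. All six entries above are rewritings of
  // a + b + offset <= c; e.g. {tail, NegationOf(offset_var), NegationOf(head)}
  // reads a + offset + (-c) <= -b, which rearranges back to
  // a + b + offset <= c.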
  for (const InternalArc a : to_add) {
    // Since we add a new arc, we will need to consider its tail during the
    // next propagation. Note that the size of modified_vars_ will be
    // automatically updated when new integer variables are created since we
    // register it with IntegerTrail in this class constructor.
    //
    // TODO(user): Adding arcs and then calling Untrail() before Propagate()
    // will cause this mechanism to break. Find a more robust implementation.
    //
    // TODO(user): In some rare corner cases, rescanning the whole list of arcs
    // leaving tail_var can make AddVar() have a quadratic complexity where it
    // shouldn't. A better solution would be to see if this new arc currently
    // propagates something, and if it does, just update the lower bound of
    // a.head_var and let the normal "is modified" mechanism handle any
    // follow-up propagations.
    modified_vars_.Set(a.tail_var);

    // If a.head_var is optional, we can potentially remove some literals from
    // enforcement_literals.
    const ArcIndex arc_index(arcs_.size());
    arcs_.push_back(
        {a.tail_var, a.head_var, offset, a.offset_var, enforcement_literals});
    auto& presence_literals = arcs_.back().presence_literals;

    if (presence_literals.empty()) {
      impacted_arcs_[a.tail_var].push_back(arc_index);
    } else {
      for (const Literal l : presence_literals) {
        if (l.Index() >= literal_to_new_impacted_arcs_.size()) {
          literal_to_new_impacted_arcs_.resize(l.Index().value() + 1);
        }
        literal_to_new_impacted_arcs_[l.Index()].push_back(arc_index);
      }
    }

    if (trail_->CurrentDecisionLevel() == 0) {
      arc_counts_.push_back(presence_literals.size());
    } else {
      arc_counts_.push_back(0);
      for (const Literal l : presence_literals) {
        if (!trail_->Assignment().LiteralIsTrue(l)) {
          ++arc_counts_.back();
        }
      }
      CHECK(presence_literals.empty() || arc_counts_.back() > 0);
    }
  }
}

bool PrecedencesPropagator::AddPrecedenceWithOffsetIfNew(IntegerVariable i1,
                                                         IntegerVariable i2,
                                                         IntegerValue offset) {
  DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  if (i1 < impacted_arcs_.size() && i2 < impacted_arcs_.size()) {
    for (const ArcIndex index : impacted_arcs_[i1]) {
      const ArcInfo& arc = arcs_[index];
      if (arc.head_var == i2) {
        const IntegerValue current = ArcOffset(arc);
        if (offset <= current) {
          return false;
        } else {
          // TODO(user): Modify the arc in place!
        }
        break;
      }
    }
  }

  AddPrecedenceWithOffset(i1, i2, offset);
  return true;
}

// TODO(user): On jobshop problems with a lot of tasks per machine (500), this
// takes up a big chunk of the running time even before we find a solution.
// This is because, for each lower bound changed, we inspect 500 arcs even
// though they will never be propagated because the other bound is still at the
// horizon. Find an even sparser algorithm?
bool PrecedencesPropagator::PropagateOptionalArcs(Trail* trail) {
  for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) {
    // The variables are not in increasing order, so we need to continue.
    if (var >= impacted_potential_arcs_.size()) continue;

    // Note that we can currently check the same ArcInfo up to 3 times, once
    // for each of the arc variables: tail, NegationOf(head) and offset_var.
    for (const OptionalArcIndex arc_index : impacted_potential_arcs_[var]) {
      const ArcInfo& arc = potential_arcs_[arc_index];
      int num_not_true = 0;
      Literal to_propagate;
      for (const Literal l : arc.presence_literals) {
        if (!trail->Assignment().LiteralIsTrue(l)) {
          ++num_not_true;
          to_propagate = l;
        }
      }
      if (num_not_true != 1) continue;
      if (trail->Assignment().LiteralIsFalse(to_propagate)) continue;

      // Test if this arc can be present or not.
      // Important: arc.tail_var can be different from var here.
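      // For example (illustrative bounds): with lb(tail) = 10,
      // ArcOffset(arc) = 5 and ub(head) = 12, the arc cannot hold, so the
      // single unassigned presence literal is propagated to false below.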
      const IntegerValue tail_lb = integer_trail_->LowerBound(arc.tail_var);
      const IntegerValue head_ub = integer_trail_->UpperBound(arc.head_var);
      if (tail_lb + ArcOffset(arc) > head_ub) {
        integer_reason_.clear();
        integer_reason_.push_back(
            integer_trail_->LowerBoundAsLiteral(arc.tail_var));
        integer_reason_.push_back(
            integer_trail_->UpperBoundAsLiteral(arc.head_var));
        AppendLowerBoundReasonIfValid(arc.offset_var, *integer_trail_,
                                      &integer_reason_);
        literal_reason_.clear();
        for (const Literal l : arc.presence_literals) {
          if (l != to_propagate) literal_reason_.push_back(l.Negated());
        }
        ++num_enforcement_pushes_;
        if (!integer_trail_->EnqueueLiteral(to_propagate.Negated(),
                                            literal_reason_, integer_reason_)) {
          return false;
        }
      }
    }
  }
  return true;
}

IntegerValue PrecedencesPropagator::ArcOffset(const ArcInfo& arc) const {
  return arc.offset + (arc.offset_var == kNoIntegerVariable
                           ? IntegerValue(0)
                           : integer_trail_->LowerBound(arc.offset_var));
}

bool PrecedencesPropagator::EnqueueAndCheck(const ArcInfo& arc,
                                            IntegerValue new_head_lb,
                                            Trail* trail) {
  ++num_pushes_;
  DCHECK_GT(new_head_lb, integer_trail_->LowerBound(arc.head_var));

  // Compute the reason for new_head_lb.
  //
  // TODO(user): do as for clauses and keep the negation of
  // arc.presence_literals? I think we could change the integer.h API to accept
  // true literals like for IntegerVariable, it is really confusing currently.
  literal_reason_.clear();
  for (const Literal l : arc.presence_literals) {
    literal_reason_.push_back(l.Negated());
  }

  integer_reason_.clear();
  integer_reason_.push_back(integer_trail_->LowerBoundAsLiteral(arc.tail_var));
  AppendLowerBoundReasonIfValid(arc.offset_var, *integer_trail_,
                                &integer_reason_);

  // The code works without this block since Enqueue() below can already take
  // care of conflicts. However, it is better to deal with the conflict
  // ourselves because we can be smarter about the reason this way.
  //
  // The reason for a "precedence" conflict is always a linear reason
  // involving the tail lower bound, the head upper bound and possibly the
  // size lower bound. Because of that, we can use the RelaxLinearReason()
  // code.
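  // For example (illustrative numbers): with new_head_lb = 10 and
  // ub(head) = 7, the test below triggers with slack = 10 - 7 - 1 = 2, and
  // RelaxLinearReason() may weaken the reason bounds by up to that slack
  // while keeping the conflict valid.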
  if (new_head_lb > integer_trail_->UpperBound(arc.head_var)) {
    const IntegerValue slack =
        new_head_lb - integer_trail_->UpperBound(arc.head_var) - 1;
    integer_reason_.push_back(
        integer_trail_->UpperBoundAsLiteral(arc.head_var));
    std::vector<IntegerValue> coeffs(integer_reason_.size(), IntegerValue(1));
    integer_trail_->RelaxLinearReason(slack, coeffs, &integer_reason_);
    return integer_trail_->ReportConflict(literal_reason_, integer_reason_);
  }

  return integer_trail_->Enqueue(
      IntegerLiteral::GreaterOrEqual(arc.head_var, new_head_lb),
      literal_reason_, integer_reason_);
}

bool PrecedencesPropagator::NoPropagationLeft(const Trail& trail) const {
  const int num_nodes = impacted_arcs_.size();
  for (IntegerVariable var(0); var < num_nodes; ++var) {
    for (const ArcIndex arc_index : impacted_arcs_[var]) {
      const ArcInfo& arc = arcs_[arc_index];
      if (integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc) >
          integer_trail_->LowerBound(arc.head_var)) {
        return false;
      }
    }
  }
  return true;
}

void PrecedencesPropagator::InitializeBFQueueWithModifiedNodes() {
  // Sparse clear of the queue. TODO(user): only use the sparse version if
  // queue.size() is small or use SparseBitset.
  const int num_nodes = impacted_arcs_.size();
  bf_in_queue_.resize(num_nodes, false);
  for (const int node : bf_queue_) bf_in_queue_[node] = false;
  bf_queue_.clear();
  DCHECK(std::none_of(bf_in_queue_.begin(), bf_in_queue_.end(),
                      [](bool v) { return v; }));
  for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) {
    if (var >= num_nodes) continue;
    bf_queue_.push_back(var.value());
    bf_in_queue_[var.value()] = true;
  }
}

void PrecedencesPropagator::CleanUpMarkedArcsAndParents() {
  // To be sparse, we use the fact that each node with a parent must be in
  // modified_vars_.
  const int num_nodes = impacted_arcs_.size();
  for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) {
    if (var >= num_nodes) continue;
    const ArcIndex parent_arc_index = bf_parent_arc_of_[var.value()];
    if (parent_arc_index != -1) {
      arcs_[parent_arc_index].is_marked = false;
      bf_parent_arc_of_[var.value()] = -1;
      bf_can_be_skipped_[var.value()] = false;
    }
  }
  DCHECK(std::none_of(bf_parent_arc_of_.begin(), bf_parent_arc_of_.end(),
                      [](ArcIndex v) { return v != -1; }));
  DCHECK(std::none_of(bf_can_be_skipped_.begin(), bf_can_be_skipped_.end(),
                      [](bool v) { return v; }));
}

bool PrecedencesPropagator::DisassembleSubtree(
    int source, int target, std::vector<bool>* can_be_skipped) {
  // Note that we explore a tree, so we can do it in any order, and the one
  // below seems to be the fastest.
  tmp_vector_.clear();
  tmp_vector_.push_back(source);
  while (!tmp_vector_.empty()) {
    const int tail = tmp_vector_.back();
    tmp_vector_.pop_back();
    for (const ArcIndex arc_index : impacted_arcs_[IntegerVariable(tail)]) {
      const ArcInfo& arc = arcs_[arc_index];
      if (arc.is_marked) {
        arc.is_marked = false;  // mutable.
        if (arc.head_var.value() == target) return true;
        DCHECK(!(*can_be_skipped)[arc.head_var.value()]);
        (*can_be_skipped)[arc.head_var.value()] = true;
        tmp_vector_.push_back(arc.head_var.value());
      }
    }
  }
  return false;
}

void PrecedencesPropagator::AnalyzePositiveCycle(
    ArcIndex first_arc, Trail* trail, std::vector<Literal>* must_be_all_true,
    std::vector<Literal>* literal_reason,
    std::vector<IntegerLiteral>* integer_reason) {
  must_be_all_true->clear();
  literal_reason->clear();
  integer_reason->clear();

  // Follow bf_parent_arc_of_[] to find the cycle containing first_arc.
  const IntegerVariable first_arc_head = arcs_[first_arc].head_var;
  ArcIndex arc_index = first_arc;
  std::vector<ArcIndex> arc_on_cycle;

  // Just to be safe and avoid an infinite loop, we use the fact that the
  // maximum cycle size on a graph with n nodes is n. If we have more in the
  // code below, it means first_arc is not part of a cycle according to
  // bf_parent_arc_of_[], which should never happen.
  const int num_nodes = impacted_arcs_.size();
  while (arc_on_cycle.size() <= num_nodes) {
    arc_on_cycle.push_back(arc_index);
    const ArcInfo& arc = arcs_[arc_index];
    if (arc.tail_var == first_arc_head) break;
    arc_index = bf_parent_arc_of_[arc.tail_var.value()];
    CHECK_NE(arc_index, ArcIndex(-1));
  }
  CHECK_NE(arc_on_cycle.size(), num_nodes + 1) << "Infinite loop.";

  // Compute the reason for this cycle.
  IntegerValue sum(0);
  for (const ArcIndex arc_index : arc_on_cycle) {
    const ArcInfo& arc = arcs_[arc_index];
    sum += ArcOffset(arc);
    AppendLowerBoundReasonIfValid(arc.offset_var, *integer_trail_,
                                  integer_reason);
    for (const Literal l : arc.presence_literals) {
      literal_reason->push_back(l.Negated());
    }
  }
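  // A strictly positive sum along the cycle is what makes this a conflict:
  // chaining the arcs gives x >= x + sum for any variable x on the cycle
  // (e.g., illustrative offsets 2, 3 and -1 give sum = 4 > 0), which is
  // infeasible.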

  // TODO(user): what if the sum overflows? This is just a check so I guess
  // we don't really care, but fix the issue.
  CHECK_GT(sum, 0);
}

// Note that in our settings it is important to use an algorithm that tries to
// minimize the number of integer_trail_->Enqueue() calls as much as possible.
//
// TODO(user): The current algorithm is quite efficient, but there is probably
// still room for improvements.
bool PrecedencesPropagator::BellmanFordTarjan(Trail* trail) {
  const int num_nodes = impacted_arcs_.size();

  // These vectors are reset by CleanUpMarkedArcsAndParents(), so resize is ok.
  bf_can_be_skipped_.resize(num_nodes, false);
  bf_parent_arc_of_.resize(num_nodes, ArcIndex(-1));
  const auto cleanup =
      ::absl::MakeCleanup([this]() { CleanUpMarkedArcsAndParents(); });

  // The queue initialization is done by InitializeBFQueueWithModifiedNodes().
  while (!bf_queue_.empty()) {
    const int node = bf_queue_.front();
    bf_queue_.pop_front();
    bf_in_queue_[node] = false;

    // TODO(user): we don't need bf_can_be_skipped_ since we can detect this
    // if this node has a parent arc which is not marked. Investigate if it is
    // faster without the vector<bool>.
    //
    // TODO(user): An alternative algorithm is to remove all these nodes from
    // the queue instead of simply marking them. This should also lead to a
    // better "relaxation" order of the arcs. It is however a bit more work to
    // remove them since we need to track their position.
    if (bf_can_be_skipped_[node]) {
      DCHECK_NE(bf_parent_arc_of_[node], -1);
      DCHECK(!arcs_[bf_parent_arc_of_[node]].is_marked);
      continue;
    }

    const IntegerValue tail_lb =
        integer_trail_->LowerBound(IntegerVariable(node));
    for (const ArcIndex arc_index : impacted_arcs_[IntegerVariable(node)]) {
      const ArcInfo& arc = arcs_[arc_index];
      DCHECK_EQ(arc.tail_var, node);
      const IntegerValue candidate = tail_lb + ArcOffset(arc);
      if (candidate > integer_trail_->LowerBound(arc.head_var)) {
        if (!EnqueueAndCheck(arc, candidate, trail)) return false;

        // This is the Tarjan contribution to Bellman-Ford. This code detects
        // positive cycles, and because it disassembles the subtree while doing
        // so, the cost is amortized over the algorithm's execution. Another
        // advantage is that it marks the nodes explored here as skippable,
        // which avoids propagating them too early (knowing that they will
        // need to be propagated again later).
        if (DisassembleSubtree(arc.head_var.value(), arc.tail_var.value(),
                               &bf_can_be_skipped_)) {
          std::vector<Literal> must_be_all_true;
          AnalyzePositiveCycle(arc_index, trail, &must_be_all_true,
                               &literal_reason_, &integer_reason_);
          if (must_be_all_true.empty()) {
            ++num_cycles_;
            return integer_trail_->ReportConflict(literal_reason_,
                                                  integer_reason_);
          } else {
            gtl::STLSortAndRemoveDuplicates(&must_be_all_true);
            for (const Literal l : must_be_all_true) {
              if (trail_->Assignment().LiteralIsFalse(l)) {
                literal_reason_.push_back(l);
                return integer_trail_->ReportConflict(literal_reason_,
                                                      integer_reason_);
              }
            }
            for (const Literal l : must_be_all_true) {
              if (trail_->Assignment().LiteralIsTrue(l)) continue;
              if (!integer_trail_->EnqueueLiteral(l, literal_reason_,
                                                  integer_reason_)) {
                return false;
              }
            }

            // We just marked some optional variable as ignored, no need
            // to update bf_parent_arc_of_[].
            continue;
          }
        }

        // We need to enforce the invariant that only the arc indices in
        // bf_parent_arc_of_[] are marked (but not necessarily all of them
        // since we unmark some in DisassembleSubtree()).
        if (bf_parent_arc_of_[arc.head_var.value()] != -1) {
          arcs_[bf_parent_arc_of_[arc.head_var.value()]].is_marked = false;
        }

        // Tricky: We just enqueued the fact that the lower bound of head is
        // candidate. However, because the domain of head may be discrete, it
        // is possible that the lower bound of head is now higher than
        // candidate! If this is the case, we don't update bf_parent_arc_of_[]
        // so that we don't wrongly detect a positive weight cycle because of
        // this "extra push".
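        // For example (illustrative domain): if head's domain is {0, 5, 10}
        // and candidate = 3, the EnqueueAndCheck() above raised the lower
        // bound to 5, which is > candidate, so we take the "else" branch
        // below.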
        const IntegerValue new_bound = integer_trail_->LowerBound(arc.head_var);
        if (new_bound == candidate) {
          bf_parent_arc_of_[arc.head_var.value()] = arc_index;
          arcs_[arc_index].is_marked = true;
        } else {
          // We still unmark any previous dependency, since we have pushed the
          // value of arc.head_var further.
          bf_parent_arc_of_[arc.head_var.value()] = -1;
        }

        // We do not re-enqueue if we are in a propagation loop and new_bound
        // was not pushed to candidate or higher.
        bf_can_be_skipped_[arc.head_var.value()] = false;
        if (!bf_in_queue_[arc.head_var.value()] && new_bound >= candidate) {
          bf_queue_.push_back(arc.head_var.value());
          bf_in_queue_[arc.head_var.value()] = true;
        }
      }
    }
  }
  return true;
}

}  // namespace sat
}  // namespace operations_research