Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
all_different.cc
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/all_different.h"

#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>

#include "absl/container/btree_map.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "ortools/base/logging.h"
#include "ortools/graph/strongly_connected_components.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/model.h"
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_solver.h"
#include "ortools/util/sort.h"

namespace operations_research {
namespace sat {

std::function<void(Model*)> AllDifferentBinary(
    const std::vector<IntegerVariable>& vars) {
  return [=](Model* model) {
    // Fully encode all the given variables and construct a mapping value ->
    // list of literals, each indicating that a given variable takes this
    // value.
    //
    // Note that we use a map to always add the constraints in the same order.
    absl::btree_map<IntegerValue, std::vector<Literal>> value_to_literals;
    IntegerEncoder* encoder = model->GetOrCreate<IntegerEncoder>();
    for (const IntegerVariable var : vars) {
      model->Add(FullyEncodeVariable(var));
      for (const auto& entry : encoder->FullDomainEncoding(var)) {
        value_to_literals[entry.value].push_back(entry.literal);
      }
    }

    // Add an at-most-one constraint for each value.
    for (const auto& entry : value_to_literals) {
      if (entry.second.size() > 1) {
        model->Add(AtMostOneConstraint(entry.second));
      }
    }

    // If the number of values is equal to the number of variables, we have
    // a permutation. We can add a bool_or over the literals attached to each
    // value.
    if (value_to_literals.size() == vars.size()) {
      for (const auto& entry : value_to_literals) {
        model->Add(ClauseConstraint(entry.second));
      }
    }
  };
}
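
// Illustrative sketch (added for exposition, not part of the original file):
// for two variables x and y, both with domain {1, 2}, the decomposition above
// creates the literals [x == 1], [x == 2], [y == 1], [y == 2], posts
// AtMostOne({[x == 1], [y == 1]}) and AtMostOne({[x == 2], [y == 2]}), and,
// since there are exactly as many values as variables, also the clauses
// ([x == 1] or [y == 1]) and ([x == 2] or [y == 2]).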

std::function<void(Model*)> AllDifferentOnBounds(
    const std::vector<AffineExpression>& expressions) {
  return [=](Model* model) {
    if (expressions.empty()) return;
    auto* constraint = new AllDifferentBoundsPropagator(
        expressions, model->GetOrCreate<IntegerTrail>());
    constraint->RegisterWith(model->GetOrCreate<GenericLiteralWatcher>());
    model->TakeOwnership(constraint);
  };
}

std::function<void(Model*)> AllDifferentOnBounds(
    const std::vector<IntegerVariable>& vars) {
  return [=](Model* model) {
    if (vars.empty()) return;
    std::vector<AffineExpression> expressions;
    expressions.reserve(vars.size());
    for (const IntegerVariable var : vars) {
      expressions.push_back(AffineExpression(var));
    }
    auto* constraint = new AllDifferentBoundsPropagator(
        expressions, model->GetOrCreate<IntegerTrail>());
    constraint->RegisterWith(model->GetOrCreate<GenericLiteralWatcher>());
    model->TakeOwnership(constraint);
  };
}

std::function<void(Model*)> AllDifferentAC(
    const std::vector<IntegerVariable>& variables) {
  return [=](Model* model) {
    if (variables.size() < 3) return;

    auto* constraint = new AllDifferentConstraint(
        variables, model->GetOrCreate<IntegerEncoder>(),
        model->GetOrCreate<Trail>(), model->GetOrCreate<IntegerTrail>());
    constraint->RegisterWith(model->GetOrCreate<GenericLiteralWatcher>());
    model->TakeOwnership(constraint);
  };
}
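
// Usage sketch (added for exposition, not part of the original file): these
// factory functions return std::function<void(Model*)> objects, so they are
// posted through Model::Add(), the same pattern used above for
// AtMostOneConstraint() and ClauseConstraint(), e.g.
//   model->Add(AllDifferentBinary(vars));
//   model->Add(AllDifferentOnBounds(vars));
//   model->Add(AllDifferentAC(vars));  // Only propagates for 3+ variables.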

AllDifferentConstraint::AllDifferentConstraint(
    std::vector<IntegerVariable> variables, IntegerEncoder* encoder,
    Trail* trail, IntegerTrail* integer_trail)
    : num_variables_(variables.size()),
      variables_(std::move(variables)),
      trail_(trail),
      integer_trail_(integer_trail) {
  // Initialize literals cache.
  int64_t min_value = std::numeric_limits<int64_t>::max();
  int64_t max_value = std::numeric_limits<int64_t>::min();
  variable_min_value_.resize(num_variables_);
  variable_max_value_.resize(num_variables_);
  variable_literal_index_.resize(num_variables_);
  int num_fixed_variables = 0;
  for (int x = 0; x < num_variables_; x++) {
    variable_min_value_[x] = integer_trail_->LowerBound(variables_[x]).value();
    variable_max_value_[x] = integer_trail_->UpperBound(variables_[x]).value();

    // Compute value range of all variables.
    min_value = std::min(min_value, variable_min_value_[x]);
    max_value = std::max(max_value, variable_max_value_[x]);

    // FullyEncode does not like 1-value domains, so handle this case first.
    // TODO(user): Prune now, ignore these variables during solving.
    if (variable_min_value_[x] == variable_max_value_[x]) {
      num_fixed_variables++;
      variable_literal_index_[x].push_back(kTrueLiteralIndex);
      continue;
    }

    // Force full encoding if not already done.
    if (!encoder->VariableIsFullyEncoded(variables_[x])) {
      encoder->FullyEncodeVariable(variables_[x]);
    }

    // Fill the cache with literals; the default value is kFalseLiteralIndex.
    int64_t size = variable_max_value_[x] - variable_min_value_[x] + 1;
    variable_literal_index_[x].resize(size, kFalseLiteralIndex);
    for (const auto& entry : encoder->FullDomainEncoding(variables_[x])) {
      int64_t value = entry.value.value();
      // Can happen because of initial propagation!
      if (value < variable_min_value_[x] || variable_max_value_[x] < value) {
        continue;
      }
      variable_literal_index_[x][value - variable_min_value_[x]] =
          entry.literal.Index();
    }
  }
  min_all_values_ = min_value;
  num_all_values_ = max_value - min_value + 1;

  successor_.resize(num_variables_);
  variable_to_value_.assign(num_variables_, -1);
  visiting_.resize(num_variables_);
  variable_visited_from_.resize(num_variables_);
  residual_graph_successors_.resize(num_variables_ + num_all_values_ + 1);
  component_number_.resize(num_variables_ + num_all_values_ + 1);
}

void AllDifferentConstraint::RegisterWith(GenericLiteralWatcher* watcher) {
  const int id = watcher->Register(this);
  watcher->SetPropagatorPriority(id, 2);
  for (const auto& literal_indices : variable_literal_index_) {
    for (const LiteralIndex li : literal_indices) {
      // Watch only unbound literals.
      if (li >= 0 &&
          !trail_->Assignment().VariableIsAssigned(Literal(li).Variable())) {
        watcher->WatchLiteral(Literal(li), id);
        watcher->WatchLiteral(Literal(li).Negated(), id);
      }
    }
  }
}

LiteralIndex AllDifferentConstraint::VariableLiteralIndexOf(int x,
                                                            int64_t value) {
  return (value < variable_min_value_[x] || variable_max_value_[x] < value)
             ? kFalseLiteralIndex
             : variable_literal_index_[x][value - variable_min_value_[x]];
}

inline bool AllDifferentConstraint::VariableHasPossibleValue(int x,
                                                             int64_t value) {
  LiteralIndex li = VariableLiteralIndexOf(x, value);
  if (li == kFalseLiteralIndex) return false;
  if (li == kTrueLiteralIndex) return true;
  DCHECK_GE(li, 0);
  return !trail_->Assignment().LiteralIsFalse(Literal(li));
}

bool AllDifferentConstraint::MakeAugmentingPath(int start) {
  // Do a BFS and use visiting_ as a queue, with num_visited pointing at its
  // begin() and num_to_visit at its end().
  // To switch to the augmenting path once an unmatched value is found,
  // we remember the BFS tree in variable_visited_from_.
  int num_to_visit = 0;
  int num_visited = 0;
  // Enqueue start.
  visiting_[num_to_visit++] = start;
  variable_visited_[start] = true;
  variable_visited_from_[start] = -1;

  while (num_visited < num_to_visit) {
    // Dequeue node to visit.
    const int node = visiting_[num_visited++];

    for (const int value : successor_[node]) {
      if (value_visited_[value]) continue;
      value_visited_[value] = true;
      if (value_to_variable_[value] == -1) {
        // value is not matched: change the path from node to start, and
        // return.
        int path_node = node;
        int path_value = value;
        while (path_node != -1) {
          int old_value = variable_to_value_[path_node];
          variable_to_value_[path_node] = path_value;
          value_to_variable_[path_value] = path_node;
          path_node = variable_visited_from_[path_node];
          path_value = old_value;
        }
        return true;
      } else {
        // Enqueue the node matched to value.
        const int next_node = value_to_variable_[value];
        variable_visited_[next_node] = true;
        visiting_[num_to_visit++] = next_node;
        variable_visited_from_[next_node] = node;
      }
    }
  }
  return false;
}
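
// Illustrative sketch (added for exposition, not part of the original file):
// if variable 1 is unmatched and its possible values are {0, 1}, with value 0
// already matched to variable 0 and value 1 free, MakeAugmentingPath(1)
// immediately matches variable 1 to value 1. If both values were taken, the
// BFS would continue from the variables currently matched to them and, upon
// reaching a free value, flip every matching edge along the path recorded in
// variable_visited_from_.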

// The algorithm copies the solver state into successor_, which is used to
// compute a matching. If all variables can be matched, it generates the
// residual graph in separate vectors, computes its SCCs, and filters arcs
// variable -> value whenever the variable is not in the same SCC as the value.
// Explanations for failure and filtering are fine-grained:
// a failure is explained by a Hall set, i.e. dom(variables) \subseteq {values}
// with |variables| > |values|; filtering is explained by the Hall set that
// would appear if the variable were assigned to the value.
//
// TODO(user): If needed, there are several ways performance could be
// improved.
// If copying the variable state is too costly, it could be maintained instead.
// If the propagator has too many fruitless calls (without failing/pruning),
// we can remember the O(n) arcs used in the matching and the SCC
// decomposition, and guard calls to Propagate() if these arcs are still valid.
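//
// Illustrative sketch (added for exposition, not part of the original file):
// with x1 and x2 both in {1, 2} and x3 in {1, 2, 3}, the set {x1, x2} is a
// Hall set over the values {1, 2}: those two values are reserved for x1 and
// x2, so the propagator removes 1 and 2 from x3. If x3's domain were also
// {1, 2}, the three variables would form a Hall set of size 3 over only two
// values and the propagator would fail, using the corresponding literals as
// the explanation.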
bool AllDifferentConstraint::Propagate() {
  // Copy variable state to graph state.
  prev_matching_ = variable_to_value_;
  value_to_variable_.assign(num_all_values_, -1);
  variable_to_value_.assign(num_variables_, -1);
  for (int x = 0; x < num_variables_; x++) {
    successor_[x].clear();
    const int64_t min_value = integer_trail_->LowerBound(variables_[x]).value();
    const int64_t max_value = integer_trail_->UpperBound(variables_[x]).value();
    for (int64_t value = min_value; value <= max_value; value++) {
      if (VariableHasPossibleValue(x, value)) {
        const int offset_value = value - min_all_values_;
        // Forward-checking should propagate x != value.
        successor_[x].push_back(offset_value);
      }
    }
    if (successor_[x].size() == 1) {
      const int offset_value = successor_[x][0];
      if (value_to_variable_[offset_value] == -1) {
        value_to_variable_[offset_value] = x;
        variable_to_value_[x] = offset_value;
      }
    }
  }

  // Because we currently propagate all clauses before entering this
  // propagator, we know that this can't happen.
  if (DEBUG_MODE) {
    for (int x = 0; x < num_variables_; x++) {
      for (const int offset_value : successor_[x]) {
        if (value_to_variable_[offset_value] != -1 &&
            value_to_variable_[offset_value] != x) {
          LOG(FATAL) << "Should have been propagated by AllDifferentBinary()!";
        }
      }
    }
  }

  // Seed with the previous matching.
  for (int x = 0; x < num_variables_; x++) {
    if (variable_to_value_[x] != -1) continue;
    const int prev_value = prev_matching_[x];
    if (prev_value == -1 || value_to_variable_[prev_value] != -1) continue;

    if (VariableHasPossibleValue(x, prev_matching_[x] + min_all_values_)) {
      variable_to_value_[x] = prev_matching_[x];
      value_to_variable_[prev_matching_[x]] = x;
    }
  }

  // Compute max matching.
  int x = 0;
  for (; x < num_variables_; x++) {
    if (variable_to_value_[x] == -1) {
      value_visited_.assign(num_all_values_, false);
      variable_visited_.assign(num_variables_, false);
      MakeAugmentingPath(x);
    }
    if (variable_to_value_[x] == -1) break;  // No augmenting path exists.
  }

  // Fail if covering all variables is impossible.
  // Explain with the forbidden parts of the graph that prevent
  // MakeAugmentingPath from increasing the matching size.
  if (x < num_variables_) {
    // For now, explain all forbidden arcs.
    std::vector<Literal>* conflict = trail_->MutableConflict();
    conflict->clear();
    for (int y = 0; y < num_variables_; y++) {
      if (!variable_visited_[y]) continue;
      for (int value = variable_min_value_[y]; value <= variable_max_value_[y];
           value++) {
        const LiteralIndex li = VariableLiteralIndexOf(y, value);
        if (li >= 0 && !value_visited_[value - min_all_values_]) {
          DCHECK(trail_->Assignment().LiteralIsFalse(Literal(li)));
          conflict->push_back(Literal(li));
        }
      }
    }
    return false;
  }

  // The current matching is a valid solution, now try to filter values.
  // Build the residual graph and compute its SCCs.
  for (int x = 0; x < num_variables_; x++) {
    residual_graph_successors_[x].clear();
    for (const int succ : successor_[x]) {
      if (succ != variable_to_value_[x]) {
        residual_graph_successors_[x].push_back(num_variables_ + succ);
      }
    }
  }
  for (int offset_value = 0; offset_value < num_all_values_; offset_value++) {
    residual_graph_successors_[num_variables_ + offset_value].clear();
    if (value_to_variable_[offset_value] != -1) {
      residual_graph_successors_[num_variables_ + offset_value].push_back(
          value_to_variable_[offset_value]);
    }
  }
  const int dummy_node = num_variables_ + num_all_values_;
  residual_graph_successors_[dummy_node].clear();
  if (num_variables_ < num_all_values_) {
    for (int x = 0; x < num_variables_; x++) {
      residual_graph_successors_[dummy_node].push_back(x);
    }
    for (int offset_value = 0; offset_value < num_all_values_; offset_value++) {
      if (value_to_variable_[offset_value] == -1) {
        residual_graph_successors_[num_variables_ + offset_value].push_back(
            dummy_node);
      }
    }
  }

  // Compute SCCs and build the node -> component map.
  struct SccOutput {
    explicit SccOutput(std::vector<int>* c) : components(c) {}
    void emplace_back(int const* b, int const* e) {
      for (int const* it = b; it < e; ++it) {
        (*components)[*it] = num_components;
      }
      ++num_components;
    }
    int num_components = 0;
    std::vector<int>* components;
  };
  SccOutput scc_output(&component_number_);
  FindStronglyConnectedComponents(
      static_cast<int>(residual_graph_successors_.size()),
      residual_graph_successors_, &scc_output);

  // Remove arcs var -> val when var and val are not in the same SCC.
  for (int x = 0; x < num_variables_; x++) {
    if (successor_[x].size() == 1) continue;
    for (const int offset_value : successor_[x]) {
      const int value_node = offset_value + num_variables_;
      if (variable_to_value_[x] != offset_value &&
          component_number_[x] != component_number_[value_node] &&
          VariableHasPossibleValue(x, offset_value + min_all_values_)) {
        // We can deduce that x != value. To explain, force x == offset_value,
        // then find another assignment for the variable matched to
        // offset_value. It will fail: explaining why is the same as
        // explaining failure as above, and it is an explanation of x != value.
        value_visited_.assign(num_all_values_, false);
        variable_visited_.assign(num_variables_, false);
        // Undo x -> old_value and old_variable -> offset_value.
        const int old_variable = value_to_variable_[offset_value];
        variable_to_value_[old_variable] = -1;
        const int old_value = variable_to_value_[x];
        value_to_variable_[old_value] = -1;
        variable_to_value_[x] = offset_value;
        value_to_variable_[offset_value] = x;

        value_visited_[offset_value] = true;
        MakeAugmentingPath(old_variable);
        DCHECK_EQ(variable_to_value_[old_variable], -1);  // No reassignment.

        std::vector<Literal>* reason = trail_->GetEmptyVectorToStoreReason();
        for (int y = 0; y < num_variables_; y++) {
          if (!variable_visited_[y]) continue;
          for (int value = variable_min_value_[y];
               value <= variable_max_value_[y]; value++) {
            const LiteralIndex li = VariableLiteralIndexOf(y, value);
            if (li >= 0 && !value_visited_[value - min_all_values_]) {
              DCHECK(!VariableHasPossibleValue(y, value));
              reason->push_back(Literal(li));
            }
          }
        }

        const LiteralIndex li =
            VariableLiteralIndexOf(x, offset_value + min_all_values_);
        DCHECK_NE(li, kTrueLiteralIndex);
        DCHECK_NE(li, kFalseLiteralIndex);
        return trail_->EnqueueWithStoredReason(Literal(li).Negated());
      }
    }
  }

  return true;
}

AllDifferentBoundsPropagator::AllDifferentBoundsPropagator(
    const std::vector<AffineExpression>& expressions,
    IntegerTrail* integer_trail)
    : integer_trail_(integer_trail) {
  CHECK(!expressions.empty());

  // We need +2 for the sentinels.
  const int capacity = expressions.size() + 2;
  index_to_start_index_.resize(capacity);
  index_to_end_index_.resize(capacity);
  index_is_present_.resize(capacity, false);
  index_to_expr_.resize(capacity, kNoIntegerVariable);

  for (int i = 0; i < expressions.size(); ++i) {
    bounds_.push_back({expressions[i]});
    negated_bounds_.push_back({expressions[i].Negated()});
  }
}

bool AllDifferentBoundsPropagator::Propagate() {
  if (!PropagateLowerBounds()) return false;

  // Note that it is not required to swap back bounds_ and negated_bounds_.
  // TODO(user): investigate the impact.
  std::swap(bounds_, negated_bounds_);
  const bool result = PropagateLowerBounds();
  std::swap(bounds_, negated_bounds_);
  return result;
}

void AllDifferentBoundsPropagator::FillHallReason(IntegerValue hall_lb,
                                                  IntegerValue hall_ub) {
  integer_reason_.clear();
  const int limit = GetIndex(hall_ub);
  for (int i = GetIndex(hall_lb); i <= limit; ++i) {
    const AffineExpression expr = index_to_expr_[i];
    integer_reason_.push_back(expr.GreaterOrEqual(hall_lb));
    integer_reason_.push_back(expr.LowerOrEqual(hall_ub));
  }
}

int AllDifferentBoundsPropagator::FindStartIndexAndCompressPath(int index) {
  // First, walk the pointer until we find one pointing to itself.
  int start_index = index;
  while (true) {
    const int next = index_to_start_index_[start_index];
    if (start_index == next) break;
    start_index = next;
  }

  // Second, redo the same thing and make everyone point to the representative.
  while (true) {
    const int next = index_to_start_index_[index];
    if (start_index == next) break;
    index_to_start_index_[index] = start_index;
    index = next;
  }
  return start_index;
}
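
// Illustrative sketch (added for exposition, not part of the original file):
// if index_to_start_index_ currently chains 6 -> 5 -> 4 -> 4, then
// FindStartIndexAndCompressPath(6) returns the representative 4 and rewrites
// entry 6 to point directly at 4, so later lookups take a single step.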

bool AllDifferentBoundsPropagator::PropagateLowerBounds() {
  // Start by filling the cached bounds and sorting by increasing lb.
  for (CachedBounds& entry : bounds_) {
    entry.lb = integer_trail_->LowerBound(entry.expr);
    entry.ub = integer_trail_->UpperBound(entry.expr);
  }
  IncrementalSort(bounds_.begin(), bounds_.end(),
                  [](CachedBounds a, CachedBounds b) { return a.lb < b.lb; });

  // We will split the affine expressions, sorted by lb, into contiguous
  // windows with indices of the form [start, start + num_in_window).
  int start = 0;
  int num_in_window = 1;

  // Minimum lower bound in the current window.
  IntegerValue min_lb = bounds_.front().lb;

  const int size = bounds_.size();
  for (int i = 1; i < size; ++i) {
    const IntegerValue lb = bounds_[i].lb;

    // If the lower bounds of all the other variables are greater, then they
    // can never fall into a potential Hall interval formed by the variables
    // in the current window, so we can split the problem into independent
    // parts.
    if (lb <= min_lb + IntegerValue(num_in_window - 1)) {
      ++num_in_window;
      continue;
    }

    // Process the current window.
    if (num_in_window > 1) {
      absl::Span<CachedBounds> window(&bounds_[start], num_in_window);
      if (!PropagateLowerBoundsInternal(min_lb, window)) {
        return false;
      }
    }

    // Start of the next window.
    start = i;
    num_in_window = 1;
    min_lb = lb;
  }

  // Take care of the last window.
  if (num_in_window > 1) {
    absl::Span<CachedBounds> window(&bounds_[start], num_in_window);
    return PropagateLowerBoundsInternal(min_lb, window);
  }

  return true;
}
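
// Illustrative sketch (added for exposition, not part of the original file):
// with cached lower bounds {2, 2, 3, 10} (already sorted), the first three
// expressions form one window (each lb is at most min_lb + num_in_window - 1),
// while lb = 10 starts a new window, so the two groups are propagated
// independently.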

bool AllDifferentBoundsPropagator::PropagateLowerBoundsInternal(
    IntegerValue min_lb, absl::Span<CachedBounds> bounds) {
  hall_starts_.clear();
  hall_ends_.clear();

  // All cached lbs in bounds will be in [min_lb, min_lb + bounds_.size()).
  // Make sure we change our base_ so that GetIndex() fits in our buffers.
  base_ = min_lb - IntegerValue(1);

  // Sparse cleaning of index_is_present_.
  for (const int i : indices_to_clear_) {
    index_is_present_[i] = false;
  }
  indices_to_clear_.clear();

  // Sort bounds by increasing ub.
  std::sort(bounds.begin(), bounds.end(),
            [](CachedBounds a, CachedBounds b) { return a.ub < b.ub; });
  for (const CachedBounds entry : bounds) {
    const AffineExpression expr = entry.expr;

    // Note that it is important to use the cache to make sure GetIndex() is
    // not out of bounds in case integer_trail_->LowerBound() changed when we
    // pushed something.
    const IntegerValue lb = entry.lb;
    const int lb_index = GetIndex(lb);
    const bool value_is_covered = index_is_present_[lb_index];

    // Check if lb is in a Hall interval, and push it if this is the case.
    if (value_is_covered) {
      const int hall_index =
          std::lower_bound(hall_ends_.begin(), hall_ends_.end(), lb) -
          hall_ends_.begin();
      if (hall_index < hall_ends_.size() && hall_starts_[hall_index] <= lb) {
        const IntegerValue hs = hall_starts_[hall_index];
        const IntegerValue he = hall_ends_[hall_index];
        FillHallReason(hs, he);
        integer_reason_.push_back(expr.GreaterOrEqual(hs));
        if (!integer_trail_->SafeEnqueue(expr.GreaterOrEqual(he + 1),
                                         integer_reason_)) {
          return false;
        }
      }
    }

    // Update our internal representation of the non-consecutive intervals.
    //
    // If lb is not used, we add a node there, otherwise we add it to the
    // right of the interval that contains lb. In both cases, if there is an
    // interval to the left (resp. right) we merge them.
    int new_index = lb_index;
    int start_index = lb_index;
    int end_index = lb_index;
    if (value_is_covered) {
      start_index = FindStartIndexAndCompressPath(new_index);
      new_index = index_to_end_index_[start_index] + 1;
      end_index = new_index;
    } else {
      if (index_is_present_[new_index - 1]) {
        start_index = FindStartIndexAndCompressPath(new_index - 1);
      }
    }
    if (index_is_present_[new_index + 1]) {
      end_index = index_to_end_index_[new_index + 1];
      index_to_start_index_[new_index + 1] = start_index;
    }

    // Update the end of the representative.
    index_to_end_index_[start_index] = end_index;

    // This is the only place where we "add" a new node.
    {
      index_to_start_index_[new_index] = start_index;
      index_to_expr_[new_index] = expr;
      index_is_present_[new_index] = true;
      indices_to_clear_.push_back(new_index);
    }

    // In most situations, we cannot have a conflict now, because it should
    // have been detected before by pushing an interval lower bound past its
    // upper bound. However, it is possible that when we push one bound, other
    // bounds change. So if the upper bound is smaller than the current
    // interval end, we abort so that the conflict reason will be better on
    // the next call to the propagator.
    const IntegerValue end = GetValue(end_index);
    if (end > integer_trail_->UpperBound(expr)) return true;

    // If we have a new Hall interval, add it to the set. Note that it will
    // always be last, and if it overlaps some previous Hall intervals, it
    // always overlaps them fully.
    //
    // Note: it is okay to not use entry.ub here if we want to fetch the last
    // value, but in practice it shouldn't really change when we push a lower
    // bound and it is faster to use the cached entry.
    if (end == entry.ub) {
      const IntegerValue start = GetValue(start_index);
      while (!hall_starts_.empty() && start <= hall_starts_.back()) {
        hall_starts_.pop_back();
        hall_ends_.pop_back();
      }
      DCHECK(hall_ends_.empty() || hall_ends_.back() < start);
      hall_starts_.push_back(start);
      hall_ends_.push_back(end);
    }
  }
  return true;
}
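
// Illustrative sketch (added for exposition, not part of the original file):
// if three expressions all have their bounds within [5, 7], then after the
// third one is inserted the interval [5, 7] is saturated and is recorded as a
// Hall interval. A later expression in the same window whose lower bound
// falls in [5, 7] is then pushed to 8, with the bounds of those three
// expressions as the reason.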

void AllDifferentBoundsPropagator::RegisterWith(
    GenericLiteralWatcher* watcher) {
  const int id = watcher->Register(this);
  for (const CachedBounds& entry : bounds_) {
    watcher->WatchAffineExpression(entry.expr, id);
  }
}

}  // namespace sat
}  // namespace operations_research