Google OR-Tools v9.15: a fast and portable software suite for combinatorial optimization
table.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// This file implements the table constraints.
15
16#include <algorithm>
17#include <cstdint>
18#include <limits>
19#include <memory>
20#include <string>
21#include <vector>
22
23#include "absl/container/flat_hash_map.h"
24#include "absl/log/check.h"
25#include "absl/strings/str_format.h"
26#include "absl/strings/str_join.h"
27#include "absl/types/span.h"
28#include "ortools/constraint_solver/constraint_solver.h"
29#include "ortools/constraint_solver/constraint_solveri.h"
30#include "ortools/util/bitset.h"
31#include "ortools/util/string_array.h"
32#include "ortools/util/tuple_set.h"
33
34namespace operations_research {
35namespace {
36// ----- Presolve helpers -----
37// TODO(user): Move this out of this file.
38struct AffineTransformation { // y == a*x + b.
39 AffineTransformation() : a(1), b(0) {}
40 AffineTransformation(int64_t aa, int64_t bb) : a(aa), b(bb) {
41 CHECK_NE(a, 0);
42 }
43 int64_t a;
44 int64_t b;
45
46 bool Reverse(int64_t value, int64_t* const reverse) const {
47 const int64_t temp = value - b;
48 if (temp % a == 0) {
49 *reverse = temp / a;
50 DCHECK_EQ(Forward(*reverse), value);
51 return true;
52 } else {
53 return false;
54 }
55 }
56
57 int64_t Forward(int64_t value) const { return value * a + b; }
58
59 int64_t UnsafeReverse(int64_t value) const { return (value - b) / a; }
60
61 void Clear() {
62 a = 1;
63 b = 0;
64 }
65
66 std::string DebugString() const {
67 return absl::StrFormat("(%d * x + %d)", a, b);
68 }
69};
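A minimal standalone sketch of the reverse mapping above (the numbers are made up for illustration): if a model variable y was simplified to y == 3 * x + 1, the tuple value 7 for y maps back to x == 2, while 8 has no preimage and can never be supported.

#include <cstdint>
#include <iostream>

int main() {
  const int64_t a = 3, b = 1;     // y == a * x + b, as in AffineTransformation.
  const int64_t tuple_value = 7;  // Value stored in the tuple set for y.
  const int64_t temp = tuple_value - b;
  if (temp % a == 0) {  // Same divisibility test as Reverse().
    std::cout << "x = " << temp / a << std::endl;  // Prints "x = 2".
  } else {
    std::cout << "no preimage; the tuple can never be supported" << std::endl;
  }
  return 0;
}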
70
71// TODO(user): Move this out too.
72class VarLinearizer : public ModelParser {
73 public:
74 VarLinearizer() : target_var_(nullptr), transformation_(nullptr) {}
75 ~VarLinearizer() override {}
76
77 void VisitIntegerVariable(const IntVar* const variable,
78 const std::string& operation, int64_t value,
79 IntVar* const delegate) override {
80 if (operation == ModelVisitor::kSumOperation) {
81 AddConstant(value);
82 delegate->Accept(this);
83 } else if (operation == ModelVisitor::kDifferenceOperation) {
84 AddConstant(value);
85 PushMultiplier(-1);
86 delegate->Accept(this);
87 PopMultiplier();
88 } else if (operation == ModelVisitor::kProductOperation) {
89 PushMultiplier(value);
90 delegate->Accept(this);
91 PopMultiplier();
92 } else if (operation == ModelVisitor::kTraceOperation) {
93 *target_var_ = const_cast<IntVar*>(variable);
94 transformation_->a = multipliers_.back();
95 }
96 }
97
98 void VisitIntegerVariable(const IntVar* const variable,
99 IntExpr* const delegate) override {
100 *target_var_ = const_cast<IntVar*>(variable);
101 transformation_->a = multipliers_.back();
102 }
103
104 void Visit(const IntVar* const var, IntVar** const target_var,
105 AffineTransformation* const transformation) {
106 target_var_ = target_var;
107 transformation_ = transformation;
108 transformation->Clear();
109 PushMultiplier(1);
110 var->Accept(this);
111 PopMultiplier();
112 CHECK(multipliers_.empty());
113 }
114
115 std::string DebugString() const override { return "VarLinearizer"; }
116
117 private:
118 void AddConstant(int64_t constant) {
119 transformation_->b += constant * multipliers_.back();
120 }
121
122 void PushMultiplier(int64_t multiplier) {
123 if (multipliers_.empty()) {
124 multipliers_.push_back(multiplier);
125 } else {
126 multipliers_.push_back(multiplier * multipliers_.back());
127 }
128 }
129
130 void PopMultiplier() { multipliers_.pop_back(); }
131
132 std::vector<int64_t> multipliers_;
133 IntVar** target_var_;
134 AffineTransformation* transformation_;
135};
136
137static const int kBitsInUint64 = 64;
138
139// ----- Positive Table Constraint -----
140
141// Structure of the constraint:
142
143// Tuples are indexed; we maintain a bitset of active tuples.
144
145// For each var and each value, we maintain a bitset mask of tuples
146// containing this value for this variable.
147
148// Propagation: When a value is removed, blank all active tuples using the
149// var-value mask.
150// Then we scan all other variable/value pairs to check whether an active
151// tuple still supports each of them.
152
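A minimal standalone sketch of the bitset scheme described above (illustrative tuples only), for a table small enough to fit in one 64-bit word:

#include <cstdint>
#include <iostream>

int main() {
  // Three tuples over two variables: tuple 0 = (1, 2), tuple 1 = (1, 3),
  // tuple 2 = (2, 3). One bit per tuple.
  const uint64_t mask_var0_val1 = (1ULL << 0) | (1ULL << 1);  // var 0 == 1 in tuples 0, 1.
  const uint64_t mask_var1_val3 = (1ULL << 1) | (1ULL << 2);  // var 1 == 3 in tuples 1, 2.

  uint64_t active = (1ULL << 3) - 1;  // All three tuples start active.

  // Value 1 is removed from var 0: blank all active tuples in its mask.
  active &= ~mask_var0_val1;  // Only tuple 2 remains active.

  // Value 3 of var 1 is still supported iff some active tuple contains it.
  const bool supported = (active & mask_var1_val3) != 0;
  std::cout << "var 1, value 3 still supported: " << supported << std::endl;  // Prints 1.
  return 0;
}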
153class BasePositiveTableConstraint : public Constraint {
154 public:
155 BasePositiveTableConstraint(Solver* const s, const std::vector<IntVar*>& vars,
156 const IntTupleSet& tuples)
157 : Constraint(s),
158 tuple_count_(tuples.NumTuples()),
159 arity_(vars.size()),
160 vars_(arity_),
161 holes_(arity_),
162 iterators_(arity_),
163 tuples_(tuples),
164 transformations_(arity_) {
165 // This constraint iterates intensively over variable domains and
166 // holes. Thus we visit each variable to reach the boolean or domain
167 // int var beneath it, recording the affine transformation that was
168 // applied. The tuple set is then translated in parallel with these
169 // variable simplifications. This way, we keep the memory-efficient
170 // nature of shared tuple sets (especially important for transition
171 // constraints, which are a chain of table constraints). The run-time
172 // cost is small, as the tuples are read only once to construct the
173 // bitset data structures.
174 VarLinearizer linearizer;
175 for (int i = 0; i < arity_; ++i) {
176 linearizer.Visit(vars[i], &vars_[i], &transformations_[i]);
177 }
178 // Create hole iterators
179 for (int i = 0; i < arity_; ++i) {
180 holes_[i] = vars_[i]->MakeHoleIterator(true);
181 iterators_[i] = vars_[i]->MakeDomainIterator(true);
182 }
183 }
184
185 ~BasePositiveTableConstraint() override {}
186
187 std::string DebugString() const override {
188 return absl::StrFormat("AllowedAssignments(arity = %d, tuple_count = %d)",
189 arity_, tuple_count_);
190 }
191
192 void Accept(ModelVisitor* const visitor) const override {
193 visitor->BeginVisitConstraint(ModelVisitor::kAllowedAssignments, this);
194 visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kVarsArgument,
195 vars_);
196 visitor->VisitIntegerMatrixArgument(ModelVisitor::kTuplesArgument, tuples_);
197 visitor->EndVisitConstraint(ModelVisitor::kAllowedAssignments, this);
198 }
199
200 protected:
201 bool TupleValue(int tuple_index, int var_index, int64_t* const value) const {
202 return transformations_[var_index].Reverse(
203 tuples_.Value(tuple_index, var_index), value);
204 }
205
206 int64_t UnsafeTupleValue(int tuple_index, int var_index) const {
207 return transformations_[var_index].UnsafeReverse(
208 tuples_.Value(tuple_index, var_index));
209 }
210
211 bool IsTupleSupported(int tuple_index) {
212 for (int var_index = 0; var_index < arity_; ++var_index) {
213 int64_t value = 0;
214 if (!TupleValue(tuple_index, var_index, &value) ||
215 !vars_[var_index]->Contains(value)) {
216 return false;
217 }
218 }
219 return true;
220 }
221
222 const int tuple_count_;
223 const int arity_;
224 std::vector<IntVar*> vars_;
225 std::vector<IntVarIterator*> holes_;
226 std::vector<IntVarIterator*> iterators_;
227 std::vector<int64_t> to_remove_;
228
229 private:
230 // All allowed tuples.
231 const IntTupleSet tuples_;
232 // The set of affine transformations that describe the
233 // simplification of the variables.
234 std::vector<AffineTransformation> transformations_;
235};
236
237class PositiveTableConstraint : public BasePositiveTableConstraint {
238 public:
239 typedef absl::flat_hash_map<int, std::vector<uint64_t>> ValueBitset;
240
241 PositiveTableConstraint(Solver* const s, const std::vector<IntVar*>& vars,
242 const IntTupleSet& tuples)
243 : BasePositiveTableConstraint(s, vars, tuples),
244 word_length_(BitLength64(tuples.NumTuples())),
245 active_tuples_(tuples.NumTuples()) {}
246
247 ~PositiveTableConstraint() override {}
248
249 void Post() override {
250 Demon* d = MakeDelayedConstraintDemon0(
251 solver(), this, &PositiveTableConstraint::Propagate, "Propagate");
252 for (int i = 0; i < arity_; ++i) {
253 vars_[i]->WhenDomain(d);
254 Demon* u = MakeConstraintDemon1(
255 solver(), this, &PositiveTableConstraint::Update, "Update", i);
256 vars_[i]->WhenDomain(u);
257 }
258 // Initialize masks.
259 masks_.clear();
260 masks_.resize(arity_);
261 for (int i = 0; i < tuple_count_; ++i) {
262 InitializeMask(i);
263 }
264 // Initialize the active tuple bitset.
265 std::vector<uint64_t> actives(word_length_, 0);
266 for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
267 if (IsTupleSupported(tuple_index)) {
268 SetBit64(actives.data(), tuple_index);
269 }
270 }
271 active_tuples_.Init(solver(), actives);
272 }
273
274 void InitialPropagate() override {
275 // Build the active_tuples_ structure.
276 for (int var_index = 0; var_index < arity_; ++var_index) {
277 for (const auto& it : masks_[var_index]) {
278 if (!vars_[var_index]->Contains(it.first)) {
279 active_tuples_.RevSubtract(solver(), it.second);
280 }
281 }
282 }
283 if (active_tuples_.Empty()) {
284 solver()->Fail();
285 }
286 // Remove unreached values.
287 for (int var_index = 0; var_index < arity_; ++var_index) {
288 const ValueBitset& mask = masks_[var_index];
289 IntVar* const var = vars_[var_index];
290 to_remove_.clear();
291 for (const int64_t value : InitAndGetValues(iterators_[var_index])) {
292 if (!mask.contains(value)) {
293 to_remove_.push_back(value);
294 }
295 }
296 if (!to_remove_.empty()) {
297 var->RemoveValues(to_remove_);
298 }
299 }
300 }
301
302 void Propagate() {
303 for (int var_index = 0; var_index < arity_; ++var_index) {
304 IntVar* const var = vars_[var_index];
305 to_remove_.clear();
306 for (const int64_t value : InitAndGetValues(iterators_[var_index])) {
307 if (!Supported(var_index, value)) {
308 to_remove_.push_back(value);
309 }
310 }
311 if (!to_remove_.empty()) {
312 var->RemoveValues(to_remove_);
313 }
314 }
315 }
316
317 void Update(int index) {
318 const ValueBitset& var_masks = masks_[index];
319 IntVar* const var = vars_[index];
320 const int64_t old_max = var->OldMax();
321 const int64_t vmin = var->Min();
322 const int64_t vmax = var->Max();
323 for (int64_t value = var->OldMin(); value < vmin; ++value) {
324 const auto& it = var_masks.find(value);
325 if (it != var_masks.end()) {
326 BlankActives(it->second);
327 }
328 }
329 for (const int64_t value : InitAndGetValues(holes_[index])) {
330 const auto& it = var_masks.find(value);
331 if (it != var_masks.end()) {
332 BlankActives(it->second);
333 }
334 }
335 for (int64_t value = vmax + 1; value <= old_max; ++value) {
336 const auto& it = var_masks.find(value);
337 if (it != var_masks.end()) {
338 BlankActives(it->second);
339 }
340 }
341 }
342
343 void BlankActives(const std::vector<uint64_t>& mask) {
344 if (!mask.empty()) {
345 active_tuples_.RevSubtract(solver(), mask);
346 if (active_tuples_.Empty()) {
347 solver()->Fail();
348 }
349 }
350 }
351
352 bool Supported(int var_index, int64_t value) {
353 DCHECK_GE(var_index, 0);
354 DCHECK_LT(var_index, arity_);
355 DCHECK(masks_[var_index].contains(value));
356 const std::vector<uint64_t>& mask = masks_[var_index][value];
357 int tmp = 0;
358 return active_tuples_.Intersects(mask, &tmp);
359 }
360
361 std::string DebugString() const override {
362 return absl::StrFormat("PositiveTableConstraint([%s], %d tuples)",
363 JoinDebugStringPtr(vars_, ", "), tuple_count_);
364 }
365
366 protected:
367 void InitializeMask(int tuple_index) {
368 std::vector<int64_t> cache(arity_);
369 for (int var_index = 0; var_index < arity_; ++var_index) {
370 if (!TupleValue(tuple_index, var_index, &cache[var_index])) {
371 return;
372 }
373 }
374 for (int var_index = 0; var_index < arity_; ++var_index) {
375 const int64_t value = cache[var_index];
376 std::vector<uint64_t>& mask = masks_[var_index][value];
377 if (mask.empty()) {
378 mask.assign(word_length_, 0);
379 }
380 SetBit64(mask.data(), tuple_index);
381 }
382 }
383
384 const int word_length_;
385 UnsortedNullableRevBitset active_tuples_;
386 std::vector<ValueBitset> masks_;
387 std::vector<uint64_t> temp_mask_;
388};
389
390// ----- Compact Tables -----
391
392class CompactPositiveTableConstraint : public BasePositiveTableConstraint {
393 public:
394 CompactPositiveTableConstraint(Solver* const s,
395 const std::vector<IntVar*>& vars,
396 const IntTupleSet& tuples)
397 : BasePositiveTableConstraint(s, vars, tuples),
398 word_length_(BitLength64(tuples.NumTuples())),
399 active_tuples_(tuples.NumTuples()),
400 masks_(arity_),
401 mask_starts_(arity_),
402 mask_ends_(arity_),
403 original_min_(arity_, 0),
404 temp_mask_(word_length_, 0),
405 supports_(arity_),
406 demon_(nullptr),
407 touched_var_(-1),
408 var_sizes_(arity_, 0) {}
409
410 ~CompactPositiveTableConstraint() override {}
411
412 void Post() override {
413 demon_ = solver()->RegisterDemon(MakeDelayedConstraintDemon0(
414 solver(), this, &CompactPositiveTableConstraint::Propagate,
415 "Propagate"));
416 for (int i = 0; i < arity_; ++i) {
417 Demon* const u = MakeConstraintDemon1(
418 solver(), this, &CompactPositiveTableConstraint::Update, "Update", i);
419 vars_[i]->WhenDomain(u);
420 }
421 for (int i = 0; i < arity_; ++i) {
422 var_sizes_.SetValue(solver(), i, vars_[i]->Size());
423 }
424 }
425
426 void InitialPropagate() override {
427 BuildMasks();
428 FillMasksAndActiveTuples();
429 ComputeMasksBoundaries();
430 BuildSupports();
431 RemoveUnsupportedValues();
432 }
433
434 // ----- Propagation -----
435
436 void Propagate() {
437 // Reset touched_var_ if more than one variable was modified.
438 if (touched_var_ == -2) {
439 touched_var_ = -1;
440 }
441 // This method scans all values of all variables to see if they
442 // are still supported.
443 // This method is not attached to any particular variable, but is pushed
444 // at a delayed priority after Update(var_index) is called.
445 for (int var_index = 0; var_index < arity_; ++var_index) {
446 // This demon runs at low priority. Thus we know all the
447 // variables that have changed since the last time it was run.
448 // In that case, if only one var was touched, as propagation is
449 // exact, we do not need to recheck that variable.
450 if (var_index == touched_var_) {
451 touched_var_ = -1; // Clean now, it is a 1 time flag.
452 continue;
453 }
454 IntVar* const var = vars_[var_index];
455 const int64_t original_min = original_min_[var_index];
456 const int64_t var_size = var->Size();
457 // The domain iterator is very slow, so let's try to work around
458 // it when possible.
459 switch (var_size) {
460 case 1: {
461 if (!Supported(var_index, var->Min() - original_min)) {
462 solver()->Fail();
463 }
464 break;
465 }
466 case 2: {
467 const int64_t var_min = var->Min();
468 const int64_t var_max = var->Max();
469 const bool min_support = Supported(var_index, var_min - original_min);
470 const bool max_support = Supported(var_index, var_max - original_min);
471 if (!min_support) {
472 if (!max_support) {
473 solver()->Fail();
474 } else {
475 var->SetValue(var_max);
476 var_sizes_.SetValue(solver(), var_index, 1);
477 }
478 } else if (!max_support) {
479 var->SetValue(var_min);
480 var_sizes_.SetValue(solver(), var_index, 1);
481 }
482 break;
483 }
484 default: {
485 to_remove_.clear();
486 const int64_t var_min = var->Min();
487 const int64_t var_max = var->Max();
488 int64_t new_min = var_min;
489 int64_t new_max = var_max;
490 // If the domain of a variable is an interval, it is much
491 // faster to iterate on that interval instead of using the
492 // iterator.
493 if (var_max - var_min + 1 == var_size) {
494 for (; new_min <= var_max; ++new_min) {
495 if (Supported(var_index, new_min - original_min)) {
496 break;
497 }
498 }
499 for (; new_max >= new_min; --new_max) {
500 if (Supported(var_index, new_max - original_min)) {
501 break;
502 }
503 }
504 var->SetRange(new_min, new_max);
505 for (int64_t value = new_min + 1; value < new_max; ++value) {
506 if (!Supported(var_index, value - original_min)) {
507 to_remove_.push_back(value);
508 }
509 }
510 } else { // Domain is sparse.
511 // Let's not collect all values below the first supported
512 // value as this can easily and more rapidly be taken care
513 // of by a SetRange() call.
514 new_min = std::numeric_limits<int64_t>::max(); // escape value.
515 for (const int64_t value :
516 InitAndGetValues(iterators_[var_index])) {
517 if (!Supported(var_index, value - original_min)) {
518 to_remove_.push_back(value);
519 } else {
520 if (new_min == std::numeric_limits<int64_t>::max()) {
521 new_min = value;
522 // This will be covered by the SetRange.
523 to_remove_.clear();
524 }
525 new_max = value;
526 }
527 }
528 var->SetRange(new_min, new_max);
529 // Trim the to_remove vector.
530 int index = to_remove_.size() - 1;
531 while (index >= 0 && to_remove_[index] > new_max) {
532 index--;
533 }
534 to_remove_.resize(index + 1);
535 }
536 var->RemoveValues(to_remove_);
537 var_sizes_.SetValue(solver(), var_index, var->Size());
538 }
539 }
540 }
541 }
542
543 void Update(int var_index) {
544 if (vars_[var_index]->Size() == var_sizes_.Value(var_index)) {
545 return;
546 }
547 // This method will update the set of active tuples by masking out all
548 // tuples attached to values of the variables that have been removed.
549
550 // We first collect the complete set of tuples to blank out in temp_mask_.
551 IntVar* const var = vars_[var_index];
552 bool changed = false;
553 const int64_t omin = original_min_[var_index];
554 const int64_t var_size = var->Size();
555 const int64_t var_min = var->Min();
556 const int64_t var_max = var->Max();
557
558 switch (var_size) {
559 case 1: {
560 changed = AndMaskWithActive(masks_[var_index][var_min - omin]);
561 break;
562 }
563 case 2: {
564 SetTempMask(var_index, var_min - omin);
565 OrTempMask(var_index, var_max - omin);
566 changed = AndMaskWithActive(temp_mask_);
567 break;
568 }
569 default: {
570 const int64_t estimated_hole_size =
571 var_sizes_.Value(var_index) - var_size;
572 const int64_t old_min = var->OldMin();
573 const int64_t old_max = var->OldMax();
574 // Rough estimation of the number of operations if we scan
575 // deltas in the domain of the variable.
576 const int64_t number_of_operations =
577 estimated_hole_size + var_min - old_min + old_max - var_max;
578 if (number_of_operations < var_size) {
579 // Let's scan the removed values since last run.
580 for (int64_t value = old_min; value < var_min; ++value) {
581 changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
582 }
583 for (const int64_t value : InitAndGetValues(holes_[var_index])) {
584 changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
585 }
586 for (int64_t value = var_max + 1; value <= old_max; ++value) {
587 changed |= SubtractMaskFromActive(masks_[var_index][value - omin]);
588 }
589 } else {
590 ClearTempMask();
591 // Let's build the mask of supported tuples from the current
592 // domain.
593 if (var_max - var_min + 1 == var_size) { // Contiguous.
594 for (int64_t value = var_min; value <= var_max; ++value) {
595 OrTempMask(var_index, value - omin);
596 }
597 } else {
598 for (const int64_t value :
599 InitAndGetValues(iterators_[var_index])) {
600 OrTempMask(var_index, value - omin);
601 }
602 }
603 // Then we AND this mask with active_tuples_.
604 changed = AndMaskWithActive(temp_mask_);
605 }
606 // We maintain the size of the variables incrementally (when it
607 // is > 2).
608 var_sizes_.SetValue(solver(), var_index, var_size);
609 }
610 }
611 // We push the propagate method only if something has changed.
612 if (changed) {
613 if (touched_var_ == -1 || touched_var_ == var_index) {
614 touched_var_ = var_index;
615 } else {
616 touched_var_ = -2; // more than one var.
617 }
618 EnqueueDelayedDemon(demon_);
619 }
620 }
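A worked example of the cost estimate used in Update() above (illustrative numbers only): if the variable previously had 10 values and now has 6, estimated_hole_size is 4; with old_min = 0, var_min = 2, old_max = 20 and var_max = 19, number_of_operations = 4 + (2 - 0) + (20 - 19) = 7, which is not smaller than var_size = 6, so the mask of supported tuples is rebuilt from the current domain instead of scanning the removed values.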
621
622 std::string DebugString() const override {
623 return absl::StrFormat("CompactPositiveTableConstraint([%s], %d tuples)",
624 JoinDebugStringPtr(vars_, ", "), tuple_count_);
625 }
626
627 private:
628 // ----- Initialization -----
629
630 void BuildMasks() {
631 // Build masks.
632 for (int i = 0; i < arity_; ++i) {
633 original_min_[i] = vars_[i]->Min();
634 const int64_t span = vars_[i]->Max() - original_min_[i] + 1;
635 masks_[i].resize(span);
636 }
637 }
638
639 void FillMasksAndActiveTuples() {
640 std::vector<uint64_t> actives(word_length_, 0);
641 for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
642 if (IsTupleSupported(tuple_index)) {
643 SetBit64(actives.data(), tuple_index);
644 // Fill in all masks.
645 for (int var_index = 0; var_index < arity_; ++var_index) {
646 const int64_t value = UnsafeTupleValue(tuple_index, var_index);
647 const int64_t value_index = value - original_min_[var_index];
648 DCHECK_GE(value_index, 0);
649 DCHECK_LT(value_index, masks_[var_index].size());
650 if (masks_[var_index][value_index].empty()) {
651 masks_[var_index][value_index].assign(word_length_, 0);
652 }
653 SetBit64(masks_[var_index][value_index].data(), tuple_index);
654 }
655 }
656 }
657 active_tuples_.Init(solver(), actives);
658 }
659
660 void RemoveUnsupportedValues() {
661 // remove unreached values.
662 for (int var_index = 0; var_index < arity_; ++var_index) {
663 IntVar* const var = vars_[var_index];
664 to_remove_.clear();
665 for (const int64_t value : InitAndGetValues(iterators_[var_index])) {
666 if (masks_[var_index][value - original_min_[var_index]].empty()) {
667 to_remove_.push_back(value);
668 }
669 }
670 if (!to_remove_.empty()) {
671 var->RemoveValues(to_remove_);
672 }
673 }
674 }
675
676 void ComputeMasksBoundaries() {
677 for (int var_index = 0; var_index < arity_; ++var_index) {
678 mask_starts_[var_index].resize(masks_[var_index].size());
679 mask_ends_[var_index].resize(masks_[var_index].size());
680 for (int value_index = 0; value_index < masks_[var_index].size();
681 ++value_index) {
682 const std::vector<uint64_t>& mask = masks_[var_index][value_index];
683 if (mask.empty()) {
684 continue;
685 }
686 int start = 0;
687 while (start < word_length_ && mask[start] == 0) {
688 start++;
689 }
690 DCHECK_LT(start, word_length_);
691 int end = word_length_ - 1;
692 while (end > start && mask[end] == 0) {
693 end--;
694 }
695 DCHECK_LE(start, end);
696 DCHECK_NE(mask[start], 0);
697 DCHECK_NE(mask[end], 0);
698 mask_starts_[var_index][value_index] = start;
699 mask_ends_[var_index][value_index] = end;
700 }
701 }
702 }
703
704 void BuildSupports() {
705 for (int var_index = 0; var_index < arity_; ++var_index) {
706 supports_[var_index].resize(masks_[var_index].size());
707 }
708 }
709
710 // ----- Helpers during propagation -----
711
712 bool AndMaskWithActive(const std::vector<uint64_t>& mask) {
713 const bool result = active_tuples_.RevAnd(solver(), mask);
714 if (active_tuples_.Empty()) {
715 solver()->Fail();
716 }
717 return result;
718 }
719
720 bool SubtractMaskFromActive(const std::vector<uint64_t>& mask) {
721 const bool result = active_tuples_.RevSubtract(solver(), mask);
722 if (active_tuples_.Empty()) {
723 solver()->Fail();
724 }
725 return result;
726 }
727
728 bool Supported(int var_index, int64_t value_index) {
729 DCHECK_GE(var_index, 0);
730 DCHECK_LT(var_index, arity_);
731 DCHECK_GE(value_index, 0);
732 DCHECK_LT(value_index, masks_[var_index].size());
733 const std::vector<uint64_t>& mask = masks_[var_index][value_index];
734 DCHECK(!mask.empty());
735 return active_tuples_.Intersects(mask, &supports_[var_index][value_index]);
736 }
737
738 void OrTempMask(int var_index, int64_t value_index) {
739 const std::vector<uint64_t>& mask = masks_[var_index][value_index];
740 if (!mask.empty()) {
741 const int mask_span = mask_ends_[var_index][value_index] -
742 mask_starts_[var_index][value_index] + 1;
743 if (active_tuples_.ActiveWordSize() < mask_span) {
744 for (int i : active_tuples_.active_words()) {
745 temp_mask_[i] |= mask[i];
746 }
747 } else {
748 for (int i = mask_starts_[var_index][value_index];
749 i <= mask_ends_[var_index][value_index]; ++i) {
750 temp_mask_[i] |= mask[i];
751 }
752 }
753 }
754 }
755
756 void SetTempMask(int var_index, int64_t value_index) {
757 // We assume memset is much faster than looping and assigning.
758 // Still, we do want to stay sparse if possible.
759 // Thus we switch between dense and sparse initialization by
760 // comparing the number of operations in both cases, with a constant factor.
761 // TODO(user): experiment with different constant values.
762 if (active_tuples_.ActiveWordSize() < word_length_ / 4) {
763 for (int i : active_tuples_.active_words()) {
764 temp_mask_[i] = masks_[var_index][value_index][i];
765 }
766 } else {
767 temp_mask_ = masks_[var_index][value_index];
768 }
769 }
770
771 void ClearTempMask() {
772 // See comment above.
773 if (active_tuples_.ActiveWordSize() < word_length_ / 4) {
774 for (int i : active_tuples_.active_words()) {
775 temp_mask_[i] = 0;
776 }
777 } else {
778 temp_mask_.assign(word_length_, 0);
779 }
780 }
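As an illustration of the dense/sparse switch used by SetTempMask() and ClearTempMask() (hypothetical sizes): with word_length_ = 40 and only 5 non-null words in active_tuples_, 5 < 40 / 4, so temp_mask_ is written only on those 5 active words; if 15 words were active, the whole vector would be assigned instead.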
781
782 // The number of 64-bit words needed to store one bit per tuple.
783 int64_t word_length_;
784 // The active bitset.
785 UnsortedNullableRevBitset active_tuples_;
786 // The masks per value per variable.
787 std::vector<std::vector<std::vector<uint64_t>>> masks_;
788 // The range of active indices in the masks.
789 std::vector<std::vector<int>> mask_starts_;
790 std::vector<std::vector<int>> mask_ends_;
791 // The min on the vars at creation time.
792 std::vector<int64_t> original_min_;
793 // A temporary mask used for computations.
794 std::vector<uint64_t> temp_mask_;
795 // The index of the word in the active bitset supporting each value per
796 // variable.
797 std::vector<std::vector<int>> supports_;
798 Demon* demon_;
799 int touched_var_;
800 RevArray<int64_t> var_sizes_;
801};
802
803// ----- Small Compact Table. -----
804
805// TODO(user): regroup code with CompactPositiveTableConstraint.
806
807class SmallCompactPositiveTableConstraint : public BasePositiveTableConstraint {
808 public:
809 SmallCompactPositiveTableConstraint(Solver* const s,
810 const std::vector<IntVar*>& vars,
811 const IntTupleSet& tuples)
812 : BasePositiveTableConstraint(s, vars, tuples),
813 active_tuples_(0),
814 stamp_(0),
815 masks_(arity_),
816 original_min_(arity_, 0),
817 demon_(nullptr),
818 touched_var_(-1) {
819 CHECK_GE(tuple_count_, 0);
820 CHECK_GE(arity_, 0);
821 CHECK_LE(tuples.NumTuples(), kBitsInUint64);
822 }
823
824 ~SmallCompactPositiveTableConstraint() override {}
825
826 void Post() override {
827 demon_ = solver()->RegisterDemon(MakeDelayedConstraintDemon0(
828 solver(), this, &SmallCompactPositiveTableConstraint::Propagate,
829 "Propagate"));
830 for (int i = 0; i < arity_; ++i) {
831 if (!vars_[i]->Bound()) {
832 Demon* const update_demon = MakeConstraintDemon1(
833 solver(), this, &SmallCompactPositiveTableConstraint::Update,
834 "Update", i);
835 vars_[i]->WhenDomain(update_demon);
836 }
837 }
838 stamp_ = 0;
839 }
840
841 void InitMasks() {
842 // Build masks.
843 for (int i = 0; i < arity_; ++i) {
844 original_min_[i] = vars_[i]->Min();
845 const int64_t span = vars_[i]->Max() - original_min_[i] + 1;
846 masks_[i].assign(span, 0);
847 }
848 }
849
850 bool IsTupleSupported(int tuple_index) {
851 for (int var_index = 0; var_index < arity_; ++var_index) {
852 int64_t value = 0;
853 if (!TupleValue(tuple_index, var_index, &value) ||
854 !vars_[var_index]->Contains(value)) {
855 return false;
856 }
857 }
858 return true;
859 }
860
861 void ComputeActiveTuples() {
862 active_tuples_ = 0;
863 // Compute active_tuples_ and update masks.
864 for (int tuple_index = 0; tuple_index < tuple_count_; ++tuple_index) {
865 if (IsTupleSupported(tuple_index)) {
866 const uint64_t local_mask = OneBit64(tuple_index);
867 active_tuples_ |= local_mask;
868 for (int var_index = 0; var_index < arity_; ++var_index) {
869 const int64_t value = UnsafeTupleValue(tuple_index, var_index);
870 masks_[var_index][value - original_min_[var_index]] |= local_mask;
871 }
872 }
873 }
874 if (!active_tuples_) {
875 solver()->Fail();
876 }
877 }
878
879 void RemoveUnsupportedValues() {
880 // remove unreached values.
881 for (int var_index = 0; var_index < arity_; ++var_index) {
882 IntVar* const var = vars_[var_index];
883 const int64_t original_min = original_min_[var_index];
884 to_remove_.clear();
885 for (const int64_t value : InitAndGetValues(iterators_[var_index])) {
886 if (masks_[var_index][value - original_min] == 0) {
887 to_remove_.push_back(value);
888 }
889 }
890 if (!to_remove_.empty()) {
891 var->RemoveValues(to_remove_);
892 }
893 }
894 }
895
896 void InitialPropagate() override {
897 InitMasks();
898 ComputeActiveTuples();
899 RemoveUnsupportedValues();
900 }
901
902 void Propagate() {
903 // This method scans all the values of all the variables to see if they
904 // are still supported.
905 // This method is not attached to any particular variable, but is pushed
906 // at a delayed priority and awakened by Update(var_index).
907
908 // Reset touched_var_ if more than one variable was modified.
909 if (touched_var_ == -2) {
910 touched_var_ = -1;
911 }
912
913 // We cache active_tuples_.
914 const uint64_t actives = active_tuples_;
915
916 // We scan all variables and check their domains.
917 for (int var_index = 0; var_index < arity_; ++var_index) {
918 // This demon runs at low priority. Thus we know all the
919 // variables that have changed since the last time it was run.
920 // In that case, if only one var was touched, as propagation is
921 // exact, we do not need to recheck that variable.
922 if (var_index == touched_var_) {
923 touched_var_ = -1; // Clean it, it is a one time flag.
924 continue;
925 }
926 const std::vector<uint64_t>& var_mask = masks_[var_index];
927 const int64_t original_min = original_min_[var_index];
928 IntVar* const var = vars_[var_index];
929 const int64_t var_size = var->Size();
930 switch (var_size) {
931 case 1: {
932 if ((var_mask[var->Min() - original_min] & actives) == 0) {
933 // The difference with the non-small version of the table
934 // is that checking the validity of the resulting active
935 // tuples is cheap. Therefore we do not delay the check
936 // code.
937 solver()->Fail();
938 }
939 break;
940 }
941 case 2: {
942 const int64_t var_min = var->Min();
943 const int64_t var_max = var->Max();
944 const bool min_support =
945 (var_mask[var_min - original_min] & actives) != 0;
946 const bool max_support =
947 (var_mask[var_max - original_min] & actives) != 0;
948 if (!min_support && !max_support) {
949 solver()->Fail();
950 } else if (!min_support) {
951 var->SetValue(var_max);
952 } else if (!max_support) {
953 var->SetValue(var_min);
954 }
955 break;
956 }
957 default: {
958 to_remove_.clear();
959 const int64_t var_min = var->Min();
960 const int64_t var_max = var->Max();
961 int64_t new_min = var_min;
962 int64_t new_max = var_max;
963 if (var_max - var_min + 1 == var_size) {
964 // Contiguous case.
965 for (; new_min <= var_max; ++new_min) {
966 if ((var_mask[new_min - original_min] & actives) != 0) {
967 break;
968 }
969 }
970 for (; new_max >= new_min; --new_max) {
971 if ((var_mask[new_max - original_min] & actives) != 0) {
972 break;
973 }
974 }
975 var->SetRange(new_min, new_max);
976 for (int64_t value = new_min + 1; value < new_max; ++value) {
977 if ((var_mask[value - original_min] & actives) == 0) {
978 to_remove_.push_back(value);
979 }
980 }
981 } else {
982 bool min_set = false;
983 int last_size = 0;
984 for (const int64_t value :
985 InitAndGetValues(iterators_[var_index])) {
986 // The iterator is not safe w.r.t. deletion. Thus we
987 // postpone all value removals.
988 if ((var_mask[value - original_min] & actives) == 0) {
989 if (min_set) {
990 to_remove_.push_back(value);
991 }
992 } else {
993 if (!min_set) {
994 new_min = value;
995 min_set = true;
996 }
997 new_max = value;
998 last_size = to_remove_.size();
999 }
1000 }
1001 if (min_set) {
1002 var->SetRange(new_min, new_max);
1003 } else {
1004 solver()->Fail();
1005 }
1006 to_remove_.resize(last_size);
1007 }
1008 var->RemoveValues(to_remove_);
1009 }
1010 }
1011 }
1012 }
1013
1014 void Update(int var_index) {
1015 // This method updates the set of active tuples by masking out all
1016 // tuples attached to values of the variables that have been removed.
1017
1018 IntVar* const var = vars_[var_index];
1019 const int64_t original_min = original_min_[var_index];
1020 const int64_t var_size = var->Size();
1021 switch (var_size) {
1022 case 1: {
1023 ApplyMask(var_index, masks_[var_index][var->Min() - original_min]);
1024 return;
1025 }
1026 case 2: {
1027 ApplyMask(var_index, masks_[var_index][var->Min() - original_min] |
1028 masks_[var_index][var->Max() - original_min]);
1029 return;
1030 }
1031 default: {
1032 // We first collect the complete set of tuples to blank out in
1033 // a local mask.
1034 const std::vector<uint64_t>& var_mask = masks_[var_index];
1035 const int64_t old_min = var->OldMin();
1036 const int64_t old_max = var->OldMax();
1037 const int64_t var_min = var->Min();
1038 const int64_t var_max = var->Max();
1039 const bool contiguous = var_size == var_max - var_min + 1;
1040 const bool nearly_contiguous =
1041 var_size > (var_max - var_min + 1) * 7 / 10;
1042
1043 // Count the number of masks to collect, in order to compare scanning
1044 // the removed values against rebuilding the new active bitset.
1045 // TODO(user): Implement HolesSize() on IntVar* and use it
1046 // to remove this code and the var_sizes in the non_small
1047 // version.
1048 uint64_t hole_mask = 0;
1049 if (!contiguous) {
1050 for (const int64_t value : InitAndGetValues(holes_[var_index])) {
1051 hole_mask |= var_mask[value - original_min];
1052 }
1053 }
1054 const int64_t hole_operations = var_min - old_min + old_max - var_max;
1055 // We estimate the domain iterator to be 4x slower.
1056 const int64_t domain_operations = contiguous ? var_size : 4 * var_size;
1057 if (hole_operations < domain_operations) {
1058 for (int64_t value = old_min; value < var_min; ++value) {
1059 hole_mask |= var_mask[value - original_min];
1060 }
1061 for (int64_t value = var_max + 1; value <= old_max; ++value) {
1062 hole_mask |= var_mask[value - original_min];
1063 }
1064 // We complement the mask, as it encodes removed (negative) information.
1065 ApplyMask(var_index, ~hole_mask);
1066 } else {
1067 uint64_t domain_mask = 0;
1068 if (contiguous) {
1069 for (int64_t value = var_min; value <= var_max; ++value) {
1070 domain_mask |= var_mask[value - original_min];
1071 }
1072 } else if (nearly_contiguous) {
1073 for (int64_t value = var_min; value <= var_max; ++value) {
1074 if (var->Contains(value)) {
1075 domain_mask |= var_mask[value - original_min];
1076 }
1077 }
1078 } else {
1079 for (const int64_t value :
1080 InitAndGetValues(iterators_[var_index])) {
1081 domain_mask |= var_mask[value - original_min];
1082 }
1083 }
1084 ApplyMask(var_index, domain_mask);
1085 }
1086 }
1087 }
1088 }
1089
1090 std::string DebugString() const override {
1091 return absl::StrFormat(
1092 "SmallCompactPositiveTableConstraint([%s], %d tuples)",
1093 JoinDebugStringPtr(vars_, ", "), tuple_count_);
1094 }
1095
1096 private:
1097 void ApplyMask(int var_index, uint64_t mask) {
1098 if ((~mask & active_tuples_) != 0) {
1099 // Check if we need to save the active_tuples in this node.
1100 const uint64_t current_stamp = solver()->stamp();
1101 if (stamp_ < current_stamp) {
1102 stamp_ = current_stamp;
1103 solver()->SaveValue(&active_tuples_);
1104 }
1105 active_tuples_ &= mask;
1106 if (active_tuples_) {
1107 // Maintain touched_var_.
1108 if (touched_var_ == -1 || touched_var_ == var_index) {
1109 touched_var_ = var_index;
1110 } else {
1111 touched_var_ = -2; // more than one var.
1112 }
1113 EnqueueDelayedDemon(demon_);
1114 } else {
1115 // Clean it before failing.
1116 touched_var_ = -1;
1117 solver()->Fail();
1118 }
1119 }
1120 }
1121
1122 // Bitset of active tuples.
1123 uint64_t active_tuples_;
1124 // Stamp of the active_tuple bitset.
1125 uint64_t stamp_;
1126 // The masks per value per variable.
1127 std::vector<std::vector<uint64_t>> masks_;
1128 // The min on the vars at creation time.
1129 std::vector<int64_t> original_min_;
1130 Demon* demon_;
1131 int touched_var_;
1132};
1133
1134bool HasCompactDomains(const std::vector<IntVar*>& vars) {
1135 return true; // Always assume compact table.
1136}
1137
1138// ---------- Deterministic Finite Automaton ----------
1139
1140// This constraint implements a finite automaton whose transitions are
1141// labeled by the values of the variables in the array.
1142// That is, state[i+1] = transition(state[i], var[i]) whenever the tuple
1143// (state[i], var[i], state[i+1]) is in the transition table.
1144// There is only one possible transition for a given state/value pair.
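// For example (an illustrative automaton, not taken from the code): with
// transition tuples (0, 1, 0), (0, 2, 1) and (1, 1, 1), initial state 0 and
// final states {1}, the values [1, 2, 1] are accepted (states 0 -> 0 -> 1 -> 1),
// while [2, 2, 1] is rejected since there is no transition from state 1 on
// value 2.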
1145class TransitionConstraint : public Constraint {
1146 public:
1147 static const int kStatePosition;
1148 static const int kNextStatePosition;
1149 static const int kTransitionTupleSize;
1150 TransitionConstraint(Solver* const s, const std::vector<IntVar*>& vars,
1151 const IntTupleSet& transition_table,
1152 int64_t initial_state,
1153 const std::vector<int64_t>& final_states)
1154 : Constraint(s),
1155 vars_(vars),
1156 transition_table_(transition_table),
1157 initial_state_(initial_state),
1158 final_states_(final_states) {}
1159
1160 TransitionConstraint(Solver* const s, const std::vector<IntVar*>& vars,
1161 const IntTupleSet& transition_table,
1162 int64_t initial_state,
1163 absl::Span<const int> final_states)
1164 : Constraint(s),
1165 vars_(vars),
1166 transition_table_(transition_table),
1167 initial_state_(initial_state),
1168 final_states_(final_states.size()) {
1169 for (int i = 0; i < final_states.size(); ++i) {
1170 final_states_[i] = final_states[i];
1171 }
1172 }
1173
1174 ~TransitionConstraint() override {}
1175
1176 void Post() override {
1177 Solver* const s = solver();
1178 int64_t state_min = std::numeric_limits<int64_t>::max();
1179 int64_t state_max = std::numeric_limits<int64_t>::min();
1180 const int nb_vars = vars_.size();
1181 for (int i = 0; i < transition_table_.NumTuples(); ++i) {
1182 state_max =
1183 std::max(state_max, transition_table_.Value(i, kStatePosition));
1184 state_max =
1185 std::max(state_max, transition_table_.Value(i, kNextStatePosition));
1186 state_min =
1187 std::min(state_min, transition_table_.Value(i, kStatePosition));
1188 state_min =
1189 std::min(state_min, transition_table_.Value(i, kNextStatePosition));
1190 }
1191
1192 std::vector<IntVar*> states;
1193 states.push_back(s->MakeIntConst(initial_state_));
1194 for (int var_index = 1; var_index < nb_vars; ++var_index) {
1195 states.push_back(s->MakeIntVar(state_min, state_max));
1196 }
1197 states.push_back(s->MakeIntVar(final_states_));
1198 CHECK_EQ(nb_vars + 1, states.size());
1199
1200 const int num_tuples = transition_table_.NumTuples();
1201
1202 for (int var_index = 0; var_index < nb_vars; ++var_index) {
1203 std::vector<IntVar*> tmp_vars(3);
1204 tmp_vars[0] = states[var_index];
1205 tmp_vars[1] = vars_[var_index];
1206 tmp_vars[2] = states[var_index + 1];
1207 // We always build the compact versions of the tables.
1208 if (num_tuples <= kBitsInUint64) {
1209 s->AddConstraint(s->RevAlloc(new SmallCompactPositiveTableConstraint(
1210 s, tmp_vars, transition_table_)));
1211 } else {
1212 s->AddConstraint(s->RevAlloc(new CompactPositiveTableConstraint(
1213 s, tmp_vars, transition_table_)));
1214 }
1215 }
1216 }
1217
1218 void InitialPropagate() override {}
1219
1220 void Accept(ModelVisitor* const visitor) const override {
1221 visitor->BeginVisitConstraint(ModelVisitor::kTransition, this);
1222 visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kVarsArgument,
1223 vars_);
1224 visitor->VisitIntegerArgument(ModelVisitor::kInitialState, initial_state_);
1225 visitor->VisitIntegerArrayArgument(ModelVisitor::kFinalStatesArgument,
1226 final_states_);
1227 visitor->VisitIntegerMatrixArgument(ModelVisitor::kTuplesArgument,
1228 transition_table_);
1229 visitor->EndVisitConstraint(ModelVisitor::kTransition, this);
1230 }
1231
1232 std::string DebugString() const override {
1233 return absl::StrFormat(
1234 "TransitionConstraint([%s], %d transitions, initial = %d, final = "
1235 "[%s])",
1236 JoinDebugStringPtr(vars_, ", "), transition_table_.NumTuples(),
1237 initial_state_, absl::StrJoin(final_states_, ", "));
1238 }
1239
1240 private:
1241 // Variables representing the transitions between states. See header file.
1242 const std::vector<IntVar*> vars_;
1243 // The transition as tuples (state, value, next_state).
1244 const IntTupleSet transition_table_;
1245 // The initial state before the first transition.
1246 const int64_t initial_state_;
1247 // Vector of final states after the last transition.
1248 std::vector<int64_t> final_states_;
1249};
1250
1251const int TransitionConstraint::kStatePosition = 0;
1252const int TransitionConstraint::kNextStatePosition = 2;
1253const int TransitionConstraint::kTransitionTupleSize = 3;
1254} // namespace
1255
1256// --------- API ----------
1257
1258Constraint* Solver::MakeAllowedAssignments(const std::vector<IntVar*>& vars,
1259 const IntTupleSet& tuples) {
1260 if (HasCompactDomains(vars)) {
1261 if (tuples.NumTuples() < kBitsInUint64 && parameters_.use_small_table()) {
1262 return RevAlloc(
1263 new SmallCompactPositiveTableConstraint(this, vars, tuples));
1264 } else {
1265 return RevAlloc(new CompactPositiveTableConstraint(this, vars, tuples));
1266 }
1267 }
1268 return RevAlloc(new PositiveTableConstraint(this, vars, tuples));
1269}
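Below is a minimal usage sketch for this entry point (not part of table.cc; the variable bounds and tuples are made up for illustration, and only APIs declared in constraint_solver.h are used):

#include <iostream>
#include <vector>

#include "ortools/constraint_solver/constraint_solver.h"

namespace operations_research {

void AllowedAssignmentsExample() {
  Solver solver("table_example");
  std::vector<IntVar*> vars;
  solver.MakeIntVarArray(3, 0, 5, "x", &vars);  // Three variables in [0, 5].

  IntTupleSet tuples(3);  // Allowed tuples for (x0, x1, x2).
  tuples.Insert3(0, 1, 2);
  tuples.Insert3(1, 2, 3);
  tuples.Insert3(2, 3, 5);
  solver.AddConstraint(solver.MakeAllowedAssignments(vars, tuples));

  DecisionBuilder* const db = solver.MakePhase(
      vars, Solver::CHOOSE_FIRST_UNBOUND, Solver::ASSIGN_MIN_VALUE);
  solver.NewSearch(db);
  while (solver.NextSolution()) {  // Enumerates exactly the three tuples.
    std::cout << vars[0]->Value() << " " << vars[1]->Value() << " "
              << vars[2]->Value() << std::endl;
  }
  solver.EndSearch();
}

}  // namespace operations_research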
1270
1271Constraint* Solver::MakeTransitionConstraint(
1272 const std::vector<IntVar*>& vars, const IntTupleSet& transition_table,
1273 int64_t initial_state, const std::vector<int64_t>& final_states) {
1274 return RevAlloc(new TransitionConstraint(this, vars, transition_table,
1275 initial_state, final_states));
1276}
1277
1278Constraint* Solver::MakeTransitionConstraint(
1279 const std::vector<IntVar*>& vars, const IntTupleSet& transition_table,
1280 int64_t initial_state, const std::vector<int>& final_states) {
1281 return RevAlloc(new TransitionConstraint(this, vars, transition_table,
1282 initial_state, final_states));
1283}
1284
1285} // namespace operations_research
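A similar sketch for MakeTransitionConstraint (a hypothetical automaton over values {1, 2} that accepts exactly the sequences containing a single 2; the tuples and bounds are made up for illustration):

#include <cstdint>
#include <iostream>
#include <vector>

#include "ortools/constraint_solver/constraint_solver.h"

namespace operations_research {

void TransitionExample() {
  Solver solver("transition_example");
  std::vector<IntVar*> vars;
  solver.MakeIntVarArray(4, 1, 2, "v", &vars);  // Transition labels in {1, 2}.

  IntTupleSet transitions(3);    // Tuples are (state, value, next_state).
  transitions.Insert3(0, 1, 0);  // Stay in state 0 on a 1.
  transitions.Insert3(0, 2, 1);  // Move to state 1 on a 2.
  transitions.Insert3(1, 1, 1);  // Stay in state 1 on a 1.

  const int64_t initial_state = 0;
  const std::vector<int64_t> final_states = {1};
  solver.AddConstraint(solver.MakeTransitionConstraint(
      vars, transitions, initial_state, final_states));

  DecisionBuilder* const db = solver.MakePhase(
      vars, Solver::CHOOSE_FIRST_UNBOUND, Solver::ASSIGN_MIN_VALUE);
  solver.NewSearch(db);
  while (solver.NextSolution()) {  // Each solution contains exactly one 2.
    std::cout << vars[0]->Value() << vars[1]->Value() << vars[2]->Value()
              << vars[3]->Value() << std::endl;
  }
  solver.EndSearch();
}

}  // namespace operations_research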