Google OR-Tools v9.15
a fast and portable software suite for combinatorial optimization
routing_filters.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// Implementation of local search filters for routing models.
15
17
18#include <stddef.h>
19
20#include <algorithm>
21#include <cstdint>
22#include <deque>
23#include <functional>
24#include <limits>
25#include <memory>
26#include <optional>
27#include <string>
28#include <tuple>
29#include <utility>
30#include <vector>
31
32#include "absl/algorithm/container.h"
33#include "absl/container/btree_set.h"
34#include "absl/container/flat_hash_map.h"
35#include "absl/container/flat_hash_set.h"
36#include "absl/flags/flag.h"
37#include "absl/functional/any_invocable.h"
38#include "absl/log/check.h"
39#include "absl/log/log.h"
40#include "absl/strings/str_cat.h"
41#include "absl/strings/string_view.h"
42#include "absl/types/span.h"
45#include "ortools/base/types.h"
54#include "ortools/util/bitset.h"
58
59ABSL_FLAG(bool, routing_strong_debug_checks, false,
60 "Run stronger checks in debug; these stronger checks might in particular "
61 "change the complexity of the code.");
62
63namespace operations_research {
64
65namespace {
66// Route constraint filter
67
68class RouteConstraintFilter : public BasePathFilter {
69 public:
70 explicit RouteConstraintFilter(const RoutingModel& routing_model)
71 : BasePathFilter(routing_model.Nexts(),
72 routing_model.Size() + routing_model.vehicles(),
73 routing_model.GetPathsMetadata()),
74 routing_model_(routing_model),
75 current_vehicle_cost_(0),
76 delta_vehicle_cost_(0),
77 current_vehicle_costs_(routing_model.vehicles(), 0) {
78 start_to_vehicle_.resize(Size(), -1);
79 vehicle_to_start_.resize(routing_model.vehicles());
80 for (int v = 0; v < routing_model.vehicles(); v++) {
81 const int64_t start = routing_model.Start(v);
82 start_to_vehicle_[start] = v;
83 vehicle_to_start_[v] = start;
84 }
85 }
86 ~RouteConstraintFilter() override = default;
87 std::string DebugString() const override { return "RouteConstraintFilter"; }
88 int64_t GetSynchronizedObjectiveValue() const override {
89 return current_vehicle_cost_;
90 }
91 int64_t GetAcceptedObjectiveValue() const override {
92 return lns_detected() ? 0 : delta_vehicle_cost_;
93 }
94
95 private:
96 void OnSynchronizePathFromStart(int64_t start) override {
97 route_.clear();
98 int64_t node = start;
99 while (node < Size()) {
100 route_.push_back(node);
101 node = Value(node);
102 }
103 route_.push_back(node);
104 std::optional<int64_t> route_cost = routing_model_.GetRouteCost(route_);
105 DCHECK(route_cost.has_value());
106 current_vehicle_costs_[start_to_vehicle_[start]] = route_cost.value();
107 }
108 void OnAfterSynchronizePaths() override {
109 current_vehicle_cost_ = 0;
110 for (int vehicle = 0; vehicle < vehicle_to_start_.size(); vehicle++) {
111 const int64_t start = vehicle_to_start_[vehicle];
112 DCHECK_EQ(vehicle, start_to_vehicle_[start]);
113 if (!IsVarSynced(start)) {
114 return;
115 }
116 CapAddTo(current_vehicle_costs_[vehicle], &current_vehicle_cost_);
117 }
118 }
119 bool InitializeAcceptPath() override {
120 delta_vehicle_cost_ = current_vehicle_cost_;
121 return true;
122 }
123 bool AcceptPath(int64_t path_start, int64_t /*chain_start*/,
124 int64_t /*chain_end*/) override {
125 delta_vehicle_cost_ =
126 CapSub(delta_vehicle_cost_,
127 current_vehicle_costs_[start_to_vehicle_[path_start]]);
128 route_.clear();
129 int64_t node = path_start;
130 while (node < Size()) {
131 route_.push_back(node);
132 node = GetNext(node);
133 }
134 route_.push_back(node);
135 std::optional<int64_t> route_cost = routing_model_.GetRouteCost(route_);
136 if (!route_cost.has_value()) {
137 return false;
138 }
139 CapAddTo(route_cost.value(), &delta_vehicle_cost_);
140 return true;
141 }
142 bool FinalizeAcceptPath(int64_t /*objective_min*/,
143 int64_t objective_max) override {
144 return delta_vehicle_cost_ <= objective_max;
145 }
146
147 const RoutingModel& routing_model_;
148 int64_t current_vehicle_cost_;
149 int64_t delta_vehicle_cost_;
150 std::vector<int64_t> current_vehicle_costs_;
151 std::vector<int64_t> vehicle_to_start_;
152 std::vector<int> start_to_vehicle_;
153 std::vector<int64_t> route_;
154};
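// Note: this filter keeps one synchronized cost per vehicle. On acceptance
// checks it subtracts the synchronized cost of each touched route from the
// total, rebuilds the route through GetNext(), re-evaluates it with
// RoutingModel::GetRouteCost(), and rejects the move if GetRouteCost() returns
// no value or the updated total exceeds objective_max.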
155
156} // namespace
157
158IntVarLocalSearchFilter* MakeRouteConstraintFilter(
159 const RoutingModel& routing_model) {
160 return routing_model.solver()->RevAlloc(
161 new RouteConstraintFilter(routing_model));
162}
163
164namespace {
165
166// Max active vehicles filter.
167
168class MaxActiveVehiclesFilter : public IntVarLocalSearchFilter {
169 public:
170 explicit MaxActiveVehiclesFilter(const RoutingModel& routing_model)
171 : IntVarLocalSearchFilter(routing_model.Nexts()),
172 routing_model_(routing_model),
173 is_active_(routing_model.vehicles(), false),
174 active_vehicles_(0) {}
175 ~MaxActiveVehiclesFilter() override = default;
176 std::string DebugString() const override { return "MaxActiveVehiclesFilter"; }
177 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
178 int64_t /*objective_min*/, int64_t /*objective_max*/) override {
179 const int64_t kUnassigned = -1;
180 const Assignment::IntContainer& container = delta->IntVarContainer();
181 int current_active_vehicles = active_vehicles_;
182 for (const IntVarElement& new_element : container.elements()) {
183 IntVar* const var = new_element.Var();
184 int64_t index = kUnassigned;
185 if (FindIndex(var, &index) && routing_model_.IsStart(index)) {
186 if (new_element.Min() != new_element.Max()) {
187 // LNS detected.
188 return true;
189 }
190 const int vehicle = routing_model_.VehicleIndex(index);
191 const bool is_active =
192 (new_element.Min() != routing_model_.End(vehicle));
193 if (is_active && !is_active_[vehicle]) {
194 ++current_active_vehicles;
195 } else if (!is_active && is_active_[vehicle]) {
196 --current_active_vehicles;
197 }
198 }
199 }
200 return current_active_vehicles <=
201 routing_model_.GetMaximumNumberOfActiveVehicles();
202 }
203
204 private:
205 void OnSynchronize(const Assignment* /*delta*/) override {
206 active_vehicles_ = 0;
207 for (int i = 0; i < routing_model_.vehicles(); ++i) {
208 const int index = routing_model_.Start(i);
209 if (IsVarSynced(index) && Value(index) != routing_model_.End(i)) {
210 is_active_[i] = true;
211 ++active_vehicles_;
212 } else {
213 is_active_[i] = false;
214 }
215 }
216 }
217
218 const RoutingModel& routing_model_;
219 std::vector<bool> is_active_;
220 int active_vehicles_;
221};
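// Note: a vehicle counts as active when the Next variable of its start node is
// bound to something other than the vehicle's end, i.e. the route serves at
// least one node. Accept() only adjusts the synchronized count for the start
// variables present in the delta, so the check is O(size of delta).
// Illustrative sketch, given a configured RoutingModel `model` (the factory
// name below is the one assumed to be declared in the routing filters header):
//   IntVarLocalSearchFilter* filter = MakeMaxActiveVehiclesFilter(model);
//   // Rejects moves using more than model.GetMaximumNumberOfActiveVehicles().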
222} // namespace
223
224IntVarLocalSearchFilter* MakeMaxActiveVehiclesFilter(
225 const RoutingModel& routing_model) {
226 return routing_model.solver()->RevAlloc(
227 new MaxActiveVehiclesFilter(routing_model));
228}
229
230namespace {
231
232class SameActivityGroupManager {
233 public:
234 explicit SameActivityGroupManager(const RoutingModel& routing_model)
235 : routing_model_(routing_model) {}
236 int NumberOfGroups() const {
237 return routing_model_.GetSameActivityGroupsCount();
238 }
239 absl::Span<const int> GetGroupsFromNode(int node) const {
240 return absl::MakeConstSpan(routing_model_.GetSameActivityGroups())
241 .subspan(node, 1);
242 }
243 const std::vector<int>& GetGroupNodes(int group) const {
244 return routing_model_.GetSameActivityIndicesOfGroup(group);
245 }
246 void Revert() {}
247 bool CheckGroup(int group, int active, int unknown,
248 const CommittableArray<bool>& /*node_is_active*/,
249 const CommittableArray<bool>& /*node_is_unknown*/) const {
250 const int group_size = GetGroupNodes(group).size();
251 // The group constraint is respected iff either 0 or the group size lies
252 // inside the interval [num_active, num_active + num_unknown].
253 if (active == 0) return true;
254 if (active <= group_size && group_size <= active + unknown) {
255 return true;
256 }
257 return false;
258 }
259
260 private:
261 const RoutingModel& routing_model_;
262};
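// Note: CheckGroup() above accepts a group iff all of its nodes can still end
// up with the same activity status: either no node is active yet (0 active),
// or the group size fits in [active, active + unknown]. Illustrative numbers:
// a group of 4 nodes with active = 2 and unknown = 1 can reach at most 3
// active nodes, so neither "all active" nor "all inactive" is reachable and
// the group is rejected.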
263
264class OrderedActivityGroupManager {
265 public:
266 explicit OrderedActivityGroupManager(const RoutingModel& routing_model)
267 : routing_model_(routing_model),
268 groups_(routing_model.GetOrderedActivityGroups()),
269 group_bounds_(routing_model.GetOrderedActivityGroups().size(), {0, 0}),
270 disjunction_is_active_(routing_model.GetNumberOfDisjunctions(), false),
271 disjunction_is_inactive_(routing_model.GetNumberOfDisjunctions(),
272 false) {
273 group_nodes_.resize(groups_.size());
274 node_groups_.resize(routing_model.Size());
275 disjunction_groups_.resize(routing_model.GetNumberOfDisjunctions());
276 for (int group = 0; group < groups_.size(); ++group) {
277 absl::flat_hash_map<RoutingModel::DisjunctionIndex, std::vector<int>>
278 disjunction_to_ranks;
279 for (int rank = 0; rank < groups_[group].size(); ++rank) {
280 disjunction_to_ranks[groups_[group][rank]].push_back(rank);
281 }
282 for (RoutingModel::DisjunctionIndex disjunction_index : groups_[group]) {
283 for (int node :
284 routing_model.GetDisjunctionNodeIndices(disjunction_index)) {
285 node_groups_[node].push_back(group);
286 group_nodes_[group].push_back(node);
287 }
288 disjunction_groups_[disjunction_index].push_back({
289 .group = group,
290 .sorted_ranks = disjunction_to_ranks[disjunction_index],
291 });
292 }
293 group_bounds_.Set(group, std::make_pair(0, groups_[group].size() - 1));
294 }
295 group_bounds_.Commit();
296 }
297 int NumberOfGroups() const { return groups_.size(); }
298 absl::Span<const int> GetGroupsFromNode(int node) const {
299 return node_groups_[node];
300 }
301 const std::vector<int>& GetGroupNodes(int group) const {
302 return group_nodes_[group];
303 }
304 void Revert() {
305 group_bounds_.Revert();
306 disjunction_is_active_.Revert();
307 disjunction_is_inactive_.Revert();
308 touched_disjunctions_.clear();
309 }
310 bool CheckGroup(int group, int active, int unknown,
311 CommittableArray<bool>& node_is_active,
312 CommittableArray<bool>& node_is_unknown) {
313 if (active == 0) return true;
314 auto& [min_rank, max_rank] = group_bounds_.GetMutable(group);
315 for (int rank = min_rank; rank <= max_rank; ++rank) {
316 const RoutingModel::DisjunctionIndex disjunction_index =
317 groups_[group][rank];
318 if (IsInactive(disjunction_index, node_is_active, node_is_unknown)) {
319 disjunction_is_inactive_.Set(disjunction_index.value(), true);
320 touched_disjunctions_.push_back(disjunction_index);
321 break;
322 }
323 }
324 for (int rank = max_rank; rank >= min_rank; --rank) {
325 const RoutingModel::DisjunctionIndex disjunction_index =
326 groups_[group][rank];
327 if (IsActive(disjunction_index, node_is_active, node_is_unknown)) {
328 disjunction_is_active_.Set(disjunction_index.value(), true);
329 touched_disjunctions_.push_back(disjunction_index);
330 break;
331 }
332 }
333 while (!touched_disjunctions_.empty()) {
334 const RoutingModel::DisjunctionIndex disjunction_index =
335 touched_disjunctions_.back();
336 touched_disjunctions_.pop_back();
337 if (!Propagate(disjunction_index, node_is_active, node_is_unknown)) {
338 return false;
339 }
340 }
341 return true;
342 }
343 bool Propagate(RoutingModel::DisjunctionIndex disjunction_index,
344 CommittableArray<bool>& node_is_active,
345 CommittableArray<bool>& node_is_unknown) {
346 for (const auto& [group_index, ranks] :
347 disjunction_groups_[disjunction_index]) {
348 const std::vector<RoutingModel::DisjunctionIndex>& group =
349 groups_[group_index];
350 auto& [min_rank, max_rank] = group_bounds_.GetMutable(group_index);
351 if (max_rank < min_rank) continue;
352 if (IsActive(disjunction_index, node_is_active, node_is_unknown)) {
353 // Make all active between min_rank and the last appearance of the
354 // disjunction.
355 int i = ranks.size() - 1;
356 while (i >= 0 && ranks[i] > max_rank) --i;
357 if (i < 0 || ranks[i] < min_rank) continue;
358 int rank = min_rank;
359 while (rank != ranks[i]) {
360 const RoutingModel::DisjunctionIndex current = group[rank];
361 if (IsInactive(current, node_is_active, node_is_unknown)) {
362 return false;
363 }
364 if (!IsActive(current, node_is_active, node_is_unknown)) {
365 disjunction_is_active_.Set(current.value(), true);
366 touched_disjunctions_.push_back(current);
367 }
368 rank++;
369 }
370 min_rank = rank + 1;
371 } else {
372 // Make all inactive between the first appearance of the disjunction and
373 // max_rank.
374 int i = 0;
375 while (i < ranks.size() && ranks[i] < min_rank) ++i;
376 if (i >= ranks.size() || ranks[i] > max_rank) continue;
377 int rank = max_rank;
378 while (rank != ranks[i]) {
379 const RoutingModel::DisjunctionIndex current = group[rank];
380 if (IsActive(current, node_is_active, node_is_unknown)) {
381 return false;
382 }
383 if (!IsInactive(current, node_is_active, node_is_unknown)) {
384 disjunction_is_inactive_.Set(current.value(), true);
385 touched_disjunctions_.push_back(current);
386 }
387 rank--;
388 }
389 max_rank = rank - 1;
390 }
391 }
392 return true;
393 }
394
395 private:
396 bool IsInactive(RoutingModel::DisjunctionIndex disjunction_index,
397 const CommittableArray<bool>& node_is_active,
398 const CommittableArray<bool>& node_is_unknown) const {
399 if (disjunction_is_inactive_.Get(disjunction_index.value())) return true;
400 int num_unknown = 0;
401 int num_active = 0;
402 for (int node :
403 routing_model_.GetDisjunctionNodeIndices(disjunction_index)) {
404 if (node_is_unknown.Get(node)) {
405 ++num_unknown;
406 } else if (node_is_active.Get(node)) {
407 ++num_active;
408 }
409 }
410 return num_unknown <
411 routing_model_.GetDisjunctionMaxCardinality(disjunction_index) -
412 num_active;
413 }
414 bool IsActive(RoutingModel::DisjunctionIndex disjunction_index,
415 const CommittableArray<bool>& node_is_active,
416 const CommittableArray<bool>& node_is_unknown) const {
417 if (disjunction_is_active_.Get(disjunction_index.value())) return true;
418 int num_active = 0;
419 for (int node :
420 routing_model_.GetDisjunctionNodeIndices(disjunction_index)) {
421 if (!node_is_unknown.Get(node) && node_is_active.Get(node)) {
422 ++num_active;
423 }
424 }
425 return num_active >=
426 routing_model_.GetDisjunctionMaxCardinality(disjunction_index);
427 }
428
429 const RoutingModel& routing_model_;
430 const std::vector<std::vector<RoutingModel::DisjunctionIndex>>& groups_;
431 std::vector<std::vector<int>> group_nodes_;
432 std::vector<std::vector<int>> node_groups_;
433 struct DisjunctionGroupInfo {
434 int group;
435 std::vector<int> sorted_ranks;
436 };
437 util_intops::StrongVector<RoutingModel::DisjunctionIndex,
438 std::vector<DisjunctionGroupInfo>>
439 disjunction_groups_;
440 CommittableArray<std::pair<int, int>> group_bounds_;
441 std::vector<RoutingModel::DisjunctionIndex> touched_disjunctions_;
442 CommittableArray<bool> disjunction_is_active_;
443 CommittableArray<bool> disjunction_is_inactive_;
444};
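// Note: for each ordered group, group_bounds_ stores the rank interval
// [min_rank, max_rank] of disjunctions whose status is still open. CheckGroup()
// looks for a provably inactive disjunction scanning from min_rank and a
// provably active one scanning from max_rank; Propagate() then forces every
// disjunction up to the last occurrence of an active one to become active, and
// every disjunction from the first occurrence of an inactive one onward to
// become inactive, shrinking the bounds and failing when the two requirements
// conflict.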
445
446template <typename GroupAccessor>
447class ActiveNodeGroupFilter : public IntVarLocalSearchFilter {
448 public:
449 explicit ActiveNodeGroupFilter(const RoutingModel& routing_model)
450 : IntVarLocalSearchFilter(routing_model.Nexts()),
451 group_accessor_(routing_model),
452 active_count_per_group_(group_accessor_.NumberOfGroups(),
453 {.active = 0, .unknown = 0}),
454 node_is_active_(routing_model.Nexts().size(), false),
455 node_is_unknown_(routing_model.Nexts().size(), false) {}
456
457 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
458 int64_t /*objective_min*/, int64_t /*objective_max*/) override {
459 active_count_per_group_.Revert();
460 node_is_active_.Revert();
461 node_is_unknown_.Revert();
462 group_accessor_.Revert();
463 const Assignment::IntContainer& container = delta->IntVarContainer();
464 // Updating group counters.
465 for (const IntVarElement& new_element : container.elements()) {
466 IntVar* const var = new_element.Var();
467 int64_t index = -1;
468 if (!FindIndex(var, &index)) continue;
469 for (const int group : group_accessor_.GetGroupsFromNode(index)) {
470 ActivityCounts counts = active_count_per_group_.Get(group);
471 // Change contribution to counts: remove old state, add new state.
472 if (node_is_unknown_.Get(index)) --counts.unknown;
473 if (node_is_active_.Get(index)) --counts.active;
474 if (new_element.Min() != new_element.Max()) {
475 ++counts.unknown;
476 } else if (new_element.Min() != index) {
477 ++counts.active;
478 }
479 active_count_per_group_.Set(group, counts);
480 }
481 }
482 // Updating node states.
483 for (const IntVarElement& new_element : container.elements()) {
484 IntVar* const var = new_element.Var();
485 int64_t index = -1;
486 if (!FindIndex(var, &index)) continue;
487 node_is_unknown_.Set(index, new_element.Min() != new_element.Max());
488 node_is_active_.Set(index, new_element.Min() == new_element.Max() &&
489 new_element.Min() != index);
490 }
491 for (const int group : active_count_per_group_.ChangedIndices()) {
492 const ActivityCounts counts = active_count_per_group_.Get(group);
493 if (!group_accessor_.CheckGroup(group, counts.active, counts.unknown,
494 node_is_active_, node_is_unknown_)) {
495 return false;
496 }
497 }
498 return true;
499 }
500 std::string DebugString() const override { return "ActiveNodeGroupFilter"; }
501
502 private:
503 void OnSynchronize(const Assignment* /*delta*/) override {
504 const int num_groups = group_accessor_.NumberOfGroups();
505 for (int group = 0; group < num_groups; ++group) {
506 ActivityCounts counts = {.active = 0, .unknown = 0};
507 for (int node : group_accessor_.GetGroupNodes(group)) {
508 if (IsVarSynced(node)) {
509 const bool is_active = (Value(node) != node);
510 node_is_active_.Set(node, is_active);
511 node_is_unknown_.Set(node, false);
512 counts.active += is_active ? 1 : 0;
513 } else {
514 ++counts.unknown;
515 node_is_unknown_.Set(node, true);
516 node_is_active_.Set(node, false);
517 }
518 }
519 active_count_per_group_.Set(group, counts);
520 }
521 active_count_per_group_.Commit();
522 node_is_active_.Commit();
523 node_is_unknown_.Commit();
524 }
525
526 GroupAccessor group_accessor_;
527 struct ActivityCounts {
528 int active;
529 int unknown;
530 };
531 CommittableArray<ActivityCounts> active_count_per_group_;
532 // node_is_active_[node] is true iff node was synced and active at last
533 // Synchronize().
534 CommittableArray<bool> node_is_active_;
535 // node_is_unknown_[node] is true iff node was not synced at last
536 // Synchronize().
537 CommittableArray<bool> node_is_unknown_;
538};
539
540} // namespace
541
542IntVarLocalSearchFilter* MakeActiveNodeGroupFilter(
543 const RoutingModel& routing_model) {
544 return routing_model.solver()->RevAlloc(
545 new ActiveNodeGroupFilter<SameActivityGroupManager>(routing_model));
546}
547
549 const RoutingModel& routing_model) {
550 return routing_model.solver()->RevAlloc(
551 new ActiveNodeGroupFilter<OrderedActivityGroupManager>(routing_model));
552}
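// Note: both factories above instantiate the same templated
// ActiveNodeGroupFilter; only the group accessor differs (same-activity groups
// versus ordered activity groups). The RoutingModel normally wires these
// filters into its local search itself, so direct construction is mostly
// useful in tests.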
553
554namespace {
555
556// Node disjunction filter class.
557class NodeDisjunctionFilter : public IntVarLocalSearchFilter {
558 public:
559 explicit NodeDisjunctionFilter(const RoutingModel& routing_model,
560 bool filter_cost)
561 : IntVarLocalSearchFilter(routing_model.Nexts()),
562 routing_model_(routing_model),
563 count_per_disjunction_(routing_model.GetNumberOfDisjunctions(),
564 {.active = 0, .inactive = 0}),
565 synchronized_objective_value_(std::numeric_limits<int64_t>::min()),
566 accepted_objective_value_(std::numeric_limits<int64_t>::min()),
567 filter_cost_(filter_cost),
568 has_mandatory_disjunctions_(routing_model.HasMandatoryDisjunctions()) {}
569
570 using Disjunction = RoutingModel::DisjunctionIndex;
571
572 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
573 int64_t /*objective_min*/, int64_t objective_max) override {
574 count_per_disjunction_.Revert();
575 bool lns_detected = false;
576 // Update the active/inactive counts of each modified disjunction.
577 for (const IntVarElement& element : delta->IntVarContainer().elements()) {
578 int64_t node = -1;
579 if (!FindIndex(element.Var(), &node)) continue;
580 lns_detected |= element.Min() != element.Max();
581 // Compute difference in how this node contributes to activity counts.
582 const bool is_var_synced = IsVarSynced(node);
583 const bool was_active = is_var_synced && Value(node) != node;
584 const bool is_active = node < element.Min() || element.Max() < node;
585 ActivityCount contribution_delta = {.active = 0, .inactive = 0};
586 if (is_var_synced) {
587 contribution_delta.active -= was_active;
588 contribution_delta.inactive -= !was_active;
589 }
590 contribution_delta.active += is_active;
591 contribution_delta.inactive += !is_active;
592 // Common shortcut: if the change is neutral, counts stay the same.
593 if (contribution_delta.active == 0 && contribution_delta.inactive == 0) {
594 continue;
595 }
596 // Change counts of all disjunctions affected by this node.
597 for (const Disjunction disjunction :
598 routing_model_.GetDisjunctionIndices(node)) {
599 ActivityCount new_count =
600 count_per_disjunction_.Get(disjunction.value());
601 new_count.active += contribution_delta.active;
602 new_count.inactive += contribution_delta.inactive;
603 count_per_disjunction_.Set(disjunction.value(), new_count);
604 }
605 }
606 // Check if any disjunction has too many active nodes.
607 for (const int index : count_per_disjunction_.ChangedIndices()) {
608 if (count_per_disjunction_.Get(index).active >
609 routing_model_.GetDisjunctionMaxCardinality(Disjunction(index))) {
610 return false;
611 }
612 }
613 if (lns_detected || (!filter_cost_ && !has_mandatory_disjunctions_)) {
614 accepted_objective_value_ = 0;
615 return true;
616 }
617 // Update penalty costs for disjunctions.
618 accepted_objective_value_ = synchronized_objective_value_;
619 for (const int index : count_per_disjunction_.ChangedIndices()) {
620 // If num inactives did not change, skip. Common shortcut.
621 const int old_inactives =
622 count_per_disjunction_.GetCommitted(index).inactive;
623 const int new_inactives = count_per_disjunction_.Get(index).inactive;
624 if (old_inactives == new_inactives) continue;
625 // If this disjunction has no penalty for inactive nodes, skip.
626 const Disjunction disjunction(index);
627 const int64_t penalty = routing_model_.GetDisjunctionPenalty(disjunction);
628 if (penalty == 0) continue;
629
630 // Compute the new cost of activity bound violations.
631 const int max_inactives =
632 routing_model_.GetDisjunctionNodeIndices(disjunction).size() -
633 routing_model_.GetDisjunctionMaxCardinality(disjunction);
634 int new_violation = std::max(0, new_inactives - max_inactives);
635 int old_violation = std::max(0, old_inactives - max_inactives);
636 // If nodes are mandatory, there can be no violation.
637 if (penalty < 0 && new_violation > 0) return false;
638 if (routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction) ==
639 RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE) {
640 new_violation = std::min(1, new_violation);
641 old_violation = std::min(1, old_violation);
642 }
643 CapAddTo(CapProd(penalty, (new_violation - old_violation)),
644 &accepted_objective_value_);
645 }
646 // Only compare to the max, as only a lower bound of the cost is computed.
647 return accepted_objective_value_ <= objective_max;
648 }
649 std::string DebugString() const override { return "NodeDisjunctionFilter"; }
650 int64_t GetSynchronizedObjectiveValue() const override {
651 return synchronized_objective_value_;
652 }
653 int64_t GetAcceptedObjectiveValue() const override {
654 return accepted_objective_value_;
655 }
656
657 private:
658 void OnSynchronize(const Assignment* /*delta*/) override {
659 synchronized_objective_value_ = 0;
660 count_per_disjunction_.Revert();
661 const int num_disjunctions = routing_model_.GetNumberOfDisjunctions();
662 for (Disjunction disjunction(0); disjunction < num_disjunctions;
663 ++disjunction) {
664 // Count number of active/inactive nodes of this disjunction.
665 ActivityCount count = {.active = 0, .inactive = 0};
666 const auto& nodes = routing_model_.GetDisjunctionNodeIndices(disjunction);
667 for (const int64_t node : nodes) {
668 if (!IsVarSynced(node)) continue;
669 const int is_active = Value(node) != node;
670 count.active += is_active;
671 count.inactive += !is_active;
672 }
673 count_per_disjunction_.Set(disjunction.value(), count);
674 // Add penalty of this disjunction to total cost.
675 if (!filter_cost_) continue;
676 const int64_t penalty = routing_model_.GetDisjunctionPenalty(disjunction);
677 const int max_actives =
678 routing_model_.GetDisjunctionMaxCardinality(disjunction);
679 int violation = count.inactive - (nodes.size() - max_actives);
680 if (violation > 0 && penalty > 0) {
681 if (routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction) ==
682 RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE) {
683 violation = std::min(1, violation);
684 }
685 CapAddTo(CapProd(penalty, violation), &synchronized_objective_value_);
686 }
687 }
688 count_per_disjunction_.Commit();
689 accepted_objective_value_ = synchronized_objective_value_;
690 }
691
692 const RoutingModel& routing_model_;
693 struct ActivityCount {
694 int active = 0;
695 int inactive = 0;
696 };
697 CommittableArray<ActivityCount> count_per_disjunction_;
698 int64_t synchronized_objective_value_;
699 int64_t accepted_objective_value_;
700 const bool filter_cost_;
701 const bool has_mandatory_disjunctions_;
702};
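// Note: for a disjunction with n nodes, max cardinality k and penalty p, the
// filter above charges p * max(0, inactive - (n - k)) when p >= 0 (with the
// violation capped at 1 when the disjunction's penalty cost behavior is
// "penalize once"), and rejects any violation outright when p < 0, which
// encodes a mandatory disjunction. Illustrative numbers: n = 3, k = 1, p = 10
// and all three nodes inactive give a violation of 1 and a cost of 10.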
703} // namespace
704
705IntVarLocalSearchFilter* MakeNodeDisjunctionFilter(
706 const RoutingModel& routing_model, bool filter_cost) {
707 return routing_model.solver()->RevAlloc(
708 new NodeDisjunctionFilter(routing_model, filter_cost));
709}
710
711const int64_t BasePathFilter::kUnassigned = -1;
712
713BasePathFilter::BasePathFilter(const std::vector<IntVar*>& nexts,
714 int next_domain_size,
715 const PathsMetadata& paths_metadata)
716 : IntVarLocalSearchFilter(nexts),
717 paths_metadata_(paths_metadata),
718 node_path_starts_(next_domain_size, kUnassigned),
719 new_synchronized_unperformed_nodes_(nexts.size()),
720 new_nexts_(nexts.size(), kUnassigned),
721 touched_paths_(nexts.size()),
722 touched_path_chain_start_ends_(nexts.size(), {kUnassigned, kUnassigned}),
723 ranks_(next_domain_size, kUnassigned),
724 lns_detected_(false) {}
725
726bool BasePathFilter::Accept(const Assignment* delta,
727 const Assignment* /*deltadelta*/,
728 int64_t objective_min, int64_t objective_max) {
729 lns_detected_ = false;
730 for (const int touched : delta_touched_) {
731 new_nexts_[touched] = kUnassigned;
732 }
733 delta_touched_.clear();
734 const Assignment::IntContainer& container = delta->IntVarContainer();
735 delta_touched_.reserve(container.Size());
736 // Determining touched paths and their touched chain starts and ends (a node
737 // is touched if it corresponds to an element of delta or if an element of
738 // delta points to it).
739 // The start and end of a touched path subchain will have remained on the same
740 // path and will correspond to the min and max ranks of touched nodes in the
741 // current assignment.
742 for (int64_t touched_path : touched_paths_.PositionsSetAtLeastOnce()) {
743 touched_path_chain_start_ends_[touched_path] = {kUnassigned, kUnassigned};
744 }
745 touched_paths_.ResetAllToFalse();
746
747 const auto update_touched_path_chain_start_end = [this](int64_t index) {
748 const int64_t start = node_path_starts_[index];
749 if (start == kUnassigned) return;
750 touched_paths_.Set(start);
751
752 int64_t& chain_start = touched_path_chain_start_ends_[start].first;
753 if (chain_start == kUnassigned || paths_metadata_.IsStart(index) ||
754 ranks_[index] < ranks_[chain_start]) {
755 chain_start = index;
756 }
757
758 int64_t& chain_end = touched_path_chain_start_ends_[start].second;
759 if (chain_end == kUnassigned || paths_metadata_.IsEnd(index) ||
760 ranks_[index] > ranks_[chain_end]) {
761 chain_end = index;
762 }
763 };
764
765 for (const IntVarElement& new_element : container.elements()) {
766 IntVar* const var = new_element.Var();
767 int64_t index = kUnassigned;
768 if (FindIndex(var, &index)) {
769 if (!new_element.Bound()) {
770 // LNS detected
771 lns_detected_ = true;
772 return true;
773 }
774 new_nexts_[index] = new_element.Value();
775 delta_touched_.push_back(index);
776 update_touched_path_chain_start_end(index);
777 update_touched_path_chain_start_end(new_nexts_[index]);
778 }
779 }
780 // Checking feasibility of touched paths.
781 if (!InitializeAcceptPath()) return false;
782 for (const int64_t touched_start : touched_paths_.PositionsSetAtLeastOnce()) {
783 const std::pair<int64_t, int64_t> start_end =
784 touched_path_chain_start_ends_[touched_start];
785 if (!AcceptPath(touched_start, start_end.first, start_end.second)) {
786 return false;
787 }
788 }
789 // NOTE: FinalizeAcceptPath() is only called if InitializeAcceptPath() is true
790 // and all paths are accepted.
791 return FinalizeAcceptPath(objective_min, objective_max);
792}
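// Note: the touched subchain of a path is delimited by the touched node with
// the smallest rank and the one with the largest rank, where ranks come from
// the last synchronization. This is what allows subclasses to re-examine only
// the modified portion of a route in AcceptPath() instead of the whole path.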
793
794void BasePathFilter::ComputePathStarts(std::vector<int64_t>* path_starts,
795 std::vector<int>* index_to_path) {
796 path_starts->clear();
797 const int nexts_size = Size();
798 index_to_path->assign(nexts_size, kUnassigned);
799 Bitset64<> has_prevs(nexts_size);
800 for (int i = 0; i < nexts_size; ++i) {
801 if (!IsVarSynced(i)) {
802 has_prevs.Set(i);
803 } else {
804 const int next = Value(i);
805 if (next < nexts_size) {
806 has_prevs.Set(next);
807 }
808 }
809 }
810 for (int i = 0; i < nexts_size; ++i) {
811 if (!has_prevs[i]) {
812 (*index_to_path)[i] = path_starts->size();
813 path_starts->push_back(i);
814 }
815 }
816}
817
818void BasePathFilter::SynchronizeFullAssignment() {
819 for (int64_t index = 0; index < Size(); index++) {
820 if (IsVarSynced(index) && Value(index) == index &&
821 node_path_starts_[index] != kUnassigned) {
822 // index was performed before and is now unperformed.
823 new_synchronized_unperformed_nodes_.Set(index);
824 }
825 }
826 // Marking inactive nodes (which are not on a path).
827 node_path_starts_.assign(node_path_starts_.size(), kUnassigned);
828 // Marking nodes on a path and storing next values.
829 const int nexts_size = Size();
830 for (int path = 0; path < NumPaths(); ++path) {
831 const int64_t start = Start(path);
832 node_path_starts_[start] = start;
833 if (IsVarSynced(start)) {
834 int64_t next = Value(start);
835 while (next < nexts_size) {
836 int64_t node = next;
837 node_path_starts_[node] = start;
838 DCHECK(IsVarSynced(node));
839 next = Value(node);
840 }
841 node_path_starts_[next] = start;
842 }
843 node_path_starts_[End(path)] = start;
844 }
845 for (const int touched : delta_touched_) {
846 new_nexts_[touched] = kUnassigned;
847 }
848 delta_touched_.clear();
849 OnBeforeSynchronizePaths(/*synchronizing_all_paths=*/true);
850 UpdateAllRanks();
851 OnAfterSynchronizePaths();
852}
853
854void BasePathFilter::OnSynchronize(const Assignment* delta) {
855 new_synchronized_unperformed_nodes_.ResetAllToFalse();
856 if (delta == nullptr || delta->Empty() ||
857 absl::c_all_of(ranks_, [](int rank) { return rank == kUnassigned; })) {
858 SynchronizeFullAssignment();
859 return;
860 }
861 const Assignment::IntContainer& container = delta->IntVarContainer();
862 touched_paths_.ResetAllToFalse();
863 for (const IntVarElement& new_element : container.elements()) {
864 int64_t index = kUnassigned;
865 if (FindIndex(new_element.Var(), &index)) {
866 const int64_t start = node_path_starts_[index];
867 if (start != kUnassigned) {
868 touched_paths_.Set(start);
869 if (Value(index) == index) {
870 // New unperformed node (its previous start isn't unassigned).
871 DCHECK_LT(index, new_nexts_.size());
872 new_synchronized_unperformed_nodes_.Set(index);
873 node_path_starts_[index] = kUnassigned;
874 }
875 }
876 }
877 }
878 for (const int touched : delta_touched_) {
879 new_nexts_[touched] = kUnassigned;
880 }
881 delta_touched_.clear();
882 OnBeforeSynchronizePaths(/*synchronizing_all_paths=*/false);
883 for (const int64_t touched_start : touched_paths_.PositionsSetAtLeastOnce()) {
884 int64_t node = touched_start;
885 while (node < Size()) {
886 node_path_starts_[node] = touched_start;
887 node = Value(node);
888 }
889 node_path_starts_[node] = touched_start;
890 UpdatePathRanksFromStart(touched_start);
891 OnSynchronizePathFromStart(touched_start);
892 }
893 OnAfterSynchronizePaths();
894}
895
896void BasePathFilter::UpdateAllRanks() {
897 ranks_.assign(ranks_.size(), kUnassigned);
898 for (int r = 0; r < NumPaths(); ++r) {
899 const int64_t start = Start(r);
900 if (!IsVarSynced(start)) continue;
901 UpdatePathRanksFromStart(start);
902 OnSynchronizePathFromStart(start);
903 }
904}
905
906void BasePathFilter::UpdatePathRanksFromStart(int start) {
907 int rank = 0;
908 int64_t node = start;
909 while (node < Size()) {
910 ranks_[node] = rank;
911 rank++;
912 node = Value(node);
913 }
914 ranks_[node] = rank;
915}
916
917namespace {
918
919class VehicleAmortizedCostFilter : public BasePathFilter {
920 public:
921 explicit VehicleAmortizedCostFilter(const RoutingModel& routing_model);
922 ~VehicleAmortizedCostFilter() override = default;
923 std::string DebugString() const override {
924 return "VehicleAmortizedCostFilter";
925 }
926 int64_t GetSynchronizedObjectiveValue() const override {
927 return current_vehicle_cost_;
928 }
929 int64_t GetAcceptedObjectiveValue() const override {
930 return lns_detected() ? 0 : delta_vehicle_cost_;
931 }
932
933 private:
934 void OnSynchronizePathFromStart(int64_t start) override;
935 void OnAfterSynchronizePaths() override;
936 bool InitializeAcceptPath() override;
937 bool AcceptPath(int64_t path_start, int64_t chain_start,
938 int64_t chain_end) override;
939 bool FinalizeAcceptPath(int64_t objective_min,
940 int64_t objective_max) override;
941
942 int64_t current_vehicle_cost_;
943 int64_t delta_vehicle_cost_;
944 std::vector<int> current_route_lengths_;
945 std::vector<int64_t> start_to_end_;
946 std::vector<int> start_to_vehicle_;
947 std::vector<int64_t> vehicle_to_start_;
948 const std::vector<int64_t>& linear_cost_factor_of_vehicle_;
949 const std::vector<int64_t>& quadratic_cost_factor_of_vehicle_;
950};
951
952VehicleAmortizedCostFilter::VehicleAmortizedCostFilter(
953 const RoutingModel& routing_model)
954 : BasePathFilter(routing_model.Nexts(),
955 routing_model.Size() + routing_model.vehicles(),
956 routing_model.GetPathsMetadata()),
957 current_vehicle_cost_(0),
958 delta_vehicle_cost_(0),
959 current_route_lengths_(Size(), -1),
960 linear_cost_factor_of_vehicle_(
961 routing_model.GetAmortizedLinearCostFactorOfVehicles()),
962 quadratic_cost_factor_of_vehicle_(
963 routing_model.GetAmortizedQuadraticCostFactorOfVehicles()) {
964 start_to_end_.resize(Size(), -1);
965 start_to_vehicle_.resize(Size(), -1);
966 vehicle_to_start_.resize(routing_model.vehicles());
967 for (int v = 0; v < routing_model.vehicles(); v++) {
968 const int64_t start = routing_model.Start(v);
969 start_to_vehicle_[start] = v;
970 start_to_end_[start] = routing_model.End(v);
971 vehicle_to_start_[v] = start;
972 }
973}
974
975void VehicleAmortizedCostFilter::OnSynchronizePathFromStart(int64_t start) {
976 const int64_t end = start_to_end_[start];
977 CHECK_GE(end, 0);
978 const int route_length = Rank(end) - 1;
979 CHECK_GE(route_length, 0);
980 current_route_lengths_[start] = route_length;
981}
982
983void VehicleAmortizedCostFilter::OnAfterSynchronizePaths() {
984 current_vehicle_cost_ = 0;
985 for (int vehicle = 0; vehicle < vehicle_to_start_.size(); vehicle++) {
986 const int64_t start = vehicle_to_start_[vehicle];
987 DCHECK_EQ(vehicle, start_to_vehicle_[start]);
988 if (!IsVarSynced(start)) {
989 return;
990 }
991 const int route_length = current_route_lengths_[start];
992 DCHECK_GE(route_length, 0);
993
994 if (route_length == 0) {
995 // The path is empty.
996 continue;
997 }
998
999 const int64_t linear_cost_factor = linear_cost_factor_of_vehicle_[vehicle];
1000 const int64_t route_length_cost =
1001 CapProd(quadratic_cost_factor_of_vehicle_[vehicle],
1002 route_length * route_length);
1003
1004 CapAddTo(CapSub(linear_cost_factor, route_length_cost),
1005 &current_vehicle_cost_);
1006 }
1007}
1008
1009bool VehicleAmortizedCostFilter::InitializeAcceptPath() {
1010 delta_vehicle_cost_ = current_vehicle_cost_;
1011 return true;
1012}
1013
1014bool VehicleAmortizedCostFilter::AcceptPath(int64_t path_start,
1015 int64_t chain_start,
1016 int64_t chain_end) {
1017 // Number of nodes previously between chain_start and chain_end.
1018 const int previous_chain_nodes = Rank(chain_end) - 1 - Rank(chain_start);
1019 CHECK_GE(previous_chain_nodes, 0);
1020 int new_chain_nodes = 0;
1021 int64_t node = GetNext(chain_start);
1022 while (node != chain_end) {
1023 new_chain_nodes++;
1024 node = GetNext(node);
1025 }
1026
1027 const int previous_route_length = current_route_lengths_[path_start];
1028 CHECK_GE(previous_route_length, 0);
1029 const int new_route_length =
1030 previous_route_length - previous_chain_nodes + new_chain_nodes;
1031
1032 const int vehicle = start_to_vehicle_[path_start];
1033 CHECK_GE(vehicle, 0);
1034 DCHECK_EQ(path_start, vehicle_to_start_[vehicle]);
1035
1036 // Update the cost related to used vehicles.
1037 // TODO(user): Handle possible overflows.
1038 if (previous_route_length == 0) {
1039 // The route was empty before; it is no longer the case (changed path).
1040 CHECK_GT(new_route_length, 0);
1041 CapAddTo(linear_cost_factor_of_vehicle_[vehicle], &delta_vehicle_cost_);
1042 } else if (new_route_length == 0) {
1043 // The route is now empty.
1044 delta_vehicle_cost_ =
1045 CapSub(delta_vehicle_cost_, linear_cost_factor_of_vehicle_[vehicle]);
1046 }
1047
1048 // Update the cost related to the sum of the squares of the route lengths.
1049 const int64_t quadratic_cost_factor =
1050 quadratic_cost_factor_of_vehicle_[vehicle];
1051 CapAddTo(CapProd(quadratic_cost_factor,
1052 previous_route_length * previous_route_length),
1053 &delta_vehicle_cost_);
1054 delta_vehicle_cost_ = CapSub(
1055 delta_vehicle_cost_,
1056 CapProd(quadratic_cost_factor, new_route_length * new_route_length));
1057
1058 return true;
1059}
1060
1061bool VehicleAmortizedCostFilter::FinalizeAcceptPath(int64_t /*objective_min*/,
1062 int64_t objective_max) {
1063 return delta_vehicle_cost_ <= objective_max;
1064}
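// Note: for a non-empty route of length L served by vehicle v, the amortized
// contribution computed above is linear_cost_factor[v] -
// quadratic_cost_factor[v] * L^2, so with positive factors a longer route has
// a smaller amortized cost. Illustrative numbers: a linear factor of 1000, a
// quadratic factor of 10 and L = 5 contribute 1000 - 10 * 25 = 750.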
1065
1066} // namespace
1067
1068IntVarLocalSearchFilter* MakeVehicleAmortizedCostFilter(
1069 const RoutingModel& routing_model) {
1070 return routing_model.solver()->RevAlloc(
1071 new VehicleAmortizedCostFilter(routing_model));
1072}
1073
1074namespace {
1075
1076// TODO(user): Make this filter use PathStates.
1077// TODO(user): Optimize the case where same vehicle groups are disjoint and
1078// deltas are not "splitting" the groups.
1079class SameVehicleCostFilter : public BasePathFilter {
1080 public:
1081 explicit SameVehicleCostFilter(const RoutingModel& model)
1082 : BasePathFilter(model.Nexts(), model.Size() + model.vehicles(),
1083 model.GetPathsMetadata()),
1084 model_(model),
1085 same_vehicle_costs_per_node_(model.Size()),
1086 nodes_per_vehicle_(model.GetNumberOfSoftSameVehicleConstraints()),
1087 new_nodes_per_vehicle_(model.GetNumberOfSoftSameVehicleConstraints()),
1088 current_vehicle_per_node_(model.Size()),
1089 current_cost_(0) {
1090 for (int i = 0; i < model.GetNumberOfSoftSameVehicleConstraints(); ++i) {
1091 for (int node : model.GetSoftSameVehicleIndices(i)) {
1092 same_vehicle_costs_per_node_[node].push_back(i);
1093 }
1094 }
1095 start_to_vehicle_.resize(Size(), -1);
1096 for (int v = 0; v < model.vehicles(); v++) {
1097 const int64_t start = model.Start(v);
1098 start_to_vehicle_[start] = v;
1099 }
1100 }
1101 int64_t GetSynchronizedObjectiveValue() const override {
1102 return current_cost_;
1103 }
1104 int64_t GetAcceptedObjectiveValue() const override {
1105 return lns_detected() ? 0 : delta_cost_;
1106 }
1107 std::string DebugString() const override { return "SameVehicleCostFilter"; }
1108
1109 private:
1110 bool InitializeAcceptPath() override {
1111 delta_cost_ = current_cost_;
1112 for (int same_vehicle_cost_index : touched_) {
1113 new_nodes_per_vehicle_[same_vehicle_cost_index] =
1114 nodes_per_vehicle_[same_vehicle_cost_index];
1115 }
1116 touched_.clear();
1117 return true;
1118 }
1119 bool AcceptPath(int64_t path_start, int64_t chain_start,
1120 int64_t chain_end) override {
1121 const int64_t vehicle = start_to_vehicle_[path_start];
1122 DCHECK_GE(vehicle, 0);
1123 if (chain_start == chain_end) return true;
1124 for (int64_t node = GetNext(chain_start); node != chain_end;
1125 node = GetNext(node)) {
1126 if (vehicle == current_vehicle_per_node_[node]) continue;
1127 for (int same_vehicle_cost_index : same_vehicle_costs_per_node_[node]) {
1128 auto& new_nodes = new_nodes_per_vehicle_[same_vehicle_cost_index];
1129 new_nodes[vehicle]++;
1130 new_nodes[current_vehicle_per_node_[node]]--;
1131 if (new_nodes[current_vehicle_per_node_[node]] == 0) {
1132 new_nodes.erase(current_vehicle_per_node_[node]);
1133 }
1134 touched_.insert(same_vehicle_cost_index);
1135 }
1136 }
1137 return true;
1138 }
1139 bool FinalizeAcceptPath(int64_t /*objective_min*/,
1140 int64_t objective_max) override {
1141 for (int same_vehicle_cost_index : touched_) {
1142 CapAddTo(CapSub(GetCost(same_vehicle_cost_index, new_nodes_per_vehicle_),
1143 GetCost(same_vehicle_cost_index, nodes_per_vehicle_)),
1144 &delta_cost_);
1145 }
1146 return delta_cost_ <= objective_max;
1147 }
1148
1149 void OnAfterSynchronizePaths() override {
1150 current_cost_ = 0;
1151 touched_.clear();
1152 for (int same_vehicle_cost_index = 0;
1153 same_vehicle_cost_index < nodes_per_vehicle_.size();
1154 ++same_vehicle_cost_index) {
1155 nodes_per_vehicle_[same_vehicle_cost_index].clear();
1156 touched_.insert(same_vehicle_cost_index);
1157 }
1158 current_vehicle_per_node_.assign(model_.Size(), -1);
1159 for (int v = 0; v < model_.vehicles(); ++v) {
1160 int64_t node = GetNext(model_.Start(v));
1161 DCHECK(model_.IsEnd(node) || IsVarSynced(node));
1162 while (!model_.IsEnd(node)) {
1163 for (int same_vehicle_cost_index : same_vehicle_costs_per_node_[node]) {
1164 nodes_per_vehicle_[same_vehicle_cost_index][v]++;
1165 }
1166 current_vehicle_per_node_[node] = v;
1167 node = GetNext(node);
1168 }
1169 }
1170 for (int same_vehicle_cost_index = 0;
1171 same_vehicle_cost_index < nodes_per_vehicle_.size();
1172 ++same_vehicle_cost_index) {
1173 CapAddTo(GetCost(same_vehicle_cost_index, nodes_per_vehicle_),
1174 &current_cost_);
1175 }
1176 }
1177 int64_t GetCost(
1178 int index,
1179 absl::Span<const absl::flat_hash_map<int, int>> nodes_per_vehicle) const {
1180 const int num_vehicles_used = nodes_per_vehicle[index].size();
1181 if (num_vehicles_used <= 1) return 0;
1182 return CapProd(num_vehicles_used - 1, model_.GetSoftSameVehicleCost(index));
1183 }
1184
1185 const RoutingModel& model_;
1186 std::vector<int> start_to_vehicle_;
1187 std::vector<std::vector<int>> same_vehicle_costs_per_node_;
1188 std::vector<absl::flat_hash_map<int, int>> nodes_per_vehicle_;
1189 std::vector<absl::flat_hash_map<int, int>> new_nodes_per_vehicle_;
1190 absl::flat_hash_set<int> touched_;
1191 std::vector<int> current_vehicle_per_node_;
1192 int64_t current_cost_;
1193 int64_t delta_cost_;
1194};
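// Note: each soft same-vehicle constraint above costs
// (number of distinct vehicles serving its nodes - 1) times the constraint's
// GetSoftSameVehicleCost value, so it is free when all performed nodes of the
// constraint share a single vehicle. The per-vehicle node counts are stored in
// hash maps so that only the constraints touched by the delta are re-priced in
// FinalizeAcceptPath().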
1195
1196} // namespace
1197
1199 const RoutingModel& routing_model) {
1200 return routing_model.solver()->RevAlloc(
1201 new SameVehicleCostFilter(routing_model));
1202}
1203
1204namespace {
1205
1206class TypeRegulationsFilter : public BasePathFilter {
1207 public:
1208 explicit TypeRegulationsFilter(const RoutingModel& model);
1209 ~TypeRegulationsFilter() override = default;
1210 std::string DebugString() const override { return "TypeRegulationsFilter"; }
1211
1212 private:
1213 void OnSynchronizePathFromStart(int64_t start) override;
1214 bool AcceptPath(int64_t path_start, int64_t chain_start,
1215 int64_t chain_end) override;
1216
1217 bool HardIncompatibilitiesRespected(int vehicle, int64_t chain_start,
1218 int64_t chain_end);
1219
1220 const RoutingModel& routing_model_;
1221 std::vector<int> start_to_vehicle_;
1222 // The following vector is used to keep track of the type counts for hard
1223 // incompatibilities.
1224 std::vector<std::vector<int>> hard_incompatibility_type_counts_per_vehicle_;
1225 // Used to verify the temporal incompatibilities and requirements.
1226 TypeIncompatibilityChecker temporal_incompatibility_checker_;
1227 TypeRequirementChecker requirement_checker_;
1228};
1229
1230TypeRegulationsFilter::TypeRegulationsFilter(const RoutingModel& model)
1231 : BasePathFilter(model.Nexts(), model.Size() + model.vehicles(),
1232 model.GetPathsMetadata()),
1233 routing_model_(model),
1234 start_to_vehicle_(model.Size(), -1),
1235 temporal_incompatibility_checker_(model,
1236 /*check_hard_incompatibilities*/ false),
1237 requirement_checker_(model) {
1238 const int num_vehicles = model.vehicles();
1239 const bool has_hard_type_incompatibilities =
1240 model.HasHardTypeIncompatibilities();
1241 if (has_hard_type_incompatibilities) {
1242 hard_incompatibility_type_counts_per_vehicle_.resize(num_vehicles);
1243 }
1244 const int num_visit_types = model.GetNumberOfVisitTypes();
1245 for (int vehicle = 0; vehicle < num_vehicles; vehicle++) {
1246 const int64_t start = model.Start(vehicle);
1247 start_to_vehicle_[start] = vehicle;
1248 if (has_hard_type_incompatibilities) {
1249 hard_incompatibility_type_counts_per_vehicle_[vehicle].resize(
1250 num_visit_types, 0);
1251 }
1252 }
1253}
1254
1255void TypeRegulationsFilter::OnSynchronizePathFromStart(int64_t start) {
1256 if (!routing_model_.HasHardTypeIncompatibilities()) return;
1257
1258 const int vehicle = start_to_vehicle_[start];
1259 CHECK_GE(vehicle, 0);
1260 std::vector<int>& type_counts =
1261 hard_incompatibility_type_counts_per_vehicle_[vehicle];
1262 std::fill(type_counts.begin(), type_counts.end(), 0);
1263 const int num_types = type_counts.size();
1264
1265 int64_t node = start;
1266 while (node < Size()) {
1267 DCHECK(IsVarSynced(node));
1268 const int type = routing_model_.GetVisitType(node);
1269 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
1270 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
1271 CHECK_LT(type, num_types);
1272 type_counts[type]++;
1273 }
1274 node = Value(node);
1275 }
1276}
1277
1278bool TypeRegulationsFilter::HardIncompatibilitiesRespected(int vehicle,
1279 int64_t chain_start,
1280 int64_t chain_end) {
1281 if (!routing_model_.HasHardTypeIncompatibilities()) return true;
1282
1283 const std::vector<int>& previous_type_counts =
1284 hard_incompatibility_type_counts_per_vehicle_[vehicle];
1285
1286 absl::flat_hash_map</*type*/ int, /*new_count*/ int> new_type_counts;
1287 absl::flat_hash_set<int> types_to_check;
1288
1289 // Go through the new nodes on the path and increment their type counts.
1290 int64_t node = GetNext(chain_start);
1291 while (node != chain_end) {
1292 const int type = routing_model_.GetVisitType(node);
1293 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
1294 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
1295 DCHECK_LT(type, previous_type_counts.size());
1296 int& type_count = gtl::LookupOrInsert(&new_type_counts, type,
1297 previous_type_counts[type]);
1298 if (type_count++ == 0) {
1299 // New type on the route, mark to check its incompatibilities.
1300 types_to_check.insert(type);
1301 }
1302 }
1303 node = GetNext(node);
1304 }
1305
1306 // Update new_type_counts by decrementing the occurrence of the types of the
1307 // nodes no longer on the route.
1308 if (IsVarSynced(chain_start)) {
1309 node = Value(chain_start);
1310 while (node != chain_end) {
1311 const int type = routing_model_.GetVisitType(node);
1312 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
1313 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
1314 DCHECK_LT(type, previous_type_counts.size());
1315 int& type_count = gtl::LookupOrInsert(&new_type_counts, type,
1316 previous_type_counts[type]);
1317 CHECK_GE(type_count, 1);
1318 type_count--;
1319 }
1320 node = Value(node);
1321 }
1322 }
1323
1324 // Check the incompatibilities for types in types_to_check.
1325 for (int type : types_to_check) {
1326 for (int incompatible_type :
1327 routing_model_.GetHardTypeIncompatibilitiesOfType(type)) {
1328 if (gtl::FindWithDefault(new_type_counts, incompatible_type,
1329 previous_type_counts[incompatible_type]) > 0) {
1330 return false;
1331 }
1332 }
1333 }
1334 return true;
1335}
1336
1337bool TypeRegulationsFilter::AcceptPath(int64_t path_start, int64_t chain_start,
1338 int64_t chain_end) {
1339 const int vehicle = start_to_vehicle_[path_start];
1340 CHECK_GE(vehicle, 0);
1341 const auto next_accessor = [this](int64_t node) { return GetNext(node); };
1342 return HardIncompatibilitiesRespected(vehicle, chain_start, chain_end) &&
1343 temporal_incompatibility_checker_.CheckVehicle(vehicle,
1344 next_accessor) &&
1345 requirement_checker_.CheckVehicle(vehicle, next_accessor);
1346}
1347
1348} // namespace
1349
1350IntVarLocalSearchFilter* MakeTypeRegulationsFilter(
1351 const RoutingModel& routing_model) {
1352 return routing_model.solver()->RevAlloc(
1353 new TypeRegulationsFilter(routing_model));
1354}
1355
1356namespace {
1357
1358 // ChainCumul filter. A version of the dimension path filter which is O(delta)
1359 // rather than O(length of touched paths). It currently only supports dimensions
1360 // without costs (global and local span cost, soft bounds) and with unconstrained
1361 // cumul variables, except for the overall capacity and the cumul variables of path ends.
1362
1363class ChainCumulFilter : public BasePathFilter {
1364 public:
1365 ChainCumulFilter(const RoutingModel& routing_model,
1366 const RoutingDimension& dimension);
1367 ~ChainCumulFilter() override = default;
1368 std::string DebugString() const override {
1369 return "ChainCumulFilter(" + name_ + ")";
1370 }
1371
1372 private:
1373 void OnSynchronizePathFromStart(int64_t start) override;
1374 bool AcceptPath(int64_t path_start, int64_t chain_start,
1375 int64_t chain_end) override;
1376
1377 const std::vector<IntVar*> cumuls_;
1378 std::vector<int64_t> start_to_vehicle_;
1379 std::vector<int64_t> start_to_end_;
1380 std::vector<const RoutingModel::TransitCallback2*> evaluators_;
1381 const std::vector<int64_t> vehicle_capacities_;
1382 std::vector<int64_t> current_path_cumul_mins_;
1383 std::vector<int64_t> current_max_of_path_end_cumul_mins_;
1384 std::vector<int64_t> old_nexts_;
1385 std::vector<int> old_vehicles_;
1386 std::vector<int64_t> current_transits_;
1387 const std::string name_;
1388};
1389
1390ChainCumulFilter::ChainCumulFilter(const RoutingModel& routing_model,
1391 const RoutingDimension& dimension)
1392 : BasePathFilter(routing_model.Nexts(), dimension.cumuls().size(),
1393 routing_model.GetPathsMetadata()),
1394 cumuls_(dimension.cumuls()),
1395 evaluators_(routing_model.vehicles(), nullptr),
1396 vehicle_capacities_(dimension.vehicle_capacities()),
1397 current_path_cumul_mins_(dimension.cumuls().size(), 0),
1398 current_max_of_path_end_cumul_mins_(dimension.cumuls().size(), 0),
1399 old_nexts_(routing_model.Size(), kUnassigned),
1400 old_vehicles_(routing_model.Size(), kUnassigned),
1401 current_transits_(routing_model.Size(), 0),
1402 name_(dimension.name()) {
1403 start_to_vehicle_.resize(Size(), -1);
1404 start_to_end_.resize(Size(), -1);
1405 for (int i = 0; i < routing_model.vehicles(); ++i) {
1406 start_to_vehicle_[routing_model.Start(i)] = i;
1407 start_to_end_[routing_model.Start(i)] = routing_model.End(i);
1408 evaluators_[i] = &dimension.transit_evaluator(i);
1409 }
1410}
1411
1412 // On synchronization, maintain "propagated" cumul mins and, for each node, the
1413 // maximum cumul min from that node to the end of the path; these are used by
1414 // AcceptPath to incrementally check feasibility.
1415void ChainCumulFilter::OnSynchronizePathFromStart(int64_t start) {
1416 const int vehicle = start_to_vehicle_[start];
1417 std::vector<int64_t> path_nodes;
1418 int64_t node = start;
1419 int64_t cumul = cumuls_[node]->Min();
1420 while (node < Size()) {
1421 path_nodes.push_back(node);
1422 current_path_cumul_mins_[node] = cumul;
1423 const int64_t next = Value(node);
1424 if (next != old_nexts_[node] || vehicle != old_vehicles_[node]) {
1425 old_nexts_[node] = next;
1426 old_vehicles_[node] = vehicle;
1427 current_transits_[node] = (*evaluators_[vehicle])(node, next);
1428 }
1429 CapAddTo(current_transits_[node], &cumul);
1430 cumul = std::max(cumuls_[next]->Min(), cumul);
1431 node = next;
1432 }
1433 path_nodes.push_back(node);
1434 current_path_cumul_mins_[node] = cumul;
1435 int64_t max_cumuls = cumul;
1436 for (int i = path_nodes.size() - 1; i >= 0; --i) {
1437 const int64_t node = path_nodes[i];
1438 max_cumuls = std::max(max_cumuls, current_path_cumul_mins_[node]);
1439 current_max_of_path_end_cumul_mins_[node] = max_cumuls;
1440 }
1441}
1442
1443 // The complexity of the method is O(size of the chain chain_start...chain_end).
1444bool ChainCumulFilter::AcceptPath(int64_t path_start, int64_t chain_start,
1445 int64_t chain_end) {
1446 const int vehicle = start_to_vehicle_[path_start];
1447 const int64_t capacity = vehicle_capacities_[vehicle];
1448 int64_t node = chain_start;
1449 int64_t cumul = current_path_cumul_mins_[node];
1450 while (node != chain_end) {
1451 const int64_t next = GetNext(node);
1452 if (IsVarSynced(node) && next == Value(node) &&
1453 vehicle == old_vehicles_[node]) {
1454 CapAddTo(current_transits_[node], &cumul);
1455 } else {
1456 CapAddTo((*evaluators_[vehicle])(node, next), &cumul);
1457 }
1458 cumul = std::max(cumuls_[next]->Min(), cumul);
1459 if (cumul > capacity) return false;
1460 node = next;
1461 }
1462 const int64_t end = start_to_end_[path_start];
1463 const int64_t end_cumul_delta =
1464 CapSub(current_path_cumul_mins_[end], current_path_cumul_mins_[node]);
1465 const int64_t after_chain_cumul_delta =
1466 CapSub(current_max_of_path_end_cumul_mins_[node],
1467 current_path_cumul_mins_[node]);
1468 return CapAdd(cumul, after_chain_cumul_delta) <= capacity &&
1469 CapAdd(cumul, end_cumul_delta) <= cumuls_[end]->Max();
1470}
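// Note: the part of the route after the accepted chain is never re-walked: the
// committed deltas from chain_end to the maximum downstream cumul min and to
// the path end's cumul min are added to the new chain-end cumul and checked
// against the vehicle capacity and the end cumul's upper bound, respectively.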
1471
1472} // namespace
1473
1475 int path, int64_t capacity, int64_t span_upper_bound,
1476 absl::Span<const DimensionValues::Interval> cumul_of_node,
1477 absl::Span<const DimensionValues::Interval> slack_of_node,
1478 absl::AnyInvocable<int64_t(int64_t, int64_t) const> evaluator,
1479 DimensionValues& dimension_values) {
1480 using Interval = DimensionValues::Interval;
1481 // Copy cumul min/max data from cumul variables.
1482 const int num_nodes = dimension_values.NumNodes(path);
1483 absl::Span<const int> nodes = dimension_values.Nodes(path);
1484 absl::Span<Interval> cumuls = dimension_values.MutableCumuls(path);
1485 for (int r = 0; r < num_nodes; ++r) {
1486 const int node = nodes[r];
1487 Interval cumul = cumul_of_node[node];
1488 if (!cumul.DecreaseMax(capacity)) return false;
1489 cumuls[r] = cumul;
1490 }
1491 // Extract travel data.
1492 // TODO(user): refine this logic to avoid more calls, using PathState
1493 // chains to find chains in the middle of the path, and vehicle travel class
1494 // to reuse chains from different vehicles.
1495 absl::Span<int64_t> travels = dimension_values.MutableTravels(path);
1496 {
1497 absl::Span<const int> cnodes = dimension_values.CommittedNodes(path);
1498 absl::Span<const int64_t> ctravels =
1499 dimension_values.CommittedTravels(path);
1500 // Reuse committed travels to avoid calling evaluator.
1501 // Split [0, num_nodes) into [0, i), [i, j), [j, num_nodes),
1502 // so that:
1503 // - nodes[r] == cnodes[r] for r in [0, i)
1504 // - nodes[r] == cnodes[r + delta] for r in [j, num_nodes)
1505 // with delta = num_cnodes - num_nodes.
1506 const int i_limit = std::min(cnodes.size(), nodes.size());
1507 DCHECK(cnodes.empty() || cnodes[0] == nodes[0]);
1508 int i = 1;
1509 while (i < i_limit && cnodes[i] == nodes[i]) {
1510 travels[i - 1] = ctravels[i - 1];
1511 ++i;
1512 }
1513 DCHECK(cnodes.empty() || cnodes.back() == nodes.back());
1514 int j = num_nodes - 2;
1515 const int delta = cnodes.size() - num_nodes;
1516 const int j_limit = i + std::max(0, -delta);
1517 while (j_limit <= j && nodes[j] == cnodes[j + delta]) {
1518 travels[j] = ctravels[j + delta];
1519 --j;
1520 }
1521 ++j;
1522 for (int r = i; r <= j; ++r) {
1523 const int64_t travel = evaluator(nodes[r - 1], nodes[r]);
1524 travels[r - 1] = travel;
1525 }
1526 }
1527 // Extract transit data, fill partial travel sums.
1528 absl::Span<Interval> transits = dimension_values.MutableTransits(path);
1529 absl::Span<int64_t> travel_sums = dimension_values.MutableTravelSums(path);
1530 int64_t total_travel = 0;
1531 travel_sums[0] = 0;
1532 for (int r = 1; r < num_nodes; ++r) {
1533 const int64_t travel = travels[r - 1];
1534 CapAddTo(travel, &total_travel);
1535 travel_sums[r] = total_travel;
1536 Interval transit{.min = travel, .max = travel};
1537 transit.Add(slack_of_node[nodes[r - 1]]);
1538 transits[r - 1] = transit;
1539 }
1540 if (travel_sums.back() > span_upper_bound) return false;
1541 dimension_values.MutableSpan(path) = {.min = travel_sums.back(),
1542 .max = span_upper_bound};
1543 return true;
1544}
1545
1546// Fills pre- and post-visits of a path, using pre/post-travel evaluators.
1547// NOTE: The visit at node A uses time interval
1548// [cumul(A) - previsit(A), cumul(A) + postvisit(A)).
1549// The pre-travel of travel A->B is the post-visit of A,
1550// and the post-travel of travel A->B is the pre-visit of B.
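// For example (illustrative values): if pre_travel(A, B) = 2 and
// post_travel(A, B) = 3, then A gets a post-visit of 2 time units and B gets
// a pre-visit of 3 time units around the travel A->B.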
1551// TODO(user): when PathCumulFilter uses a PathState, replace
1552// dimension_values by a PathState.
1553// TODO(user): use committed values as a cache to avoid calling evaluators.
1555 int path, const DimensionValues& dimension_values,
1556 std::optional<absl::AnyInvocable<int64_t(int64_t, int64_t) const>>
1557 pre_travel_evaluator,
1558 std::optional<absl::AnyInvocable<int64_t(int64_t, int64_t) const>>
1559 post_travel_evaluator,
1560 PrePostVisitValues& visit_values) {
1561 const int num_nodes = dimension_values.NumNodes(path);
1562 visit_values.ChangePathSize(path, num_nodes);
1563 absl::Span<int64_t> pre_visits = visit_values.MutablePreVisits(path);
1564 absl::Span<int64_t> post_visits = visit_values.MutablePostVisits(path);
1565 absl::Span<const int> nodes = dimension_values.Nodes(path);
1566 if (pre_travel_evaluator.has_value()) {
1567 for (int i = 0; i < num_nodes - 1; ++i) {
1568 post_visits[i] = (*pre_travel_evaluator)(nodes[i], nodes[i + 1]);
1569 }
1570 post_visits.back() = 0;
1571 } else {
1572 absl::c_fill(post_visits, 0);
1573 }
1574 if (post_travel_evaluator.has_value()) {
1575 pre_visits[0] = 0;
1576 for (int i = 1; i < num_nodes; ++i) {
1577 pre_visits[i] = (*post_travel_evaluator)(nodes[i - 1], nodes[i]);
1578 }
1579 } else {
1580 absl::c_fill(pre_visits, 0);
1581 }
1582}
1583
1584 bool PropagateLightweightVehicleBreaks(
1585 int path, DimensionValues& dimension_values,
1586 absl::Span<const std::pair<int64_t, int64_t>> interbreaks) {
1587 using Interval = DimensionValues::Interval;
1588 using VehicleBreak = DimensionValues::VehicleBreak;
1589 const int64_t total_travel = dimension_values.TravelSums(path).back();
1590 // Improve bounds on span/start max/end min using time windows: breaks that
1591 // must occur inside the path have their duration accumulated into
1592 // lb_span_tw; they also widen [start_max, end_min).
1593 int64_t lb_span_tw = total_travel;
1594 const absl::Span<Interval> cumuls = dimension_values.MutableCumuls(path);
1595 Interval& start = cumuls.front();
1596 Interval& end = cumuls.back();
1597 // TODO(user): consider adding deductions from the path to the break.
1598 for (const VehicleBreak& br : dimension_values.MutableVehicleBreaks(path)) {
1599 // If the break may be performed before the path, after the path, or if it
1600 // is not performed at all, ignore the break.
1601 if (br.is_performed.min == 0) continue;
1602 if (br.end.min <= start.max || end.min <= br.start.max) continue;
1603 // This break must be performed inside the path: widen the path.
1604 CapAddTo(br.duration.min, &lb_span_tw);
1605 if (!start.DecreaseMax(br.start.max)) return false;
1606 if (!end.IncreaseMin(br.end.min)) return false;
1607 }
1608 Interval& span = dimension_values.MutableSpan(path);
1609 if (!span.IncreaseMin(std::max(lb_span_tw, CapSub(end.min, start.max)))) {
1610 return false;
1611 }
1612 // Compute num_feasible_breaks = the number of breaks that may fit into the
1613 // route, and [break_start_min, break_end_max) = the maximal coverage of breaks.
1614 int64_t break_start_min = kint64max;
1615 int64_t break_end_max = kint64min;
1616
1617 if (!start.IncreaseMin(CapSub(end.min, span.max))) return false;
1618 if (!end.DecreaseMax(CapAdd(start.max, span.max))) return false;
1619
1620 int num_feasible_breaks = 0;
1621 for (const VehicleBreak& br : dimension_values.MutableVehicleBreaks(path)) {
1622 if (start.min <= br.start.max && br.end.min <= end.max) {
1623 break_start_min = std::min(break_start_min, br.start.min);
1624 break_end_max = std::max(break_end_max, br.end.max);
1625 ++num_feasible_breaks;
1626 }
1627 }
1628 // Improve span/start min/end max using interbreak limits: there must be
1629 // enough breaks inside the path, so that for each limit, the union of
1630 // [br.start - max_interbreak, br.end + max_interbreak) covers [start, end),
1631 // or [start, end) is shorter than max_interbreak.
1632 for (const auto& [max_interbreak, min_break_duration] : interbreaks) {
1633 // Minimal number of breaks depends on total travel:
1634 // 0 breaks for 0 <= total travel <= limit,
1635 // 1 break for limit + 1 <= total travel <= 2 * limit,
1636 // i breaks for i * limit + 1 <= total travel <= (i+1) * limit, ...
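// For instance (illustrative numbers), with max_interbreak = 10 and
// total_travel = 25, at least (25 - 1) / 10 = 2 breaks must be performed
// inside the route.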
1637 if (max_interbreak == 0) {
1638 if (total_travel > 0) return false;
1639 continue;
1640 }
1641 int64_t min_num_breaks =
1642 std::max<int64_t>(0, (total_travel - 1) / max_interbreak);
1643 if (span.min > max_interbreak) {
1644 min_num_breaks = std::max<int64_t>(min_num_breaks, 1);
1645 }
1646 if (min_num_breaks > num_feasible_breaks) return false;
1647 if (!span.IncreaseMin(CapAdd(
1648 total_travel, CapProd(min_num_breaks, min_break_duration)))) {
1649 return false;
1650 }
1651 if (min_num_breaks > 0) {
1652 if (!start.IncreaseMin(CapSub(break_start_min, max_interbreak))) {
1653 return false;
1654 }
1655 if (!end.DecreaseMax(CapAdd(break_end_max, max_interbreak))) {
1656 return false;
1657 }
1658 }
1659 }
1660 return start.DecreaseMax(CapSub(end.max, span.min)) &&
1661 end.IncreaseMin(CapAdd(start.min, span.min));
1662}
1663
1664namespace {
1665
1666class PathCumulFilter : public BasePathFilter {
1667 public:
1668 PathCumulFilter(const RoutingModel& routing_model,
1669 const RoutingDimension& dimension,
1670 bool propagate_own_objective_value,
1671 bool filter_objective_cost, bool may_use_optimizers);
1672 ~PathCumulFilter() override = default;
1673 std::string DebugString() const override {
1674 return "PathCumulFilter(" + name_ + ")";
1675 }
1676 int64_t GetSynchronizedObjectiveValue() const override {
1677 return propagate_own_objective_value_ ? synchronized_objective_value_ : 0;
1678 }
1679 int64_t GetAcceptedObjectiveValue() const override {
1680 return lns_detected() || !propagate_own_objective_value_
1681 ? 0
1682 : accepted_objective_value_;
1683 }
1684 bool UsesDimensionOptimizers() {
1685 if (!may_use_optimizers_) return false;
1686 for (int vehicle = 0; vehicle < routing_model_.vehicles(); ++vehicle) {
1687 if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) return true;
1688 }
1689 return false;
1690 }
1691
1692 private:
1693 using Interval = DimensionValues::Interval;
1694 struct SoftBound {
1695 int64_t bound = -1;
1696 int64_t coefficient = 0;
1697 };
1698
1699 // Data extractors used in constructor.
1700 std::vector<Interval> ExtractInitialCumulIntervals();
1701 std::vector<Interval> ExtractInitialSlackIntervals();
1702 std::vector<std::vector<RoutingDimension::NodePrecedence>>
1703 ExtractNodeIndexToPrecedences() const;
1704 std::vector<SoftBound> ExtractCumulSoftUpperBounds() const;
1705 std::vector<SoftBound> ExtractCumulSoftLowerBounds() const;
1706 std::vector<const PiecewiseLinearFunction*> ExtractCumulPiecewiseLinearCosts()
1707 const;
1708 std::vector<const RoutingModel::TransitCallback2*> ExtractEvaluators() const;
1709 using VehicleBreak = DimensionValues::VehicleBreak;
1710 std::vector<std::vector<VehicleBreak>> ExtractInitialVehicleBreaks() const;
1711
1712 bool InitializeAcceptPath() override {
1713 accepted_objective_value_ = synchronized_objective_value_;
1714 dimension_values_.Revert();
1715 cost_of_path_.Revert();
1716 global_span_cost_.Revert();
1717 location_of_node_.Revert();
1718 return true;
1719 }
1720 bool AcceptPath(int64_t path_start, int64_t chain_start,
1721 int64_t chain_end) override;
1722 bool FinalizeAcceptPath(int64_t objective_min,
1723 int64_t objective_max) override;
1724
1725 void OnBeforeSynchronizePaths(bool synchronizing_all_paths) override;
1726 void OnSynchronizePathFromStart(int64_t start) override;
1727 void OnAfterSynchronizePaths() override;
1728
1729 // TODO(user): consider making those methods external functions.
1730 // Fills the data of path in dimension_values_: nodes, cumuls, travels,
1731 // transits, travel_sums, span limits.
1732 bool FillDimensionValues(int path);
1733 // Propagates the transit constraint, cumul[r] + transit[r] == cumul[r+1],
1734 // in dimension_values_'s current path data.
1735 bool PropagateTransitsAndSpans(int path);
1736 // Propagates both the transit constraint and cumul forbidden intervals.
1737 bool PropagateTransitsWithForbiddenIntervals(int path);
1738 // Propagates the span constraint, cumul[start] + span == cumul[end].
1739 bool PropagateSpan(int path);
1740 // Propagates cumul[delivery] <= cumul[pickup] + pickup_to_delivery_limit.
1741 bool PropagatePickupToDeliveryLimits(int path);
1742 // Propagates the vehicle breaks constraint.
1743 bool PropagateVehicleBreaks(int path);
1744
1745 // Returns true iff there are any precedence constraints to enforce.
1746 bool FilterPrecedences() const { return !node_index_to_precedences_.empty(); }
1747 // Returns true iff there is a global span cost.
1748 bool FilterGlobalSpanCost() const {
1749 return global_span_cost_coefficient_ != 0;
1750 }
1751 // Returns true iff the given path has vehicle breaks.
1752 bool FilterVehicleBreaks(int path) const {
1753 return dimension_.HasBreakConstraints() &&
1754 (!dimension_.GetBreakIntervalsOfVehicle(path).empty() ||
1755 !dimension_.GetBreakDistanceDurationOfVehicle(path).empty());
1756 }
1757 // Returns true iff the given path has a linear soft span cost.
1758 bool FilterSoftSpanCost(int path) const {
1759 return dimension_.HasSoftSpanUpperBounds() &&
1760 dimension_.GetSoftSpanUpperBoundForVehicle(path).cost > 0;
1761 }
1762 // Returns true iff the given path has a quadratic soft span cost.
1763 bool FilterSoftSpanQuadraticCost(int path) const {
1764 if (!dimension_.HasQuadraticCostSoftSpanUpperBounds()) return false;
1765 const auto [bound, cost] =
1766 dimension_.GetQuadraticCostSoftSpanUpperBoundForVehicle(path);
1767 return bound < kint64max && cost != 0;
1768 }
1769 // Returns true iff an LP/MP optimizer should be used for the given vehicle.
1770 bool FilterWithDimensionCumulOptimizerForVehicle(int vehicle) const {
1771 if (!may_use_optimizers_) return false;
1772 if (!cumul_piecewise_linear_costs_.empty()) return false;
1773
1774 int num_linear_constraints = 0;
1775 if (dimension_.GetSpanCostCoefficientForVehicle(vehicle) > 0 ||
1776 dimension_.GetSlackCostCoefficientForVehicle(vehicle) > 0) {
1777 ++num_linear_constraints;
1778 }
1779 if (FilterSoftSpanCost(vehicle)) ++num_linear_constraints;
1780 if (!cumul_soft_upper_bounds_.empty()) ++num_linear_constraints;
1781 if (!cumul_soft_lower_bounds_.empty()) ++num_linear_constraints;
1782 if (path_span_upper_bounds_[vehicle] < kint64max) {
1783 ++num_linear_constraints;
1784 }
1785 const bool has_breaks = FilterVehicleBreaks(vehicle);
1786 if (has_breaks) ++num_linear_constraints;
1787 // The DimensionCumulOptimizer is used to compute a more precise value of
1788 // the cost related to the cumul values (soft bounds and span/slack costs).
1789 // It is also used to guarantee feasibility with complex mixes of
1790 // constraints, in particular in the presence of break requests alongside
1791 // other constraints. Therefore, without breaks, we only use the optimizer
1792 // when the costs are actually used to filter the solutions, i.e. when
1793 // filter_objective_cost_ is true.
1794 return num_linear_constraints >= 2 &&
1795 (has_breaks || filter_objective_cost_);
1796 }
1797
1798 // Data about the routing problem, used as read-only input.
1799 const RoutingModel& routing_model_;
1800 const RoutingDimension& dimension_;
1801 const std::vector<Interval> initial_cumul_;
1802 const std::vector<Interval> initial_slack_;
1803 const std::vector<std::vector<VehicleBreak>> initial_vehicle_breaks_;
1804 // Maps each vehicle/path to its values; values are always present.
1805 const std::vector<const RoutingModel::TransitCallback2*> evaluators_;
1806 const std::vector<int64_t> path_capacities_;
1807 const std::vector<int64_t> path_span_upper_bounds_;
1808 const std::vector<int64_t> path_total_slack_cost_coefficients_;
1809
1810 // Global span cost data.
1811 // Global span cost coefficient: the global span interval is
1812 // [min_{vehicle} cumul(start), max_{vehicle} cumul(end)),
1813 // over vehicles that are nonempty or used when empty.
1814 // The global span cost is the length of the interval times this coefficient.
1815 //
1816 // This class keeps data about active paths at Synchronize(): paths sorted
1817 // by decreasing span min, decreasing end min, and increasing start max.
1818 // At Accept(), it first propagates all pathwise constraints in changed
1819 // paths. Then it computes:
1820 // - active_start_max = min_{active vehicle} cumul_[vehicle].front().max
1821 // - active_end_min = max_{active vehicle} cumul_[vehicle].back().min
1822 // - active_span_min = max_{active vehicle} span_[vehicle].min,
1823 // which allow computing a lower bound on the global span, as
1824 // max(active_end_min - active_start_max, active_span_min, 0).
1825 //
1826 // To find the active_ values, this class merges bounds from the active paths,
1827 // among changed paths and unchanged paths:
1828 // - changed paths have their new bounds in dimension_values_.
1829 // - since committed paths have their bounds sorted in the vectors below,
1830 // scanning for the first path that is unchanged yields the min or max
1831 // value for all unchanged paths.
1832 // Both types of scan are done in O(#changed paths) time.
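// For example (illustrative values), with active_start_max = 10,
// active_end_min = 35 and active_span_min = 20, the global span is at least
// max(35 - 10, 20, 0) = 25.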
1833 const int64_t global_span_cost_coefficient_;
1834 struct ValuedPath {
1835 int64_t value;
1836 int path;
1837 bool operator<(const ValuedPath& other) const {
1838 return std::tie(value, path) < std::tie(other.value, other.path);
1839 }
1840 bool operator>(const ValuedPath& other) const {
1841 return std::tie(value, path) > std::tie(other.value, other.path);
1842 }
1843 };
1844 // Committed active paths, sorted by decreasing span min.
1845 std::vector<ValuedPath> paths_by_decreasing_span_min_;
1846 // Committed active paths, sorted by decreasing end min.
1847 std::vector<ValuedPath> paths_by_decreasing_end_min_;
1848 // Committed active paths, sorted by increasing start max.
1849 std::vector<ValuedPath> paths_by_increasing_start_max_;
1850 // Holds committed/current global span cost.
1851 CommittableValue<int64_t> global_span_cost_;
1852
1853 // Empty if there are no soft bounds, otherwise maps each node to its bound.
1854 const std::vector<SoftBound> cumul_soft_upper_bounds_;
1855 // Empty if there are no soft bounds, otherwise maps each node to its bound.
1856 const std::vector<SoftBound> cumul_soft_lower_bounds_;
1857 // Empty if there are no piecewise linear costs, otherwise maps each node to its cost.
1858 const std::vector<const PiecewiseLinearFunction*>
1859 cumul_piecewise_linear_costs_;
1860 // True iff some cumul has forbidden intervals.
1861 const bool has_forbidden_intervals_;
1862
1863 // Data reflecting information on path variables for the committed and the
1864 // current solution.
1865 DimensionValues dimension_values_;
1866 PrePostVisitValues visits_;
1867 BreakPropagator break_propagator_;
1868
1869 // Maps each path to the sum of its path-only costs: span/slack cost,
1870 // soft cumul costs, soft span limits.
1871 CommittableArray<int64_t> cost_of_path_;
1872 int64_t synchronized_objective_value_;
1873 int64_t accepted_objective_value_;
1874
1875 struct RankAndIndex {
1876 int rank = -1;
1877 int index = -1;
1878 };
1879 // When examining a path for pickup to delivery limits, allows finding whether
1880 // some pickup alternative is on the path, and at what rank (position).
1881 // Only the Revert() method of the CommittableArray is used, to reset all
1882 // locations to the default dummy value before examining a path.
1883 CommittableArray<RankAndIndex> pickup_rank_and_alternative_index_of_pair_;
1884
1885 // node_index_to_precedences_[node_index] contains all NodePrecedence elements
1886 // with node_index as either "first_node" or "second_node".
1887 // This vector is empty if there are no precedences on the dimension_.
1888 const std::vector<std::vector<RoutingDimension::NodePrecedence>>
1889 node_index_to_precedences_;
1890 absl::flat_hash_map<std::pair<int, int>, int64_t> precedence_offsets_;
1891 struct PathAndRank {
1892 int path = -1;
1893 int rank = -1;
1894 };
1895 // Maps the location of each node in the committed and current solutions.
1896 // This is used when enforcing precedence constraints.
1897 CommittableArray<PathAndRank> location_of_node_;
1898
1899 // Name of the dimension, for debugging/profiling purposes.
1900 const std::string name_;
1901
1902 LocalDimensionCumulOptimizer* lp_optimizer_;
1903 LocalDimensionCumulOptimizer* mp_optimizer_;
1904 const std::function<int64_t(int64_t)> path_accessor_;
1905 const bool filter_objective_cost_;
1906 // True iff this object may use an LP or MP optimizer to solve sub-problems.
1907 const bool may_use_optimizers_;
1908 const bool propagate_own_objective_value_;
1909};
1910
1911namespace {
1912template <typename T>
1913std::vector<T> SumOfVectors(const std::vector<T>& v1,
1914 const std::vector<T>& v2) {
1915 DCHECK_EQ(v1.size(), v2.size());
1916 std::vector<T> sum(v1.size());
1917 absl::c_transform(v1, v2, sum.begin(), [](T a, T b) { return CapAdd(a, b); });
1918 return sum;
1919}
1920} // namespace
1921
1922std::vector<PathCumulFilter::Interval>
1923PathCumulFilter::ExtractInitialCumulIntervals() {
1924 std::vector<Interval> intervals;
1925 intervals.reserve(dimension_.cumuls().size());
1926 for (const IntVar* cumul : dimension_.cumuls()) {
1927 intervals.push_back({cumul->Min(), cumul->Max()});
1928 }
1929 return intervals;
1930}
1931
1932std::vector<PathCumulFilter::Interval>
1933PathCumulFilter::ExtractInitialSlackIntervals() {
1934 std::vector<Interval> intervals;
1935 intervals.reserve(dimension_.slacks().size());
1936 for (const IntVar* slack : dimension_.slacks()) {
1937 intervals.push_back({slack->Min(), slack->Max()});
1938 }
1939 return intervals;
1940}
1941
1942std::vector<PathCumulFilter::SoftBound>
1943PathCumulFilter::ExtractCumulSoftUpperBounds() const {
1944 const int num_cumuls = dimension_.cumuls().size();
1945 std::vector<SoftBound> bounds(num_cumuls,
1946 {.bound = kint64max, .coefficient = 0});
1947 bool has_some_bound = false;
1948 for (int i = 0; i < num_cumuls; ++i) {
1949 if (!dimension_.HasCumulVarSoftUpperBound(i)) continue;
1950 const int64_t bound = dimension_.GetCumulVarSoftUpperBound(i);
1951 const int64_t coeff = dimension_.GetCumulVarSoftUpperBoundCoefficient(i);
1952 bounds[i] = {.bound = bound, .coefficient = coeff};
1953 has_some_bound |= bound < kint64max && coeff != 0;
1954 }
1955 if (!has_some_bound) bounds.clear();
1956 return bounds;
1957}
1958
1959std::vector<PathCumulFilter::SoftBound>
1960PathCumulFilter::ExtractCumulSoftLowerBounds() const {
1961 const int num_cumuls = dimension_.cumuls().size();
1962 std::vector<SoftBound> bounds(num_cumuls, {.bound = 0, .coefficient = 0});
1963 bool has_some_bound = false;
1964 for (int i = 0; i < num_cumuls; ++i) {
1965 if (!dimension_.HasCumulVarSoftLowerBound(i)) continue;
1966 const int64_t bound = dimension_.GetCumulVarSoftLowerBound(i);
1967 const int64_t coeff = dimension_.GetCumulVarSoftLowerBoundCoefficient(i);
1968 bounds[i] = {.bound = bound, .coefficient = coeff};
1969 has_some_bound |= bound > 0 && coeff != 0;
1970 }
1971 if (!has_some_bound) bounds.clear();
1972 return bounds;
1973}
1974
1975std::vector<const PiecewiseLinearFunction*>
1976PathCumulFilter::ExtractCumulPiecewiseLinearCosts() const {
1977 const int num_cumuls = dimension_.cumuls().size();
1978 std::vector<const PiecewiseLinearFunction*> costs(num_cumuls, nullptr);
1979 bool has_some_cost = false;
1980 for (int i = 0; i < dimension_.cumuls().size(); ++i) {
1981 if (!dimension_.HasCumulVarPiecewiseLinearCost(i)) continue;
1982 const PiecewiseLinearFunction* const cost =
1983 dimension_.GetCumulVarPiecewiseLinearCost(i);
1984 if (cost == nullptr) continue;
1985 has_some_cost = true;
1986 costs[i] = cost;
1987 }
1988 if (!has_some_cost) costs.clear();
1989 return costs;
1990}
1991
1992std::vector<const RoutingModel::TransitCallback2*>
1993PathCumulFilter::ExtractEvaluators() const {
1994 const int num_paths = NumPaths();
1995 std::vector<const RoutingModel::TransitCallback2*> evaluators(num_paths);
1996 for (int i = 0; i < num_paths; ++i) {
1997 evaluators[i] = &dimension_.transit_evaluator(i);
1998 }
1999 return evaluators;
2000}
2001
2002std::vector<std::vector<RoutingDimension::NodePrecedence>>
2003PathCumulFilter::ExtractNodeIndexToPrecedences() const {
2004 std::vector<std::vector<RoutingDimension::NodePrecedence>>
2005 node_index_to_precedences;
2006 const std::vector<RoutingDimension::NodePrecedence>& node_precedences =
2007 dimension_.GetNodePrecedences();
2008 if (!node_precedences.empty()) {
2009 node_index_to_precedences.resize(initial_cumul_.size());
2010 for (const auto& node_precedence : node_precedences) {
2011 node_index_to_precedences[node_precedence.first_node].push_back(
2012 node_precedence);
2013 node_index_to_precedences[node_precedence.second_node].push_back(
2014 node_precedence);
2015 }
2016 }
2017 return node_index_to_precedences;
2018}
2019
2020std::vector<std::vector<DimensionValues::VehicleBreak>>
2021PathCumulFilter::ExtractInitialVehicleBreaks() const {
2022 const int num_vehicles = routing_model_.vehicles();
2023 std::vector<std::vector<VehicleBreak>> vehicle_breaks(num_vehicles);
2024 if (!dimension_.HasBreakConstraints()) return vehicle_breaks;
2025 for (int v = 0; v < num_vehicles; ++v) {
2026 for (const IntervalVar* br : dimension_.GetBreakIntervalsOfVehicle(v)) {
2027 vehicle_breaks[v].push_back(
2028 {.start = {.min = br->StartMin(), .max = br->StartMax()},
2029 .end = {.min = br->EndMin(), .max = br->EndMax()},
2030 .duration = {.min = br->DurationMin(), .max = br->DurationMax()},
2031 .is_performed = {.min = br->MustBePerformed(),
2032 .max = br->MayBePerformed()}});
2033 }
2034 }
2035 return vehicle_breaks;
2036}
2037
2038PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model,
2039 const RoutingDimension& dimension,
2040 bool propagate_own_objective_value,
2041 bool filter_objective_cost,
2042 bool may_use_optimizers)
2043 : BasePathFilter(routing_model.Nexts(), dimension.cumuls().size(),
2044 routing_model.GetPathsMetadata()),
2045 routing_model_(routing_model),
2046 dimension_(dimension),
2047 initial_cumul_(ExtractInitialCumulIntervals()),
2048 initial_slack_(ExtractInitialSlackIntervals()),
2049 initial_vehicle_breaks_(ExtractInitialVehicleBreaks()),
2050 evaluators_(ExtractEvaluators()),
2051
2052 path_capacities_(dimension.vehicle_capacities()),
2053 path_span_upper_bounds_(dimension.vehicle_span_upper_bounds()),
2054 path_total_slack_cost_coefficients_(
2055 SumOfVectors(dimension.vehicle_span_cost_coefficients(),
2056 dimension.vehicle_slack_cost_coefficients())),
2057 global_span_cost_coefficient_(dimension.global_span_cost_coefficient()),
2058 global_span_cost_(0),
2059 cumul_soft_upper_bounds_(ExtractCumulSoftUpperBounds()),
2060 cumul_soft_lower_bounds_(ExtractCumulSoftLowerBounds()),
2061 cumul_piecewise_linear_costs_(ExtractCumulPiecewiseLinearCosts()),
2062 has_forbidden_intervals_(
2063 absl::c_any_of(dimension.forbidden_intervals(),
2064 [](const SortedDisjointIntervalList& intervals) {
2065 return intervals.NumIntervals() > 0;
2066 })),
2067
2068 dimension_values_(routing_model.vehicles(), dimension.cumuls().size()),
2069 visits_(routing_model.vehicles(), dimension.cumuls().size()),
2070 break_propagator_(dimension.cumuls().size()),
2071 cost_of_path_(NumPaths(), 0),
2072 synchronized_objective_value_(0),
2073 accepted_objective_value_(0),
2074 pickup_rank_and_alternative_index_of_pair_(
2075 routing_model_.GetPickupAndDeliveryPairs().size(), {-1, -1}),
2076 node_index_to_precedences_(ExtractNodeIndexToPrecedences()),
2077 location_of_node_(dimension.cumuls().size(), {-1, -1}),
2078 name_(dimension.name()),
2079 lp_optimizer_(routing_model.GetMutableLocalCumulLPOptimizer(dimension)),
2080 mp_optimizer_(routing_model.GetMutableLocalCumulMPOptimizer(dimension)),
2081 path_accessor_([this](int64_t node) { return GetNext(node); }),
2082 filter_objective_cost_(filter_objective_cost),
2083 may_use_optimizers_(may_use_optimizers),
2084 propagate_own_objective_value_(propagate_own_objective_value) {
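// For each ordered (first_node, second_node) pair, keep only the largest
// declared offset: cumul(first_node) + offset <= cumul(second_node) must hold
// for every declared precedence, so the maximum offset dominates the others.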
2085 for (int node = 0; node < node_index_to_precedences_.size(); ++node) {
2086 for (const auto [first_node, second_node, offset,
2087 unused_performed_constraint] :
2088 node_index_to_precedences_[node]) {
2089 int64_t& current_offset = gtl::LookupOrInsert(
2090 &precedence_offsets_, {first_node, second_node}, offset);
2091 current_offset = std::max(current_offset, offset);
2092 }
2093 }
2094#ifndef NDEBUG
2095 for (int vehicle = 0; vehicle < routing_model.vehicles(); vehicle++) {
2096 if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) {
2097 DCHECK_NE(lp_optimizer_, nullptr);
2098 DCHECK_NE(mp_optimizer_, nullptr);
2099 }
2100 }
2101#endif // NDEBUG
2102}
2103
2104bool PathCumulFilter::PropagateTransitsAndSpans(int path) {
2105 if (has_forbidden_intervals_) {
2106 return PropagateSpan(path) &&
2107 PropagateTransitsWithForbiddenIntervals(path) && PropagateSpan(path);
2108 } else {
2109 return PropagateTransitAndSpan(path, dimension_values_);
2110 }
2111}
2112
2113bool PathCumulFilter::PropagateTransitsWithForbiddenIntervals(int path) {
2114 DCHECK_LT(0, dimension_values_.NumNodes(path));
2115 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
2116 absl::Span<const Interval> transits = dimension_values_.Transits(path);
2117 absl::Span<const int> nodes = dimension_values_.Nodes(path);
2118 const int num_nodes = dimension_values_.NumNodes(path);
2119 auto reduce_to_allowed_interval = [this](Interval& interval,
2120 int node) -> bool {
2121 DCHECK(!interval.IsEmpty());
2122 interval.min = dimension_.GetFirstPossibleGreaterOrEqualValueForNode(
2123 node, interval.min);
2124 if (interval.IsEmpty()) return false;
2125 interval.max =
2126 dimension_.GetLastPossibleLessOrEqualValueForNode(node, interval.max);
2127 return !interval.IsEmpty();
2128 };
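// For instance (illustrative intervals), if a node only allows cumul values
// in [0, 5] or [10, 20] and the incoming interval is [6, 12], it is reduced
// to [10, 12]; if the incoming interval were [6, 8], it would become empty.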
2129 // Propagate from start to end.
2130 if (!reduce_to_allowed_interval(cumuls[0], nodes[0])) return false;
2131 Interval cumul = cumuls[0];
2132 for (int r = 1; r < num_nodes; ++r) {
2133 cumul.Add(transits[r - 1]);
2134 if (!cumul.IntersectWith(cumuls[r])) return false;
2135 if (!reduce_to_allowed_interval(cumul, nodes[r])) return false;
2136 cumuls[r] = cumul;
2137 }
2138 // Propagate from end to start.
2139 for (int r = num_nodes - 2; r >= 0; --r) {
2140 cumul.Subtract(transits[r]);
2141 if (!cumul.IntersectWith(cumuls[r])) return false;
2142 if (!reduce_to_allowed_interval(cumul, nodes[r])) return false;
2143 cumuls[r] = cumul;
2144 }
2145 return true;
2146}
2147
2148bool PathCumulFilter::PropagateSpan(int path) {
2149 absl::Span<const int64_t> travel_sums = dimension_values_.TravelSums(path);
2150 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
2151 // Copy values to make it clear to the compiler that we are working on
2152 // different values; we'll commit them back at the end of the modifications.
2153 Interval start = cumuls.front();
2154 Interval end = cumuls.back();
2155 Interval span = dimension_values_.Span(path);
2156 if (!span.IncreaseMin(travel_sums.back())) return false;
2157 // We propagate equation: end - start - span = 0.
2158 // In the <= 0 part of this equation, consider two of the terms at their min
2159 // and filter the max of the last term.
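// For instance, taking start and span at their max in end - start - span <= 0
// bounds end from above: end.max <= CapAdd(start.max, span.max).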
2160 if (!end.DecreaseMax(CapAdd(start.max, span.max))) return false;
2161 if (!start.IncreaseMin(CapSub(end.min, span.max))) return false;
2162 if (!span.IncreaseMin(CapSub(end.min, start.max))) return false;
2163 // In the >= 0 part of this equation, consider two of the terms at their max
2164 // and filter the min of the last term.
2165 if (!span.DecreaseMax(CapSub(end.max, start.min))) return false;
2166 if (!end.IncreaseMin(CapAdd(start.min, span.min))) return false;
2167 if (!start.DecreaseMax(CapSub(end.max, span.min))) return false;
2168 // Commit back to input data structures.
2169 cumuls.front() = start;
2170 cumuls.back() = end;
2171 dimension_values_.MutableSpan(path) = span;
2172 return true;
2173}
2174
2175bool PathCumulFilter::FillDimensionValues(int path) {
2176 // Fill nodes.
2177 int node = Start(path);
2178 dimension_values_.PushNode(node);
2179 while (node < Size()) {
2180 const int next = GetNext(node);
2181 dimension_values_.PushNode(next);
2182 node = next;
2183 }
2184 dimension_values_.MakePathFromNewNodes(path);
2186 path, path_capacities_[path], path_span_upper_bounds_[path],
2187 initial_cumul_, initial_slack_, *evaluators_[path],
2188 dimension_values_)) {
2189 return false;
2190 }
2191 dimension_values_.MutableVehicleBreaks(path) = initial_vehicle_breaks_[path];
2192 return true;
2193}
2194
2195bool PathCumulFilter::PropagatePickupToDeliveryLimits(int path) {
2196 const int num_nodes = dimension_values_.NumNodes(path);
2197 absl::Span<const int> nodes = dimension_values_.Nodes(path);
2198 absl::Span<const int64_t> travel_sums = dimension_values_.TravelSums(path);
2199 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
2200
2201 // For each paired node, find whether it has a sibling.
2202 // This does not check whether the pickup-delivery constraint is satisfied,
2203 // only that if some pickup alternative and delivery alternative of a pair
2204 // appear in the same path, they do respect the pickup-delivery limit.
2205 // First, reset pair locations to their default values.
2206 pickup_rank_and_alternative_index_of_pair_.Revert();
2207 for (int rank = 1; rank < num_nodes - 1; ++rank) {
2208 const int node = nodes[rank];
2209 const std::optional<RoutingModel::PickupDeliveryPosition> pickup_pos =
2210 routing_model_.GetPickupPosition(node);
2211 // If node is a pickup, record its position and continue.
2212 if (pickup_pos.has_value()) {
2213 const auto [pair_index, pickup_alternative_index] = *pickup_pos;
2214 pickup_rank_and_alternative_index_of_pair_.Set(
2215 pair_index, {.rank = rank, .index = pickup_alternative_index});
2216 continue;
2217 }
2218 // Propagate the limit if:
2219 // - node is a delivery,
2220 // - a corresponding pickup is in the path,
2221 // - and there is a nontrivial limit.
2222 const std::optional<RoutingModel::PickupDeliveryPosition> delivery_pos =
2223 routing_model_.GetDeliveryPosition(node);
2224 if (!delivery_pos.has_value()) continue;
2225 const auto [pair_index, delivery_alt_index] = *delivery_pos;
2226 const auto [pickup_rank, pickup_alt_index] =
2227 pickup_rank_and_alternative_index_of_pair_.Get(pair_index);
2228 if (pickup_rank == -1) continue;
2229
2230 const int64_t limit = dimension_.GetPickupToDeliveryLimitForPair(
2231 pair_index, pickup_alt_index, delivery_alt_index);
2232 if (limit == kint64max) continue;
2233
2234 // If the total travel between pickup and delivery is larger than the
2235 // limit, we can reject immediately.
2236 const int64_t total_travel =
2237 CapSub(travel_sums[rank], travel_sums[pickup_rank]);
2238 if (total_travel > limit) return false;
2239
2240 // Otherwise, propagate cumul[rank] <= cumul[pickup_rank] + limit.
2241 if (!cumuls[rank].DecreaseMax(CapAdd(cumuls[pickup_rank].max, limit))) {
2242 return false;
2243 }
2244 if (!cumuls[pickup_rank].IncreaseMin(CapSub(cumuls[rank].min, limit))) {
2245 return false;
2246 }
2247 }
2248 return true;
2249}
2250
2251bool PathCumulFilter::PropagateVehicleBreaks(int path) {
2252 return PropagateLightweightVehicleBreaks(
2253 path, dimension_values_,
2254 dimension_.GetBreakDistanceDurationOfVehicle(path));
2255}
2256
2257bool PathCumulFilter::AcceptPath(int64_t path_start, int64_t /*chain_start*/,
2258 int64_t /*chain_end*/) {
2259 const int path = GetPath(path_start);
2260 if (!FillDimensionValues(path)) return false;
2261
2262 // Filter feasibility: cumul windows, transit, span, breaks.
2263 // The first PropagateSpan() is mostly used to check feasibility of total
2264 // travel within the span max; the second can tighten all start/end/span bounds.
2265 if (!PropagateTransitsAndSpans(path)) return false;
2266 if (dimension_.HasPickupToDeliveryLimits()) {
2267 if (!PropagatePickupToDeliveryLimits(path)) return false;
2268 // Re-propagate span and transits.
2269 if (!PropagateTransitsAndSpans(path)) return false;
2270 }
2271 if (FilterVehicleBreaks(path)) {
2272 // TODO(user): switch to `using enum BreakPropagator::PropagationResult` once
2273 // C++20 support is available in OSS.
2274 const auto& interbreaks =
2275 dimension_.GetBreakDistanceDurationOfVehicle(path);
2276 if (!PropagateVehicleBreaks(path) ||
2277 break_propagator_.PropagateInterbreak(path, dimension_values_,
2278 interbreaks) ==
2279 BreakPropagator::PropagationResult::kInfeasible ||
2280 !PropagateTransitsAndSpans(path)) {
2281 return false;
2282 }
2283 // Fill pre/post travel data.
2284 visits_.Revert();
2285 auto any_invocable = [this](int evaluator_index)
2286 -> std::optional<absl::AnyInvocable<int64_t(int64_t, int64_t) const>> {
2287 const auto& evaluator =
2288 evaluator_index == -1
2289 ? nullptr
2290 : dimension_.model()->TransitCallback(evaluator_index);
2291 if (evaluator == nullptr) return std::nullopt;
2292 return evaluator;
2293 };
2295 path, dimension_values_,
2296 any_invocable(dimension_.GetPreTravelEvaluatorOfVehicle(path)),
2297 any_invocable(dimension_.GetPostTravelEvaluatorOfVehicle(path)),
2298 visits_);
2299 // Loop until there are no changes, capped at a small number of iterations.
2300 BreakPropagator::PropagationResult result = BreakPropagator::kChanged;
2301 int num_iterations = 2;
2302 while (result == BreakPropagator::kChanged && num_iterations-- > 0) {
2303 result =
2304 break_propagator_.FastPropagations(path, dimension_values_, visits_);
2305 if (result == BreakPropagator::kChanged) {
2306 if (!PropagateVehicleBreaks(path) ||
2307 break_propagator_.PropagateInterbreak(path, dimension_values_,
2308 interbreaks) ==
2309 BreakPropagator::PropagationResult::kInfeasible ||
2310 !PropagateTransitsAndSpans(path)) {
2311 return false;
2312 }
2313 }
2314 }
2315 if (result == BreakPropagator::kInfeasible) return false;
2316 }
2317
2318 // Filter costs: span (linear/quadratic/piecewise),
2319 // soft cumul windows (linear/piecewise).
2320 if (!filter_objective_cost_) return true;
2321
2322 CapSubFrom(cost_of_path_.Get(path), &accepted_objective_value_);
2323 if (routing_model_.IsEnd(GetNext(path_start)) &&
2324 !routing_model_.IsVehicleUsedWhenEmpty(path)) {
2325 cost_of_path_.Set(path, 0);
2326 return true;
2327 }
2328
2329 const absl::Span<const Interval> cumuls = dimension_values_.Cumuls(path);
2330 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
2331 const Interval span = dimension_values_.Span(path);
2332 const int64_t total_travel = dimension_values_.TravelSums(path).back();
2333 const int64_t min_total_slack = CapSub(span.min, total_travel);
2334
2335 int64_t new_path_cost = 0;
2336 // Add span and slack costs.
2337 CapAddTo(CapProd(path_total_slack_cost_coefficients_[path], min_total_slack),
2338 &new_path_cost);
2339 // Add soft span costs.
2340 if (dimension_.HasSoftSpanUpperBounds()) {
2341 const auto [bound, cost] = dimension_.GetSoftSpanUpperBoundForVehicle(path);
2342 CapAddTo(CapProd(cost, std::max<int64_t>(0, CapSub(span.min, bound))),
2343 &new_path_cost);
2344 }
2345 if (dimension_.HasQuadraticCostSoftSpanUpperBounds()) {
2346 const auto [bound, cost] =
2347 dimension_.GetQuadraticCostSoftSpanUpperBoundForVehicle(path);
2348 const int64_t violation = std::max<int64_t>(0, CapSub(span.min, bound));
2349 CapAddTo(CapProd(cost, CapProd(violation, violation)), &new_path_cost);
2350 }
2351 // Add soft cumul lower bound costs.
2352 const int num_path_nodes = dimension_values_.NumNodes(path);
2353 if (!cumul_soft_lower_bounds_.empty()) {
2354 for (int r = 0; r < num_path_nodes; ++r) {
2355 const auto [bound, coef] = cumul_soft_lower_bounds_[nodes[r]];
2356 CapAddTo(
2357 CapProd(coef, std::max<int64_t>(0, CapSub(bound, cumuls[r].max))),
2358 &new_path_cost);
2359 }
2360 }
2361 // Add soft cumul upper bound costs.
2362 if (!cumul_soft_upper_bounds_.empty()) {
2363 for (int r = 0; r < num_path_nodes; ++r) {
2364 const auto [bound, coef] = cumul_soft_upper_bounds_[nodes[r]];
2365 CapAddTo(
2366 CapProd(coef, std::max<int64_t>(0, CapSub(cumuls[r].min, bound))),
2367 &new_path_cost);
2368 }
2369 }
2370 // Add piecewise linear costs.
2371 // TODO(user): this assumes the function is increasing; make it correct in
2372 // other cases, at least for decreasing functions.
2373 if (!cumul_piecewise_linear_costs_.empty()) {
2374 for (int r = 0; r < num_path_nodes; ++r) {
2375 const PiecewiseLinearFunction* cost =
2376 cumul_piecewise_linear_costs_[nodes[r]];
2377 if (cost == nullptr) continue;
2378 CapAddTo(cost->Value(cumuls[r].min), &new_path_cost);
2379 }
2380 }
2381 // Replace the committed cost of this path with the new cost.
2382 CapAddTo(new_path_cost, &accepted_objective_value_);
2383 cost_of_path_.Set(path, new_path_cost);
2384 return true;
2385}
2386
2387bool PathCumulFilter::FinalizeAcceptPath(int64_t /*objective_min*/,
2388 int64_t objective_max) {
2389 if (lns_detected()) return true;
2390 if (FilterPrecedences()) {
2391 // Fast pass on consecutive nodes of changed paths, useful when the number
2392 // of precedences is much larger than the number of nodes.
2393 // TODO(user): Remove this when we have a better way to express
2394 // precedence chains, which does not require a quadratic number of
2395 // precedences.
2396 for (const int path : dimension_values_.ChangedPaths()) {
2397 const absl::Span<const int64_t> travel_sums =
2398 dimension_values_.TravelSums(path);
2399 int prev = -1;
2400 int rank = -1;
2401 for (const int node : dimension_values_.Nodes(path)) {
2402 int64_t offset = std::numeric_limits<int64_t>::min();
2403 // Check the "opposite" precedence constraint.
2404 if (gtl::FindCopy(precedence_offsets_, std::pair<int, int>{node, prev},
2405 &offset) &&
2406 CapSub(travel_sums[rank], travel_sums[rank + 1]) < offset) {
2407 return false;
2408 }
2409 prev = node;
2410 ++rank;
2411 }
2412 }
2413 // Find location of all nodes: remove committed nodes of changed paths,
2414 // then add nodes of changed paths. This removes nodes that became loops.
2415 for (const int path : dimension_values_.ChangedPaths()) {
2416 for (const int node : dimension_values_.CommittedNodes(path)) {
2417 location_of_node_.Set(node, {.path = -1, .rank = -1});
2418 }
2419 }
2420 for (const int path : dimension_values_.ChangedPaths()) {
2421 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
2422 const int num_nodes = nodes.size();
2423 for (int rank = 0; rank < num_nodes; ++rank) {
2424 location_of_node_.Set(nodes[rank], {.path = path, .rank = rank});
2425 }
2426 }
2427 // For every node of the changed paths, check precedences.
2428 for (const int path : dimension_values_.ChangedPaths()) {
2429 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
2430 const int num_nodes = nodes.size();
2431 for (int rank = 0; rank < num_nodes; ++rank) {
2432 const int node = nodes[rank];
2433 for (const auto [first_node, second_node, offset,
2434 performed_constraint] :
2435 node_index_to_precedences_[node]) {
2436 const auto [path1, rank1] = location_of_node_.Get(first_node);
2437 const auto [path2, rank2] = location_of_node_.Get(second_node);
2438 if (path1 == -1 && !IsVarSynced(first_node)) continue;
2439 if (path2 == -1 && !IsVarSynced(second_node)) continue;
2440 switch (RoutingDimension::GetPrecedenceStatus(
2441 path1 == -1, path2 == -1, performed_constraint)) {
2442 case RoutingDimension::PrecedenceStatus::kActive:
2443 break;
2444 case RoutingDimension::PrecedenceStatus::kInactive:
2445 continue;
2446 case RoutingDimension::PrecedenceStatus::kInvalid:
2447 return false;
2448 }
2449 DCHECK(node == first_node || node == second_node);
2450 DCHECK_EQ(first_node, dimension_values_.Nodes(path1)[rank1]);
2451 DCHECK_EQ(second_node, dimension_values_.Nodes(path2)[rank2]);
2452 // Check the compatibility between the precedence and the implicit
2453 // precedence induced from the route sequence.
2454 if (path1 == path2 && rank2 < rank1) {
2455 absl::Span<const int64_t> travel_sums =
2456 dimension_values_.TravelSums(path);
2457 // Check that travel(second_node, first_node) <= -offset,
2458 // (equivalent to -travel(second_node, first_node) >= offset).
2459 if (CapSub(travel_sums[rank2], travel_sums[rank1]) < offset) {
2460 return false;
2461 }
2462 }
2463 // Check that cumul1 + offset <= cumul2 is feasible.
2464 if (CapAdd(dimension_values_.Cumuls(path1)[rank1].min, offset) >
2465 dimension_values_.Cumuls(path2)[rank2].max)
2466 return false;
2467 }
2468 }
2469 }
2470 }
2471 if (filter_objective_cost_ && FilterGlobalSpanCost()) {
2472 int64_t global_end_min = kint64min;
2473 int64_t global_start_max = kint64max;
2474 int64_t global_span_min = 0;
2475 // Find global values of changed paths.
2476 for (const int path : dimension_values_.ChangedPaths()) {
2477 if (dimension_values_.NumNodes(path) == 0) continue;
2478 if (dimension_values_.NumNodes(path) == 2 &&
2479 !routing_model_.IsVehicleUsedWhenEmpty(path)) {
2480 continue;
2481 }
2482 global_span_min =
2483 std::max(global_span_min, dimension_values_.Span(path).min);
2484 const int64_t end_min = dimension_values_.Cumuls(path).back().min;
2485 global_end_min = std::max(global_end_min, end_min);
2486 const int64_t start_max = dimension_values_.Cumuls(path).front().max;
2487 global_start_max = std::min(global_start_max, start_max);
2488 }
2489 // Find global values of unchanged paths, merge with those of changed paths.
2490 for (const auto& [span_min, path] : paths_by_decreasing_span_min_) {
2491 if (dimension_values_.PathHasChanged(path)) continue;
2492 global_span_min = std::max(global_span_min, span_min);
2493 break;
2494 }
2495 for (const auto& [end_min, path] : paths_by_decreasing_end_min_) {
2496 if (dimension_values_.PathHasChanged(path)) continue;
2497 global_end_min = std::max(global_end_min, end_min);
2498 break;
2499 }
2500 for (const auto& [start_max, path] : paths_by_increasing_start_max_) {
2501 if (dimension_values_.PathHasChanged(path)) continue;
2502 global_start_max = std::min(global_start_max, start_max);
2503 break;
2504 }
2505 // Compute global span min.
2506 global_span_min =
2507 std::max(global_span_min, CapSub(global_end_min, global_start_max));
2508 const int64_t global_span_cost =
2509 CapProd(global_span_min, global_span_cost_coefficient_);
2510 CapSubFrom(global_span_cost_.Get(), &accepted_objective_value_);
2511 global_span_cost_.Set(global_span_cost);
2512 CapAddTo(global_span_cost, &accepted_objective_value_);
2513 }
2514
2515 // Filtering on objective value, calling LPs and MIPs if needed.
2516 if (may_use_optimizers_ && lp_optimizer_ != nullptr &&
2517 accepted_objective_value_ <= objective_max) {
2518 std::vector<int> paths_requiring_mp_optimizer;
2519 // TODO(user): Further optimize the LPs when we find feasible-only
2520 // solutions with the original time shares, if there's time left in the end.
2521 int solve_duration_shares = dimension_values_.ChangedPaths().size();
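// Each LP call below is given 1 / solve_duration_shares of the allowed solve
// time; the count is decremented after each processed path, so the remaining
// paths share the rest.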
2522 for (const int vehicle : dimension_values_.ChangedPaths()) {
2523 if (!FilterWithDimensionCumulOptimizerForVehicle(vehicle)) {
2524 continue;
2525 }
2526 int64_t path_cost_with_lp = 0;
2527 const DimensionSchedulingStatus status =
2528 lp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
2529 vehicle, /*solve_duration_ratio=*/1.0 / solve_duration_shares,
2530 path_accessor_, /*resource=*/nullptr,
2531 filter_objective_cost_ ? &path_cost_with_lp : nullptr);
2532 solve_duration_shares--;
2533 if (status == DimensionSchedulingStatus::INFEASIBLE) return false;
2534 // Replace previous path cost with the LP optimizer cost.
2535 if (filter_objective_cost_ &&
2536 (status == DimensionSchedulingStatus::OPTIMAL ||
2537 status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) &&
2538 path_cost_with_lp > cost_of_path_.Get(vehicle)) {
2539 CapSubFrom(cost_of_path_.Get(vehicle), &accepted_objective_value_);
2540 CapAddTo(path_cost_with_lp, &accepted_objective_value_);
2541 if (accepted_objective_value_ > objective_max) return false;
2542 cost_of_path_.Set(vehicle, path_cost_with_lp);
2543 }
2544 // Mark whether the cost could be improved by using mp_optimizer.
2545 DCHECK_NE(mp_optimizer_, nullptr);
2546 if (FilterVehicleBreaks(vehicle) ||
2547 (filter_objective_cost_ &&
2548 (FilterSoftSpanQuadraticCost(vehicle) ||
2549 status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY))) {
2550 paths_requiring_mp_optimizer.push_back(vehicle);
2551 }
2552 }
2553
2554 DCHECK_LE(accepted_objective_value_, objective_max);
2555
2556 solve_duration_shares = paths_requiring_mp_optimizer.size();
2557 for (const int vehicle : paths_requiring_mp_optimizer) {
2558 int64_t path_cost_with_mp = 0;
2559 const DimensionSchedulingStatus status =
2560 mp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
2561 vehicle, /*solve_duration_ratio=*/1.0 / solve_duration_shares,
2562 path_accessor_, /*resource=*/nullptr,
2563 filter_objective_cost_ ? &path_cost_with_mp : nullptr);
2564 solve_duration_shares--;
2565 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2566 return false;
2567 }
2568 if (filter_objective_cost_ &&
2569 status == DimensionSchedulingStatus::OPTIMAL &&
2570 path_cost_with_mp > cost_of_path_.Get(vehicle)) {
2571 CapSubFrom(cost_of_path_.Get(vehicle), &accepted_objective_value_);
2572 CapAddTo(path_cost_with_mp, &accepted_objective_value_);
2573 if (accepted_objective_value_ > objective_max) return false;
2574 cost_of_path_.Set(vehicle, path_cost_with_mp);
2575 }
2576 }
2577 }
2578 return accepted_objective_value_ <= objective_max;
2579}
2580
2581void PathCumulFilter::OnBeforeSynchronizePaths(bool synchronizing_all_paths) {
2582 if (synchronizing_all_paths) {
2583 // All paths are being synchronized, so we can reset all the data and let
2584 // OnSynchronizePathFromStart() calls recompute everything. Otherwise we let
2585 // the InitializeAcceptPath() call below revert the data structures.
2586 dimension_values_.Reset();
2587 cost_of_path_.SetAllAndCommit(0);
2588 location_of_node_.SetAllAndCommit({-1, -1});
2589 global_span_cost_.SetAndCommit(0);
2590 synchronized_objective_value_ = 0; // Accept() relies on this value.
2591 }
2592
2593 accepted_objective_value_ = synchronized_objective_value_;
2594 if (HasAnySyncedPath()) {
2595 // Revert the data structures that are used to compute the bounds and
2596 // pathwise costs of the new solution incrementally.
2597 InitializeAcceptPath();
2598 }
2599}
2600
2601void PathCumulFilter::OnSynchronizePathFromStart(int64_t start) {
2602 DCHECK(IsVarSynced(start));
2603 // Using AcceptPath() to compute the bounds and pathwise costs of the new
2604 // solution incrementally.
2605 const int path = GetPath(start);
2606 const bool is_accepted = AcceptPath(start, start, End(path));
2607 DCHECK(is_accepted);
2608}
2609
2610void PathCumulFilter::OnAfterSynchronizePaths() {
2611 if (filter_objective_cost_ && FilterGlobalSpanCost()) {
2612 // TODO(user): check whether this could go into FinalizeAcceptPath(),
2613 // with a Commit() on some data structure.
2614 paths_by_decreasing_span_min_.clear();
2615 paths_by_decreasing_end_min_.clear();
2616 paths_by_increasing_start_max_.clear();
2617 for (int path = 0; path < NumPaths(); ++path) {
2618 const int num_path_nodes = dimension_values_.Nodes(path).size();
2619 if (num_path_nodes == 0 ||
2620 (num_path_nodes == 2 &&
2621 !routing_model_.IsVehicleUsedWhenEmpty(path))) {
2622 continue;
2623 }
2624 paths_by_decreasing_span_min_.push_back(
2625 {.value = dimension_values_.Span(path).min, .path = path});
2626 paths_by_decreasing_end_min_.push_back(
2627 {.value = dimension_values_.Cumuls(path).back().min, .path = path});
2628 paths_by_increasing_start_max_.push_back(
2629 {.value = dimension_values_.Cumuls(path)[0].max, .path = path});
2630 }
2631 absl::c_sort(paths_by_decreasing_span_min_, std::greater<ValuedPath>());
2632 absl::c_sort(paths_by_decreasing_end_min_, std::greater<ValuedPath>());
2633 absl::c_sort(paths_by_increasing_start_max_);
2634 }
2635 // Using FinalizeAcceptPath() to commit all data structures to this state.
2636 FinalizeAcceptPath(kint64min, kint64max);
2637 dimension_values_.Commit();
2638 cost_of_path_.Commit();
2639 global_span_cost_.Commit();
2640 location_of_node_.Commit();
2641 synchronized_objective_value_ = accepted_objective_value_;
2642}
2643
2644} // namespace
2645
2646 IntVarLocalSearchFilter* MakePathCumulFilter(const RoutingDimension& dimension,
2647 bool propagate_own_objective_value,
2648 bool filter_objective_cost,
2649 bool may_use_optimizers) {
2650 RoutingModel& model = *dimension.model();
2651 return model.solver()->RevAlloc(
2652 new PathCumulFilter(model, dimension, propagate_own_objective_value,
2653 filter_objective_cost, may_use_optimizers));
2654}
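// A minimal usage sketch (illustrative; assumes a RoutingModel `model` with a
// dimension named "Time" already added):
//   const RoutingDimension& time = model.GetDimensionOrDie("Time");
//   IntVarLocalSearchFilter* filter = MakePathCumulFilter(
//       time, /*propagate_own_objective_value=*/true,
//       /*filter_objective_cost=*/true, /*may_use_optimizers=*/false);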
2655
2656namespace {
2657
2658bool DimensionHasCumulCost(const RoutingDimension& dimension) {
2659 if (dimension.global_span_cost_coefficient() != 0) return true;
2660 if (dimension.HasSoftSpanUpperBounds()) return true;
2661 if (dimension.HasQuadraticCostSoftSpanUpperBounds()) return true;
2662 if (absl::c_any_of(dimension.vehicle_span_cost_coefficients(),
2663 [](int64_t coefficient) { return coefficient != 0; })) {
2664 return true;
2665 }
2666 if (absl::c_any_of(dimension.vehicle_slack_cost_coefficients(),
2667 [](int64_t coefficient) { return coefficient != 0; })) {
2668 return true;
2669 }
2670 for (int i = 0; i < dimension.cumuls().size(); ++i) {
2671 if (dimension.HasCumulVarSoftUpperBound(i)) return true;
2672 if (dimension.HasCumulVarSoftLowerBound(i)) return true;
2673 if (dimension.HasCumulVarPiecewiseLinearCost(i)) return true;
2674 }
2675 return false;
2676}
2677
2678bool DimensionHasPathCumulConstraint(const RoutingDimension& dimension) {
2679 if (dimension.HasBreakConstraints()) return true;
2680 if (dimension.HasPickupToDeliveryLimits()) return true;
2681 if (absl::c_any_of(
2682 dimension.vehicle_span_upper_bounds(), [](int64_t upper_bound) {
2683 return upper_bound != std::numeric_limits<int64_t>::max();
2684 })) {
2685 return true;
2686 }
2687 if (absl::c_any_of(dimension.slacks(),
2688 [](IntVar* slack) { return slack->Min() > 0; })) {
2689 return true;
2690 }
2691 const std::vector<IntVar*>& cumuls = dimension.cumuls();
2692 for (int i = 0; i < cumuls.size(); ++i) {
2693 IntVar* const cumul_var = cumuls[i];
2694 if (cumul_var->Min() > 0 &&
2695 cumul_var->Max() < std::numeric_limits<int64_t>::max() &&
2696 !dimension.model()->IsEnd(i)) {
2697 return true;
2698 }
2699 if (dimension.forbidden_intervals()[i].NumIntervals() > 0) return true;
2700 }
2701 return false;
2702}
2703
2704} // namespace
2705
2706 void AppendLightWeightDimensionFilters(
2707 const PathState* path_state,
2708 const std::vector<RoutingDimension*>& dimensions,
2709 std::vector<LocalSearchFilterManager::FilterEvent>* filters) {
2710 using Interval = DimensionChecker::Interval;
2711 // For each dimension, build a DimensionChecker from its vehicle capacities,
2712 // transit evaluators and cumul bounds, and wrap it in a dedicated filter.
2713 for (const RoutingDimension* dimension : dimensions) {
2714 // Fill path capacities and classes.
2715 const int num_vehicles = dimension->model()->vehicles();
2716 std::vector<Interval> path_capacity(num_vehicles);
2717 std::vector<int> path_class(num_vehicles);
2718 for (int v = 0; v < num_vehicles; ++v) {
2719 const auto& vehicle_capacities = dimension->vehicle_capacities();
2720 path_capacity[v] = {0, vehicle_capacities[v]};
2721 path_class[v] = dimension->vehicle_to_class(v);
2722 }
2723 // For each class, retrieve the demands of each node.
2724 // Dimensions store evaluators with a double indirection for compactness:
2725 // vehicle -> vehicle_class -> evaluator_index.
2726 // We replicate this in DimensionChecker,
2727 // except we expand evaluator_index to an array of values for all nodes.
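// For instance (illustrative sizes), with 10 vehicles spread over 3 vehicle
// classes, only 3 transit functions are built and shared by all vehicles of
// the same class.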
2728 const int num_vehicle_classes =
2729 1 + *std::max_element(path_class.begin(), path_class.end());
2730 const int num_cumuls = dimension->cumuls().size();
2731 const int num_slacks = dimension->slacks().size();
2732 std::vector<std::function<Interval(int64_t, int64_t)>> transits(
2733 num_vehicle_classes, nullptr);
2734 for (int vehicle = 0; vehicle < num_vehicles; ++vehicle) {
2735 const int vehicle_class = path_class[vehicle];
2736 if (transits[vehicle_class] != nullptr) continue;
2737 const auto& unary_evaluator =
2738 dimension->GetUnaryTransitEvaluator(vehicle);
2739 if (unary_evaluator != nullptr) {
2740 transits[vehicle_class] = [&unary_evaluator, dimension, num_slacks](
2741 int64_t node, int64_t) -> Interval {
2742 if (node >= num_slacks) return {0, 0};
2743 const int64_t min_transit = unary_evaluator(node);
2744 const int64_t max_transit =
2745 CapAdd(min_transit, dimension->SlackVar(node)->Max());
2746 return {min_transit, max_transit};
2747 };
2748 } else {
2749 const auto& binary_evaluator =
2750 dimension->GetBinaryTransitEvaluator(vehicle);
2751
2752 transits[vehicle_class] = [&binary_evaluator, dimension, num_slacks](
2753 int64_t node, int64_t next) -> Interval {
2754 if (node >= num_slacks) return {0, 0};
2755 const int64_t min_transit = binary_evaluator(node, next);
2756 const int64_t max_transit =
2757 CapAdd(min_transit, dimension->SlackVar(node)->Max());
2758 return {min_transit, max_transit};
2759 };
2760 }
2761 }
2762 // Fill node capacities.
2763 std::vector<Interval> node_capacity(num_cumuls);
2764 for (int node = 0; node < num_cumuls; ++node) {
2765 const IntVar* cumul = dimension->CumulVar(node);
2766 node_capacity[node] = {cumul->Min(), cumul->Max()};
2767 }
2768 // Make the dimension checker and pass ownership to the filter.
2769 auto checker = std::make_unique<DimensionChecker>(
2770 path_state, std::move(path_capacity), std::move(path_class),
2771 std::move(transits), std::move(node_capacity));
2773 LocalSearchFilter* const filter = MakeDimensionFilter(
2774 dimension->model()->solver(), std::move(checker), dimension->name());
2775 filters->push_back({filter, kAccept});
2776 }
2777}
2778
2779 void AppendDimensionCumulFilters(
2780 const std::vector<RoutingDimension*>& dimensions,
2781 const RoutingSearchParameters& parameters, bool filter_objective_cost,
2782 bool use_chain_cumul_filter,
2783 std::vector<LocalSearchFilterManager::FilterEvent>* filters) {
2785 // Filter priority depth increases with complexity of filtering.
2786 // - Dimensions without any cumul-related costs or constraints will have a
2787 // ChainCumulFilter, lowest priority depth.
2788 // - Dimensions with cumul costs or constraints, but no global span cost
2789 // and/or precedences will have a PathCumulFilter.
2790 // - Dimensions with a global span cost coefficient and/or precedences will
2791 // have a global LP filter.
2792 const int num_dimensions = dimensions.size();
2793
2794 const bool has_dimension_optimizers =
2795 !parameters.disable_scheduling_beware_this_may_degrade_performance();
2796 std::vector<bool> use_path_cumul_filter(num_dimensions);
2797 std::vector<bool> use_cumul_bounds_propagator_filter(num_dimensions);
2798 std::vector<bool> use_global_lp_filter(num_dimensions);
2799 std::vector<bool> use_resource_assignment_filter(num_dimensions);
2800 for (int d = 0; d < num_dimensions; d++) {
2801 const RoutingDimension& dimension = *dimensions[d];
2802 const bool has_cumul_cost = DimensionHasCumulCost(dimension);
2803 use_path_cumul_filter[d] =
2804 has_cumul_cost || DimensionHasPathCumulConstraint(dimension);
2805
2806 const int num_dimension_resource_groups =
2807 dimension.model()->GetDimensionResourceGroupIndices(&dimension).size();
2808 const bool can_use_cumul_bounds_propagator_filter =
2809 !dimension.HasBreakConstraints() &&
2810 num_dimension_resource_groups == 0 &&
2811 (!filter_objective_cost || !has_cumul_cost);
2812 const bool has_precedences = !dimension.GetNodePrecedences().empty();
2813 use_global_lp_filter[d] =
2814 has_dimension_optimizers &&
2815 ((has_precedences && !can_use_cumul_bounds_propagator_filter) ||
2816 (filter_objective_cost &&
2817 dimension.global_span_cost_coefficient() > 0) ||
2818 num_dimension_resource_groups > 1);
2819
2820 use_cumul_bounds_propagator_filter[d] =
2821 has_precedences && !use_global_lp_filter[d];
2822
2823 use_resource_assignment_filter[d] =
2824 has_dimension_optimizers && num_dimension_resource_groups > 0;
2825 }
2826
2827 for (int d = 0; d < num_dimensions; d++) {
2828 const RoutingDimension& dimension = *dimensions[d];
2829 const RoutingModel& model = *dimension.model();
2830 // NOTE: We always add the [Chain|Path]CumulFilter to filter each route's
2831 // feasibility separately to try and cut bad decisions earlier in the
2832 // search, but we don't propagate the computed cost if the LPCumulFilter is
2833 // already doing it.
2834 const bool use_global_lp = use_global_lp_filter[d];
2835 const bool filter_resource_assignment = use_resource_assignment_filter[d];
2836 if (use_path_cumul_filter[d]) {
2837 PathCumulFilter* filter = model.solver()->RevAlloc(new PathCumulFilter(
2838 model, dimension, /*propagate_own_objective_value*/ !use_global_lp &&
2839 !filter_resource_assignment,
2840 filter_objective_cost, has_dimension_optimizers));
2841 const int priority = filter->UsesDimensionOptimizers() ? 1 : 0;
2842 filters->push_back({filter, kAccept, priority});
2843 } else if (use_chain_cumul_filter) {
2844 filters->push_back(
2845 {model.solver()->RevAlloc(new ChainCumulFilter(model, dimension)),
2846 kAccept, /*priority*/ 0});
2847 }
2848
2849 if (use_cumul_bounds_propagator_filter[d]) {
2850 DCHECK(!use_global_lp);
2851 DCHECK(!filter_resource_assignment);
2852 filters->push_back({MakeCumulBoundsPropagatorFilter(dimension), kAccept,
2853 /*priority*/ 2});
2854 }
2855
2856 if (filter_resource_assignment) {
2857 filters->push_back({MakeResourceAssignmentFilter(
2858 model.GetMutableLocalCumulLPOptimizer(dimension),
2859 model.GetMutableLocalCumulMPOptimizer(dimension),
2860 /*propagate_own_objective_value*/ !use_global_lp,
2861 filter_objective_cost),
2862 kAccept, /*priority*/ 3});
2863 }
2864
2865 if (use_global_lp) {
2866 filters->push_back({MakeGlobalLPCumulFilter(
2867 model.GetMutableGlobalCumulLPOptimizer(dimension),
2868 model.GetMutableGlobalCumulMPOptimizer(dimension),
2869 filter_objective_cost),
2870 kAccept, /*priority*/ 4});
2871 }
2872 }
2873}
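// Recap of the priorities assigned above: 0 (or 1 when dimension optimizers
// are used) for the per-route Chain/PathCumulFilter, 2 for the cumul bounds
// propagator, 3 for resource assignment and 4 for the global LP filter.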
2874
2875namespace {
2876
2877// Filter for pickup/delivery precedences.
2878class PickupDeliveryFilter : public LocalSearchFilter {
2879 public:
2880 PickupDeliveryFilter(const PathState* path_state,
2881 absl::Span<const PickupDeliveryPair> pairs,
2882 const std::vector<RoutingModel::PickupAndDeliveryPolicy>&
2883 vehicle_policies);
2884 ~PickupDeliveryFilter() override = default;
2885 bool Accept(const Assignment* /*delta*/, const Assignment* /*deltadelta*/,
2886 int64_t /*objective_min*/, int64_t /*objective_max*/) override;
2887
2888 void Reset() override;
2889 void Synchronize(const Assignment* /*assignment*/,
2890 const Assignment* /*delta*/) override;
2891 std::string DebugString() const override { return "PickupDeliveryFilter"; }
2892
2893 private:
2894 template <bool check_assigned_pairs>
2895 bool AcceptPathDispatch();
2896 template <bool check_assigned_pairs>
2897 bool AcceptPathDefault(int path);
2898 template <bool lifo, bool check_assigned_pairs>
2899 bool AcceptPathOrdered(int path);
2900 void AssignAllVisitedPairsAndLoopNodes();
2901
2902 const PathState* const path_state_;
2903 struct PairInfo {
2904 // @TODO(user): Use default member initializers once we drop C++17
2905 // support on github.
2906 bool is_paired : 1;
2907 bool is_pickup : 1;
2908 int pair_index : 30;
2909 PairInfo() : is_paired(false), is_pickup(false), pair_index(-1) {}
2910 PairInfo(bool is_paired, bool is_pickup, int pair_index)
2911 : is_paired(is_paired), is_pickup(is_pickup), pair_index(pair_index) {}
2912 };
2913 std::vector<PairInfo> pair_info_of_node_;
2914 struct PairStatus {
2915 // @TODO(user): Use default member initializers once we drop C++17
2916 // support on github.
2917 bool pickup : 1;
2918 bool delivery : 1;
2919 PairStatus() : pickup(false), delivery(false) {}
2920 };
2921 CommittableArray<PairStatus> assigned_status_of_pair_;
2922 SparseBitset<int> pair_is_open_;
2923 CommittableValue<int> num_assigned_pairs_;
2924 std::deque<int> visited_deque_;
2925 const std::vector<RoutingModel::PickupAndDeliveryPolicy> vehicle_policies_;
2926};
2927
2928PickupDeliveryFilter::PickupDeliveryFilter(
2929 const PathState* path_state, absl::Span<const PickupDeliveryPair> pairs,
2930 const std::vector<RoutingModel::PickupAndDeliveryPolicy>& vehicle_policies)
2931 : path_state_(path_state),
2932 pair_info_of_node_(path_state->NumNodes()),
2933 assigned_status_of_pair_(pairs.size(), {}),
2934 pair_is_open_(pairs.size()),
2935 num_assigned_pairs_(0),
2936 vehicle_policies_(vehicle_policies) {
2937 for (int pair_index = 0; pair_index < pairs.size(); ++pair_index) {
2938 const auto& [pickups, deliveries] = pairs[pair_index];
2939 for (const int pickup : pickups) {
2940 pair_info_of_node_[pickup] =
2941 // @TODO(user): Use aggregate initialization once we drop C++17
2942 // support on github.
2943 PairInfo{/*is_paired=*/true, /*is_pickup=*/true,
2944 /*pair_index=*/pair_index};
2945 }
2946 for (const int delivery : deliveries) {
2947 pair_info_of_node_[delivery] =
2948 // @TODO(user): Use aggregate initialization once we drop C++17
2949 // support on github.
2950 PairInfo{/*is_paired=*/true, /*is_pickup=*/false,
2951 /*pair_index=*/pair_index};
2952 }
2953 }
2954}
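// Nodes that are neither pickups nor deliveries keep the default PairInfo
// (is_paired == false) and are simply skipped by the Accept* methods below.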
2955
2956void PickupDeliveryFilter::Reset() {
2957 assigned_status_of_pair_.SetAllAndCommit({});
2958 num_assigned_pairs_.SetAndCommit(0);
2959}
2960
2961void PickupDeliveryFilter::AssignAllVisitedPairsAndLoopNodes() {
2962 assigned_status_of_pair_.Revert();
2963 num_assigned_pairs_.Revert();
2964 int num_assigned_pairs = num_assigned_pairs_.Get();
2965 if (num_assigned_pairs == assigned_status_of_pair_.Size()) return;
2966 // If node is a pickup or delivery, this sets its pickup/delivery flag in
2967 // assigned_status_of_pair_, and returns true if the whole pair *became* assigned.
2968 auto assign_node = [this](int node) -> bool {
2969 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
2970 if (!is_paired) return false;
2971 bool assigned_pair = false;
2972 PairStatus assigned_status = assigned_status_of_pair_.Get(pair_index);
2973 if (is_pickup && !assigned_status.pickup) {
2974 assigned_pair = assigned_status.delivery;
2975 assigned_status.pickup = true;
2976 assigned_status_of_pair_.Set(pair_index, assigned_status);
2977 }
2978 if (!is_pickup && !assigned_status.delivery) {
2979 assigned_pair = assigned_status.pickup;
2980 assigned_status.delivery = true;
2981 assigned_status_of_pair_.Set(pair_index, assigned_status);
2982 }
2983 return assigned_pair;
2984 };
2985 for (const int path : path_state_->ChangedPaths()) {
2986 for (const int node : path_state_->Nodes(path)) {
2987 num_assigned_pairs += assign_node(node) ? 1 : 0;
2988 }
2989 }
2990 for (const int loop : path_state_->ChangedLoops()) {
2991 num_assigned_pairs += assign_node(loop) ? 1 : 0;
2992 }
2993 num_assigned_pairs_.Set(num_assigned_pairs);
2994}
2995
2996void PickupDeliveryFilter::Synchronize(const Assignment* /*assignment*/,
2997 const Assignment* /*delta*/) {
2998 AssignAllVisitedPairsAndLoopNodes();
2999 assigned_status_of_pair_.Commit();
3000 num_assigned_pairs_.Commit();
3001}
3002
3003bool PickupDeliveryFilter::Accept(const Assignment* /*delta*/,
3004 const Assignment* /*deltadelta*/,
3005 int64_t /*objective_min*/,
3006 int64_t /*objective_max*/) {
3007 if (path_state_->IsInvalid()) return true; // Protect against CP-LNS.
3008 AssignAllVisitedPairsAndLoopNodes();
3009 const bool check_assigned_pairs =
3010 num_assigned_pairs_.Get() < assigned_status_of_pair_.Size();
3011 if (check_assigned_pairs) {
3012 return AcceptPathDispatch<true>();
3013 } else {
3014 return AcceptPathDispatch<false>();
3015 }
3016}
3017
3018template <bool check_assigned_pairs>
3019bool PickupDeliveryFilter::AcceptPathDispatch() {
3020 for (const int path : path_state_->ChangedPaths()) {
3021 switch (vehicle_policies_[path]) {
3022 case RoutingModel::PICKUP_AND_DELIVERY_NO_ORDER:
3023 if (!AcceptPathDefault<check_assigned_pairs>(path)) return false;
3024 break;
3025 case RoutingModel::PICKUP_AND_DELIVERY_LIFO:
3026 if (!AcceptPathOrdered<true, check_assigned_pairs>(path)) return false;
3027 break;
3028 case RoutingModel::PICKUP_AND_DELIVERY_FIFO:
3029 if (!AcceptPathOrdered<false, check_assigned_pairs>(path)) return false;
3030 break;
3031 default:
3032 continue;
3033 }
3034 }
3035 return true;
3036}
3037
3038template <bool check_assigned_pairs>
3039bool PickupDeliveryFilter::AcceptPathDefault(int path) {
3040 pair_is_open_.ResetAllToFalse();
3041 int num_opened_pairs = 0;
3042 for (const int node : path_state_->Nodes(path)) {
3043 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
3044 if (!is_paired) continue;
3045 if constexpr (check_assigned_pairs) {
3046 const PairStatus status = assigned_status_of_pair_.Get(pair_index);
3047 if (!status.pickup || !status.delivery) continue;
3048 }
3049 if (is_pickup) {
3050 pair_is_open_.Set(pair_index);
3051 ++num_opened_pairs;
3052 } else {
3053 if (!pair_is_open_[pair_index]) return false;
3054 pair_is_open_.Clear(pair_index);
3055 --num_opened_pairs;
3056 }
3057 }
3058 // For all visited pickup/delivery where both sides are assigned,
3059 // the whole pair must be visited.
3060 if (num_opened_pairs > 0) return false;
3061 pair_is_open_.NotifyAllClear();
3062 return true;
3063}
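// For instance, a route visiting pickup P and later its delivery D is
// accepted, while D before P, or P without D on the same route, is rejected;
// when check_assigned_pairs is true, pairs with an unassigned side are
// skipped.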
3064
3065template <bool lifo, bool check_assigned_pairs>
3066bool PickupDeliveryFilter::AcceptPathOrdered(int path) {
3067 visited_deque_.clear();
3068 for (const int node : path_state_->Nodes(path)) {
3069 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
3070 if (!is_paired) continue;
3071 if constexpr (check_assigned_pairs) {
3072 const PairStatus status = assigned_status_of_pair_.Get(pair_index);
3073 if (!status.pickup || !status.delivery) continue;
3074 }
3075 if (is_pickup) {
3076 visited_deque_.emplace_back(pair_index);
3077 } else {
3078 if (visited_deque_.empty()) return false;
3079 if constexpr (lifo) {
3080 const int last_pair_index = visited_deque_.back();
3081 if (last_pair_index != pair_index) return false;
3082 visited_deque_.pop_back();
3083 } else {
3084 const int first_pair_index = visited_deque_.front();
3085 if (first_pair_index != pair_index) return false;
3086 visited_deque_.pop_front();
3087 }
3088 }
3089 }
3090 return visited_deque_.empty();
3091}
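// For instance, with pickups P1 then P2 on a route, the LIFO policy accepts
// the deliveries in the order D2 then D1 (nested pairs), whereas FIFO accepts
// D1 then D2 (deliveries in the same order as their pickups).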
3092
3093} // namespace
3094
3095LocalSearchFilter* MakePickupDeliveryFilter(
3096 const RoutingModel& routing_model, const PathState* path_state,
3097 absl::Span<const PickupDeliveryPair> pairs,
3098 const std::vector<RoutingModel::PickupAndDeliveryPolicy>&
3099 vehicle_policies) {
3100 return routing_model.solver()->RevAlloc(
3101 new PickupDeliveryFilter(path_state, pairs, vehicle_policies));
3102}
3103
3104namespace {
3105
3106// Vehicle variable filter
3107class VehicleVarFilter : public LocalSearchFilter {
3108 public:
3109 VehicleVarFilter(const RoutingModel& routing_model,
3110 const PathState* path_state);
3111 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3112 int64_t objective_min, int64_t objective_max) override;
3113 void Synchronize(const Assignment* /*assignment*/,
3114 const Assignment* /*delta*/) override;
3115 std::string DebugString() const override { return "VehicleVariableFilter"; }
3116
3117 private:
3118 bool HasConstrainedVehicleVars() const;
3119
3120 const PathState* path_state_;
3121 std::vector<IntVar*> vehicle_vars_;
3122 const int num_vehicles_;
3123 bool is_disabled_;
3124};
3125
3126VehicleVarFilter::VehicleVarFilter(const RoutingModel& routing_model,
3127 const PathState* path_state)
3128 : path_state_(path_state),
3129 vehicle_vars_(routing_model.VehicleVars()),
3130 num_vehicles_(routing_model.vehicles()),
3131 is_disabled_(!HasConstrainedVehicleVars()) {}
3132
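// A vehicle variable is considered unconstrained when it still contains every
// vehicle, plus the value -1 (node unperformed) whenever its minimum allows
// it; any smaller domain means the vehicle choice is restricted and the
// filter must stay enabled.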
3133bool VehicleVarFilter::HasConstrainedVehicleVars() const {
3134 for (const IntVar* var : vehicle_vars_) {
3135 const int unconstrained_size = num_vehicles_ + ((var->Min() >= 0) ? 0 : 1);
3136 if (var->Size() != unconstrained_size) return true;
3137 }
3138 return false;
3139}
3140
3141void VehicleVarFilter::Synchronize(const Assignment* /*assignment*/,
3142 const Assignment* /*delta*/) {
3143 is_disabled_ = !HasConstrainedVehicleVars();
3144}
3145
3146bool VehicleVarFilter::Accept(const Assignment* /*delta*/,
3147 const Assignment* /*deltadelta*/,
3148 int64_t /*objective_min*/,
3149 int64_t /*objective_max*/) {
3150 if (is_disabled_) return true;
3151 for (const int path : path_state_->ChangedPaths()) {
3152 // The first and last chains are already committed on this vehicle, so there is no need to check them.
3153 for (const PathState::Chain chain :
3154 path_state_->Chains(path).DropFirstChain().DropLastChain()) {
3155 for (const int node : chain) {
3156 if (!vehicle_vars_[node]->Contains(path)) return false;
3157 }
3158 }
3159 }
3160 return true;
3161}
3162
3163} // namespace
3164
3165LocalSearchFilter* MakeVehicleVarFilter(const RoutingModel& routing_model,
3166 const PathState* path_state) {
3167 return routing_model.solver()->RevAlloc(
3168 new VehicleVarFilter(routing_model, path_state));
3169}
3170
3171namespace {
3172
3173class CumulBoundsPropagatorFilter : public IntVarLocalSearchFilter {
3174 public:
3175 explicit CumulBoundsPropagatorFilter(const RoutingDimension& dimension);
3176 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3177 int64_t objective_min, int64_t objective_max) override;
3178 std::string DebugString() const override {
3179 return "CumulBoundsPropagatorFilter(" + propagator_.dimension().name() +
3180 ")";
3181 }
3182
3183 private:
3184 CumulBoundsPropagator propagator_;
3185 const int64_t cumul_offset_;
3186 SparseBitset<int64_t> delta_touched_;
3187 std::vector<int64_t> delta_nexts_;
3188};
3189
3190CumulBoundsPropagatorFilter::CumulBoundsPropagatorFilter(
3191 const RoutingDimension& dimension)
3192 : IntVarLocalSearchFilter(dimension.model()->Nexts()),
3193 propagator_(&dimension),
3194 cumul_offset_(dimension.GetGlobalOptimizerOffset()),
3195 delta_touched_(Size()),
3196 delta_nexts_(Size()) {}
3197
3198bool CumulBoundsPropagatorFilter::Accept(const Assignment* delta,
3199 const Assignment* /*deltadelta*/,
3200 int64_t /*objective_min*/,
3201 int64_t /*objective_max*/) {
3202 delta_touched_.ResetAllToFalse();
3203 for (const IntVarElement& delta_element :
3204 delta->IntVarContainer().elements()) {
3205 int64_t index = -1;
3206 if (FindIndex(delta_element.Var(), &index)) {
3207 if (!delta_element.Bound()) {
3208 // LNS detected
3209 return true;
3210 }
3211 delta_touched_.Set(index);
3212 delta_nexts_[index] = delta_element.Value();
3213 }
3214 }
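// Next values come from the delta when touched, otherwise from the last
// synchronized assignment; -1 marks variables with no known next.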
3215 const auto& next_accessor = [this](int64_t index) -> int64_t {
3216 return delta_touched_[index] ? delta_nexts_[index]
3217 : !IsVarSynced(index) ? -1
3218 : Value(index);
3219 };
3220
3221 return propagator_.PropagateCumulBounds(next_accessor, cumul_offset_);
3222}
3223
3224} // namespace
3225
3226IntVarLocalSearchFilter* MakeCumulBoundsPropagatorFilter(
3227 const RoutingDimension& dimension) {
3228 return dimension.model()->solver()->RevAlloc(
3229 new CumulBoundsPropagatorFilter(dimension));
3230}
3231
3232namespace {
3233
3234class LPCumulFilter : public IntVarLocalSearchFilter {
3235 public:
3236 LPCumulFilter(const std::vector<IntVar*>& nexts,
3237 GlobalDimensionCumulOptimizer* optimizer,
3238 GlobalDimensionCumulOptimizer* mp_optimizer,
3239 bool filter_objective_cost);
3240 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3241 int64_t objective_min, int64_t objective_max) override;
3242 int64_t GetAcceptedObjectiveValue() const override;
3243 void OnSynchronize(const Assignment* delta) override;
3244 int64_t GetSynchronizedObjectiveValue() const override;
3245 std::string DebugString() const override {
3246 return "LPCumulFilter(" + lp_optimizer_.dimension()->name() + ")";
3247 }
3248
3249 private:
3250 GlobalDimensionCumulOptimizer& lp_optimizer_;
3251 GlobalDimensionCumulOptimizer& mp_optimizer_;
3252 const bool filter_objective_cost_;
3253 int64_t synchronized_cost_without_transit_;
3254 int64_t delta_cost_without_transit_;
3255 SparseBitset<int64_t> delta_touched_;
3256 std::vector<int64_t> delta_nexts_;
3257};
3258
3259LPCumulFilter::LPCumulFilter(const std::vector<IntVar*>& nexts,
3260 GlobalDimensionCumulOptimizer* lp_optimizer,
3261 GlobalDimensionCumulOptimizer* mp_optimizer,
3262 bool filter_objective_cost)
3263 : IntVarLocalSearchFilter(nexts),
3264 lp_optimizer_(*lp_optimizer),
3265 mp_optimizer_(*mp_optimizer),
3266 filter_objective_cost_(filter_objective_cost),
3267 synchronized_cost_without_transit_(-1),
3268 delta_cost_without_transit_(-1),
3269 delta_touched_(Size()),
3270 delta_nexts_(Size()) {}
3271
3272bool LPCumulFilter::Accept(const Assignment* delta,
3273 const Assignment* /*deltadelta*/,
3274 int64_t /*objective_min*/, int64_t objective_max) {
3275 delta_touched_.ResetAllToFalse();
3276 for (const IntVarElement& delta_element :
3277 delta->IntVarContainer().elements()) {
3278 int64_t index = -1;
3279 if (FindIndex(delta_element.Var(), &index)) {
3280 if (!delta_element.Bound()) {
3281 // LNS detected
3282 return true;
3283 }
3284 delta_touched_.Set(index);
3285 delta_nexts_[index] = delta_element.Value();
3286 }
3287 }
3288 const auto& next_accessor = [this](int64_t index) {
3289 return delta_touched_[index] ? delta_nexts_[index]
3290 : !IsVarSynced(index) ? -1
3291 : Value(index);
3292 };
3293
3294 if (!filter_objective_cost_) {
3295 // No need to compute the cost of the LP, only verify its feasibility.
3296 delta_cost_without_transit_ = 0;
3297 DimensionSchedulingStatus status = lp_optimizer_.ComputeCumuls(
3298 next_accessor, {}, nullptr, nullptr, nullptr);
3299 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) {
3300 status = mp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr,
3301 nullptr);
3302 }
3303 DCHECK(status != DimensionSchedulingStatus::FEASIBLE)
3304 << "FEASIBLE without filtering objective cost should be OPTIMAL";
3305 return status == DimensionSchedulingStatus::OPTIMAL;
3306 }
3307
3308 DimensionSchedulingStatus status =
3309 lp_optimizer_.ComputeCumulCostWithoutFixedTransits(
3310 next_accessor, &delta_cost_without_transit_);
3311
3312 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY ||
3313 status == DimensionSchedulingStatus::FEASIBLE) {
3314 const DimensionSchedulingStatus lp_status = status;
3315 int64_t mp_cost;
3316 status = mp_optimizer_.ComputeCumulCostWithoutFixedTransits(next_accessor,
3317 &mp_cost);
3318 if (lp_status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY &&
3319 status == DimensionSchedulingStatus::OPTIMAL) {
3320 // TRICKY: The cost is only overwritten when the MP solve is OPTIMAL: if the
3321 // MP were merely feasible, its cost would not be a lower bound of the
3322 // problem, so we would keep the LP relaxation's lower bound found by Glop.
3323 delta_cost_without_transit_ = mp_cost;
3324 } else if (lp_status == DimensionSchedulingStatus::FEASIBLE &&
3325 status != DimensionSchedulingStatus::INFEASIBLE) {
3326 // TRICKY: Since feasible costs are not lower bounds, we keep the lowest
3327 // of the costs between the LP-feasible and CP-SAT (feasible or optimal).
3328 delta_cost_without_transit_ =
3329 std::min(delta_cost_without_transit_, mp_cost);
3330 }
3331 }
3332
3333 if (status == DimensionSchedulingStatus::INFEASIBLE) {
3334 delta_cost_without_transit_ = std::numeric_limits<int64_t>::max();
3335 return false;
3336 }
3337 return delta_cost_without_transit_ <= objective_max;
3338}
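// Note on the logic above: the Glop-based LP relaxation is tried first, and
// the MP optimizer (CP-SAT based) is only invoked when the LP result is
// relaxed-optimal-only (or, when filtering the objective, merely feasible)
// to tighten or confirm the cost.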
3339
3340int64_t LPCumulFilter::GetAcceptedObjectiveValue() const {
3341 return delta_cost_without_transit_;
3342}
3343
3344void LPCumulFilter::OnSynchronize(const Assignment* /*delta*/) {
3345 // TODO(user): Try to optimize this so the LP is not called when the last
3346 // computed delta cost corresponds to the solution being synchronized.
3347 const RoutingModel& model = *lp_optimizer_.dimension()->model();
3348 const auto& next_accessor = [this, &model](int64_t index) {
3349 return IsVarSynced(index) ? Value(index)
3350 : model.IsStart(index) ? model.End(model.VehicleIndex(index))
3351 : -1;
3352 };
3353
3354 if (!filter_objective_cost_) {
3355 synchronized_cost_without_transit_ = 0;
3356 }
3357 DimensionSchedulingStatus status =
3358 filter_objective_cost_
3359 ? lp_optimizer_.ComputeCumulCostWithoutFixedTransits(
3360 next_accessor, &synchronized_cost_without_transit_)
3361 : lp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr,
3362 nullptr);
3363 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) {
3364 status = filter_objective_cost_
3365 ? mp_optimizer_.ComputeCumulCostWithoutFixedTransits(
3366 next_accessor, &synchronized_cost_without_transit_)
3367 : mp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr,
3368 nullptr, nullptr);
3369 }
3370 if (status == DimensionSchedulingStatus::INFEASIBLE) {
3371 // TODO(user): This should only happen if the LP/MIP solver times out.
3372 // DCHECK the fail wasn't due to an infeasible model.
3373 synchronized_cost_without_transit_ = 0;
3374 }
3375}
3376
3377int64_t LPCumulFilter::GetSynchronizedObjectiveValue() const {
3378 return synchronized_cost_without_transit_;
3379}
3380
3381} // namespace
3382
3383IntVarLocalSearchFilter* MakeGlobalLPCumulFilter(
3384 GlobalDimensionCumulOptimizer* lp_optimizer,
3385 GlobalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost) {
3386 DCHECK_NE(lp_optimizer, nullptr);
3387 DCHECK_NE(mp_optimizer, nullptr);
3388 const RoutingModel& model = *lp_optimizer->dimension()->model();
3389 return model.solver()->RevAlloc(new LPCumulFilter(
3390 model.Nexts(), lp_optimizer, mp_optimizer, filter_objective_cost));
3391}
3392
3393namespace {
3394
3395using ResourceGroup = RoutingModel::ResourceGroup;
3396
3397class ResourceGroupAssignmentFilter : public BasePathFilter {
3398 public:
3399 ResourceGroupAssignmentFilter(const std::vector<IntVar*>& nexts,
3400 const ResourceGroup* resource_group,
3401 LocalDimensionCumulOptimizer* lp_optimizer,
3402 LocalDimensionCumulOptimizer* mp_optimizer,
3403 bool filter_objective_cost);
3404 bool InitializeAcceptPath() override;
3405 bool AcceptPath(int64_t path_start, int64_t, int64_t) override;
3406 bool FinalizeAcceptPath(int64_t objective_min,
3407 int64_t objective_max) override;
3408 void OnBeforeSynchronizePaths(bool synchronizing_all_paths) override;
3409 void OnSynchronizePathFromStart(int64_t start) override;
3410 void OnAfterSynchronizePaths() override;
3411
3412 int64_t GetAcceptedObjectiveValue() const override {
3413 return lns_detected() ? 0 : delta_cost_without_transit_;
3414 }
3415 int64_t GetSynchronizedObjectiveValue() const override {
3416 return synchronized_cost_without_transit_;
3417 }
3418 std::string DebugString() const override {
3419 return "ResourceGroupAssignmentFilter(" + dimension_.name() + ")";
3420 }
3421
3422 private:
3423 using RCIndex = RoutingModel::ResourceClassIndex;
3424
3425 bool VehicleRequiresResourceAssignment(
3426 int vehicle, const std::function<int64_t(int64_t)>& next_accessor,
3427 bool* is_infeasible);
3428 int64_t ComputeRouteCumulCostWithoutResourceAssignment(
3429 int vehicle, const std::function<int64_t(int64_t)>& next_accessor) const;
3430
3431 const RoutingModel& model_;
3432 const RoutingDimension& dimension_;
3433 const ResourceGroup& resource_group_;
3434 LocalDimensionCumulOptimizer* lp_optimizer_;
3435 LocalDimensionCumulOptimizer* mp_optimizer_;
3436 const bool filter_objective_cost_;
3437 bool current_synch_failed_;
3438 int64_t synchronized_cost_without_transit_;
3439 int64_t delta_cost_without_transit_;
3440 std::vector<std::vector<int64_t>> vehicle_to_resource_class_assignment_costs_;
3441 std::vector<int> vehicles_requiring_resource_assignment_;
3442 std::vector<bool> vehicle_requires_resource_assignment_;
3443 std::vector<std::vector<int64_t>>
3444 delta_vehicle_to_resource_class_assignment_costs_;
3445 std::vector<int> delta_vehicles_requiring_resource_assignment_;
3446 std::vector<bool> delta_vehicle_requires_resource_assignment_;
3447
3448 std::vector<int> bound_resource_index_of_vehicle_;
3449 util_intops::StrongVector<RCIndex, absl::flat_hash_set<int>>
3450 ignored_resources_per_class_;
3451};
3452
3453ResourceGroupAssignmentFilter::ResourceGroupAssignmentFilter(
3454 const std::vector<IntVar*>& nexts, const ResourceGroup* resource_group,
3455 LocalDimensionCumulOptimizer* lp_optimizer,
3456 LocalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost)
3457 : BasePathFilter(nexts, lp_optimizer->dimension()->cumuls().size(),
3458 lp_optimizer->dimension()->model()->GetPathsMetadata()),
3459 model_(*lp_optimizer->dimension()->model()),
3460 dimension_(*lp_optimizer->dimension()),
3461 resource_group_(*resource_group),
3462 lp_optimizer_(lp_optimizer),
3463 mp_optimizer_(mp_optimizer),
3464 filter_objective_cost_(filter_objective_cost),
3465 current_synch_failed_(false),
3466 synchronized_cost_without_transit_(-1),
3467 delta_cost_without_transit_(-1) {
3468 vehicle_to_resource_class_assignment_costs_.resize(model_.vehicles());
3469 delta_vehicle_to_resource_class_assignment_costs_.resize(model_.vehicles());
3470}
3471
3472bool ResourceGroupAssignmentFilter::InitializeAcceptPath() {
3473 delta_vehicle_to_resource_class_assignment_costs_.assign(model_.vehicles(),
3474 {});
3475 if (current_synch_failed_) {
3476 return true;
3477 }
3478 // TODO(user): Keep track of num_used_vehicles internally and compute its
3479 // new value here by only going through the touched_paths_.
3480 int num_used_vehicles = 0;
3481 const int num_resources = resource_group_.Size();
3482 for (int v : resource_group_.GetVehiclesRequiringAResource()) {
3483 if (GetNext(model_.Start(v)) != model_.End(v) ||
3484 model_.IsVehicleUsedWhenEmpty(v)) {
3485 if (++num_used_vehicles > num_resources) {
3486 return false;
3487 }
3488 }
3489 }
3490 delta_vehicle_requires_resource_assignment_ =
3491 vehicle_requires_resource_assignment_;
3492 return true;
3493}
3494
3495bool ResourceGroupAssignmentFilter::AcceptPath(int64_t path_start, int64_t,
3496 int64_t) {
3497 if (current_synch_failed_) {
3498 return true;
3499 }
3500 const int vehicle = model_.VehicleIndex(path_start);
3501 bool is_infeasible = false;
3502 delta_vehicle_requires_resource_assignment_[vehicle] =
3503 VehicleRequiresResourceAssignment(
3504 vehicle, [this](int64_t n) { return GetNext(n); }, &is_infeasible);
3505 return !is_infeasible;
3506}
3507
3508bool ResourceGroupAssignmentFilter::FinalizeAcceptPath(
3509 int64_t /*objective_min*/, int64_t objective_max) {
3510 delta_cost_without_transit_ = 0;
3511 if (current_synch_failed_) {
3512 return true;
3513 }
3514 const auto& next_accessor = [this](int64_t index) { return GetNext(index); };
3515 delta_vehicles_requiring_resource_assignment_.clear();
3516 // First sum the costs of the routes not requiring resource assignment
3517 // (cheaper computations).
3518 for (int v = 0; v < model_.vehicles(); ++v) {
3519 if (delta_vehicle_requires_resource_assignment_[v]) {
3520 delta_vehicles_requiring_resource_assignment_.push_back(v);
3521 continue;
3522 }
3523 int64_t route_cost = 0;
3524 int64_t start = model_.Start(v);
3525 if (PathStartTouched(start)) {
3526 route_cost =
3527 ComputeRouteCumulCostWithoutResourceAssignment(v, next_accessor);
3528 if (route_cost < 0) {
3529 return false;
3530 }
3531 } else if (IsVarSynced(start)) {
3532 DCHECK_EQ(vehicle_to_resource_class_assignment_costs_[v].size(), 1);
3533 route_cost = vehicle_to_resource_class_assignment_costs_[v][0];
3534 }
3535 CapAddTo(route_cost, &delta_cost_without_transit_);
3536 if (delta_cost_without_transit_ > objective_max) {
3537 return false;
3538 }
3539 }
3540 // Recompute the assignment costs to resources for touched paths requiring
3541 // resource assignment.
3542 for (int64_t start : GetTouchedPathStarts()) {
3543 const int vehicle = model_.VehicleIndex(start);
3544 if (!delta_vehicle_requires_resource_assignment_[vehicle]) {
3545 // Already handled above.
3546 continue;
3547 }
3548 if (!ComputeVehicleToResourceClassAssignmentCosts(
3549 vehicle, /*solve_duration_ratio=*/1.0, resource_group_,
3550 ignored_resources_per_class_, next_accessor,
3551 dimension_.transit_evaluator(vehicle), filter_objective_cost_,
3552 lp_optimizer_, mp_optimizer_,
3553 &delta_vehicle_to_resource_class_assignment_costs_[vehicle],
3554 nullptr, nullptr)) {
3555 return false;
3556 }
3557 }
3558 const int64_t assignment_cost = ComputeBestVehicleToResourceAssignment(
3559 delta_vehicles_requiring_resource_assignment_,
3560 resource_group_.GetResourceIndicesPerClass(),
3561 ignored_resources_per_class_,
3562 /*vehicle_to_resource_class_assignment_costs=*/
3563 [this](int v) {
3564 return PathStartTouched(model_.Start(v))
3565 ? &delta_vehicle_to_resource_class_assignment_costs_[v]
3566 : &vehicle_to_resource_class_assignment_costs_[v];
3567 },
3568 nullptr);
3569 CapAddTo(assignment_cost, &delta_cost_without_transit_);
3570 return assignment_cost >= 0 && delta_cost_without_transit_ <= objective_max;
3571}
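// The delta cost accepted above is thus the sum of the route costs of the
// vehicles that do not need resource assignment, plus the best total
// assignment cost for the vehicles that do; the move is rejected as soon as
// this exceeds objective_max or some route or assignment is infeasible.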
3572
3573void ResourceGroupAssignmentFilter::OnBeforeSynchronizePaths(bool) {
3574 if (!HasAnySyncedPath()) {
3575 vehicle_to_resource_class_assignment_costs_.assign(model_.vehicles(), {});
3576 }
3577 bound_resource_index_of_vehicle_.assign(model_.vehicles(), -1);
3578 vehicles_requiring_resource_assignment_.clear();
3579 vehicles_requiring_resource_assignment_.reserve(
3580 resource_group_.GetVehiclesRequiringAResource().size());
3581 vehicle_requires_resource_assignment_.assign(model_.vehicles(), false);
3582 ignored_resources_per_class_.assign(resource_group_.GetResourceClassesCount(),
3583 absl::flat_hash_set<int>());
3584
3585 for (int v : resource_group_.GetVehiclesRequiringAResource()) {
3586 const int64_t start = model_.Start(v);
3587 if (!IsVarSynced(start)) {
3588 continue;
3589 }
3590 vehicle_requires_resource_assignment_[v] =
3591 VehicleRequiresResourceAssignment(
3592 v, [this](int64_t n) { return Value(n); }, &current_synch_failed_);
3593 if (vehicle_requires_resource_assignment_[v]) {
3594 vehicles_requiring_resource_assignment_.push_back(v);
3595 }
3596 if (current_synch_failed_) {
3597 return;
3598 }
3599 }
3600 synchronized_cost_without_transit_ = 0;
3601}
3602
3603void ResourceGroupAssignmentFilter::OnSynchronizePathFromStart(int64_t start) {
3604 if (current_synch_failed_) return;
3605 DCHECK(IsVarSynced(start));
3606 const int v = model_.VehicleIndex(start);
3607 const auto& next_accessor = [this](int64_t index) {
3608 return IsVarSynced(index) ? Value(index) : -1;
3609 };
3610 if (!vehicle_requires_resource_assignment_[v]) {
3611 const int64_t route_cost =
3612 ComputeRouteCumulCostWithoutResourceAssignment(v, next_accessor);
3613 if (route_cost < 0) {
3614 current_synch_failed_ = true;
3615 return;
3616 }
3617 CapAddTo(route_cost, &synchronized_cost_without_transit_);
3618 vehicle_to_resource_class_assignment_costs_[v] = {route_cost};
3619 return;
3620 }
3621 // NOTE(user): Even if filter_objective_cost_ is false, we
3622 // still need to call ComputeVehicleToResourceClassAssignmentCosts() for every
3623 // vehicle requiring resource assignment to keep track of whether or not a
3624 // given vehicle-to-resource-class assignment is possible by storing 0 or -1
3625 // in vehicle_to_resource_class_assignment_costs_.
3626 // TODO(user): Adjust the 'solve_duration_ratio' below.
3627 if (!ComputeVehicleToResourceClassAssignmentCosts(
3628 v, /*solve_duration_ratio=*/1.0, resource_group_,
3629 ignored_resources_per_class_, next_accessor,
3630 dimension_.transit_evaluator(v), filter_objective_cost_,
3631 lp_optimizer_, mp_optimizer_,
3632 &vehicle_to_resource_class_assignment_costs_[v], nullptr, nullptr)) {
3633 vehicle_to_resource_class_assignment_costs_[v].assign(
3634 resource_group_.GetResourceClassesCount(), -1);
3635 current_synch_failed_ = true;
3636 }
3637}
3638
3639void ResourceGroupAssignmentFilter::OnAfterSynchronizePaths() {
3640 if (current_synch_failed_) {
3641 synchronized_cost_without_transit_ = 0;
3642 return;
3643 }
3644 if (!filter_objective_cost_) {
3645 DCHECK_EQ(synchronized_cost_without_transit_, 0);
3646 return;
3647 }
3648 const int64_t assignment_cost = ComputeBestVehicleToResourceAssignment(
3649 vehicles_requiring_resource_assignment_,
3650 resource_group_.GetResourceIndicesPerClass(),
3651 ignored_resources_per_class_,
3652 [this](int v) { return &vehicle_to_resource_class_assignment_costs_[v]; },
3653 nullptr);
3654 if (assignment_cost < 0) {
3655 synchronized_cost_without_transit_ = 0;
3656 current_synch_failed_ = true;
3657 } else {
3658 DCHECK_GE(synchronized_cost_without_transit_, 0);
3659 CapAddTo(assignment_cost, &synchronized_cost_without_transit_);
3660 }
3661}
3662
3663bool ResourceGroupAssignmentFilter::VehicleRequiresResourceAssignment(
3664 int vehicle, const std::function<int64_t(int64_t)>& next_accessor,
3665 bool* is_infeasible) {
3666 *is_infeasible = false;
3667 if (!resource_group_.VehicleRequiresAResource(vehicle)) return false;
3668 const IntVar* res_var = model_.ResourceVar(vehicle, resource_group_.Index());
3669 if (!model_.IsVehicleUsedWhenEmpty(vehicle) &&
3670 next_accessor(model_.Start(vehicle)) == model_.End(vehicle)) {
3671 if (res_var->Bound() && res_var->Value() >= 0) {
3672 // Vehicle with a resource (force-)assigned to it cannot be unused.
3673 *is_infeasible = true;
3674 }
3675 return false;
3676 }
3677 // Vehicle is used.
3678 if (res_var->Bound()) {
3679 // No need to do resource assignment for this vehicle.
3680 const int res = res_var->Value();
3681 if (res < 0) {
3682 // Vehicle has a negative resource index enforced but is used.
3683 *is_infeasible = true;
3684 } else {
3685 bound_resource_index_of_vehicle_[vehicle] = res;
3686 const RCIndex rc = resource_group_.GetResourceClassIndex(res);
3687 ignored_resources_per_class_[rc].insert(res);
3688 }
3689 return false;
3690 }
3691 // Vehicle is used and res_var isn't bound.
3692 return true;
3693}
3694
3695int64_t
3696ResourceGroupAssignmentFilter::ComputeRouteCumulCostWithoutResourceAssignment(
3697 int vehicle, const std::function<int64_t(int64_t)>& next_accessor) const {
3698 if (next_accessor(model_.Start(vehicle)) == model_.End(vehicle) &&
3699 !model_.IsVehicleUsedWhenEmpty(vehicle)) {
3700 return 0;
3701 }
3702 using Resource = RoutingModel::ResourceGroup::Resource;
3703 const Resource* resource = nullptr;
3704 if (resource_group_.VehicleRequiresAResource(vehicle)) {
3705 DCHECK_GE(bound_resource_index_of_vehicle_[vehicle], 0);
3706 resource =
3707 &resource_group_.GetResource(bound_resource_index_of_vehicle_[vehicle]);
3708 }
3709 int64_t route_cost = 0;
3710 const DimensionSchedulingStatus status =
3711 lp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
3712 vehicle, /*solve_duration_ratio=*/1.0, next_accessor, resource,
3713 filter_objective_cost_ ? &route_cost : nullptr);
3714 switch (status) {
3715 case DimensionSchedulingStatus::INFEASIBLE:
3716 return -1;
3717 case DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY:
3718 if (mp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
3719 vehicle, /*solve_duration_ratio=*/1.0, next_accessor, resource,
3720 filter_objective_cost_ ? &route_cost : nullptr) ==
3721 DimensionSchedulingStatus::INFEASIBLE) {
3722 return -1;
3723 }
3724 break;
3725 default:
3726 DCHECK(status == DimensionSchedulingStatus::OPTIMAL ||
3727 status == DimensionSchedulingStatus::FEASIBLE);
3728 }
3729 return route_cost;
3730}
3731
3732// ResourceAssignmentFilter
3733class ResourceAssignmentFilter : public LocalSearchFilter {
3734 public:
3735 ResourceAssignmentFilter(const std::vector<IntVar*>& nexts,
3736 LocalDimensionCumulOptimizer* optimizer,
3737 LocalDimensionCumulOptimizer* mp_optimizer,
3738 bool propagate_own_objective_value,
3739 bool filter_objective_cost);
3740 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3741 int64_t objective_min, int64_t objective_max) override;
3742 void Synchronize(const Assignment* assignment,
3743 const Assignment* delta) override;
3744
3745 int64_t GetAcceptedObjectiveValue() const override {
3746 return propagate_own_objective_value_ ? delta_cost_ : 0;
3747 }
3748 int64_t GetSynchronizedObjectiveValue() const override {
3749 return propagate_own_objective_value_ ? synchronized_cost_ : 0;
3750 }
3751 std::string DebugString() const override {
3752 return "ResourceAssignmentFilter(" + dimension_name_ + ")";
3753 }
3754
3755 private:
3756 std::vector<IntVarLocalSearchFilter*> resource_group_assignment_filters_;
3757 int64_t synchronized_cost_;
3758 int64_t delta_cost_;
3759 const bool propagate_own_objective_value_;
3760 const std::string dimension_name_;
3761};
3762
3763ResourceAssignmentFilter::ResourceAssignmentFilter(
3764 const std::vector<IntVar*>& nexts,
3765 LocalDimensionCumulOptimizer* lp_optimizer,
3766 LocalDimensionCumulOptimizer* mp_optimizer,
3767 bool propagate_own_objective_value, bool filter_objective_cost)
3768 : propagate_own_objective_value_(propagate_own_objective_value),
3769 dimension_name_(lp_optimizer->dimension()->name()) {
3770 const RoutingModel& model = *lp_optimizer->dimension()->model();
3771 for (const auto& resource_group : model.GetResourceGroups()) {
3772 resource_group_assignment_filters_.push_back(
3773 model.solver()->RevAlloc(new ResourceGroupAssignmentFilter(
3774 nexts, resource_group.get(), lp_optimizer, mp_optimizer,
3775 filter_objective_cost)));
3776 }
3777}
3778
3779bool ResourceAssignmentFilter::Accept(const Assignment* delta,
3780 const Assignment* deltadelta,
3781 int64_t objective_min,
3782 int64_t objective_max) {
3783 delta_cost_ = 0;
3784 for (LocalSearchFilter* group_filter : resource_group_assignment_filters_) {
3785 if (!group_filter->Accept(delta, deltadelta, objective_min,
3786 objective_max)) {
3787 return false;
3788 }
3789 delta_cost_ =
3790 std::max(delta_cost_, group_filter->GetAcceptedObjectiveValue());
3791 DCHECK_LE(delta_cost_, objective_max)
3792 << "ResourceGroupAssignmentFilter should return false when the "
3793 "objective_max is exceeded.";
3794 }
3795 return true;
3796}
3797
3798void ResourceAssignmentFilter::Synchronize(const Assignment* assignment,
3799 const Assignment* delta) {
3800 synchronized_cost_ = 0;
3801 for (LocalSearchFilter* group_filter : resource_group_assignment_filters_) {
3802 group_filter->Synchronize(assignment, delta);
3803 synchronized_cost_ = std::max(
3804 synchronized_cost_, group_filter->GetSynchronizedObjectiveValue());
3805 }
3806}
3807
3808} // namespace
3809
3810LocalSearchFilter* MakeResourceAssignmentFilter(
3811 LocalDimensionCumulOptimizer* lp_optimizer,
3812 LocalDimensionCumulOptimizer* mp_optimizer,
3813 bool propagate_own_objective_value, bool filter_objective_cost) {
3814 const RoutingModel& model = *lp_optimizer->dimension()->model();
3815 DCHECK_NE(lp_optimizer, nullptr);
3816 DCHECK_NE(mp_optimizer, nullptr);
3817 return model.solver()->RevAlloc(new ResourceAssignmentFilter(
3818 model.Nexts(), lp_optimizer, mp_optimizer, propagate_own_objective_value,
3819 filter_objective_cost));
3820}
3821
3822namespace {
3823
3824// This filter accepts deltas for which the assignment satisfies the
3825// constraints of the Solver. This is verified by keeping an internal copy of
3826// the assignment with all Next vars and their updated values, and calling
3827// RestoreAssignment() on the assignment+delta.
3828// TODO(user): Also call the solution finalizer on variables, with the
3829 // exception of Next Vars (would fail on large instances).
3830// WARNING: In the case of mandatory nodes, when all vehicles are currently
3831// being used in the solution but uninserted nodes still remain, this filter
3832// will reject the solution, even if the node could be inserted on one of these
3833// routes, because all Next vars of vehicle starts are already instantiated.
3834// TODO(user): Avoid such false negatives.
3835
3836class CPFeasibilityFilter : public IntVarLocalSearchFilter {
3837 public:
3838 explicit CPFeasibilityFilter(RoutingModel* routing_model);
3839 ~CPFeasibilityFilter() override = default;
3840 std::string DebugString() const override { return "CPFeasibilityFilter"; }
3841 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3842 int64_t objective_min, int64_t objective_max) override;
3843 void OnSynchronize(const Assignment* delta) override;
3844
3845 private:
3846 void AddDeltaToAssignment(const Assignment* delta, Assignment* assignment);
3847
3848 static const int64_t kUnassigned;
3849 const RoutingModel* const model_;
3850 Solver* const solver_;
3851 Assignment* const assignment_;
3852 Assignment* const temp_assignment_;
3853 DecisionBuilder* const restore_;
3854 SearchLimit* const limit_;
3855};
3856
3857const int64_t CPFeasibilityFilter::kUnassigned = -1;
3858
3859CPFeasibilityFilter::CPFeasibilityFilter(RoutingModel* routing_model)
3860 : IntVarLocalSearchFilter(routing_model->Nexts()),
3861 model_(routing_model),
3862 solver_(routing_model->solver()),
3863 assignment_(solver_->MakeAssignment()),
3864 temp_assignment_(solver_->MakeAssignment()),
3865 restore_(solver_->MakeRestoreAssignment(temp_assignment_)),
3866 limit_(solver_->MakeCustomLimit(
3867 [routing_model]() { return routing_model->CheckLimit(); })) {
3868 assignment_->Add(routing_model->Nexts());
3869}
3870
3871bool CPFeasibilityFilter::Accept(const Assignment* delta,
3872 const Assignment* /*deltadelta*/,
3873 int64_t /*objective_min*/,
3874 int64_t /*objective_max*/) {
3875 temp_assignment_->Copy(assignment_);
3876 AddDeltaToAssignment(delta, temp_assignment_);
3877
3878 return solver_->Solve(restore_, limit_);
3879}
3880
3881void CPFeasibilityFilter::OnSynchronize(const Assignment* delta) {
3882 AddDeltaToAssignment(delta, assignment_);
3883}
3884
3885void CPFeasibilityFilter::AddDeltaToAssignment(const Assignment* delta,
3886 Assignment* assignment) {
3887 if (delta == nullptr) {
3888 return;
3889 }
3890 Assignment::IntContainer* const container =
3891 assignment->MutableIntVarContainer();
3892 for (const IntVarElement& delta_element :
3893 delta->IntVarContainer().elements()) {
3894 IntVar* const var = delta_element.Var();
3895 int64_t index = kUnassigned;
3896 // Ignoring variables found in the delta which are not next variables, such
3897 // as vehicle variables.
3898 if (!FindIndex(var, &index)) continue;
3899 DCHECK_EQ(var, Var(index));
3900 const int64_t value = delta_element.Value();
3901
3902 container->AddAtPosition(var, index)->SetValue(value);
3903 if (model_->IsStart(index)) {
3904 if (model_->IsEnd(value)) {
3905 // Do not restore unused routes.
3906 container->MutableElement(index)->Deactivate();
3907 } else {
3908 // Re-activate the route's start in case it was deactivated before.
3909 container->MutableElement(index)->Activate();
3910 }
3911 }
3912 }
3913}
3914
3915} // namespace
3916
3917IntVarLocalSearchFilter* MakeCPFeasibilityFilter(RoutingModel* routing_model) {
3918 return routing_model->solver()->RevAlloc(
3919 new CPFeasibilityFilter(routing_model));
3920}
3921
3922PathState::PathState(int num_nodes, std::vector<int> path_start,
3923 std::vector<int> path_end)
3924 : num_nodes_(num_nodes),
3925 num_paths_(path_start.size()),
3926 num_nodes_threshold_(std::max(16, 4 * num_nodes_)) // Arbitrary value.
3927{
3928 DCHECK_EQ(path_start.size(), num_paths_);
3929 DCHECK_EQ(path_end.size(), num_paths_);
3930 for (int p = 0; p < num_paths_; ++p) {
3931 path_start_end_.push_back({path_start[p], path_end[p]});
3932 }
3933 Reset();
3934}
3935
3936void PathState::Reset() {
3937 is_invalid_ = false;
3938 // Initial state is all unperformed: paths go from start to end directly.
3939 committed_index_.assign(num_nodes_, -1);
3940 committed_paths_.assign(num_nodes_, kUnassigned);
3941 committed_nodes_.assign(2 * num_paths_, -1);
3942 chains_.assign(num_paths_ + 1, {-1, -1}); // Reserve 1 more for sentinel.
3943 paths_.assign(num_paths_, {-1, -1});
3944 for (int path = 0; path < num_paths_; ++path) {
3945 const int index = 2 * path;
3946 const auto& [start, end] = path_start_end_[path];
3947 committed_index_[start] = index;
3948 committed_index_[end] = index + 1;
3949
3950 committed_nodes_[index] = start;
3951 committed_nodes_[index + 1] = end;
3952
3953 committed_paths_[start] = path;
3954 committed_paths_[end] = path;
3955
3956 chains_[path] = {index, index + 2};
3957 paths_[path] = {path, path + 1};
3958 }
3959 chains_[num_paths_] = {0, 0}; // Sentinel.
3960 // Nodes that are not starts or ends are not in any path, but they still need
3961 // to be represented in the committed state.
3962 for (int node = 0; node < num_nodes_; ++node) {
3963 if (committed_index_[node] != -1) continue; // node is start or end.
3964 committed_index_[node] = committed_nodes_.size();
3965 committed_nodes_.push_back(node);
3966 }
3967}
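// After Reset(), committed_nodes_ stores each path's start followed by its
// end (every other node is appended afterwards as unassigned), and each path
// is described by the single committed chain {start, end}.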
3968
3969PathState::ChainRange PathState::Chains(int path) const {
3970 const PathBounds bounds = paths_[path];
3971 return PathState::ChainRange(chains_.data() + bounds.begin_index,
3972 chains_.data() + bounds.end_index,
3973 committed_nodes_.data());
3974}
3975
3976PathState::NodeRange PathState::Nodes(int path) const {
3977 const PathBounds bounds = paths_[path];
3978 return PathState::NodeRange(chains_.data() + bounds.begin_index,
3979 chains_.data() + bounds.end_index,
3980 committed_nodes_.data());
3981}
3982
3983void PathState::ChangePath(int path, absl::Span<const ChainBounds> chains) {
3984 changed_paths_.push_back(path);
3985 const int path_begin_index = chains_.size();
3986 chains_.insert(chains_.end(), chains.begin(), chains.end());
3987 const int path_end_index = chains_.size();
3988 paths_[path] = {path_begin_index, path_end_index};
3989 chains_.emplace_back(0, 0); // Sentinel.
3990}
3991
3992void PathState::ChangeLoops(absl::Span<const int> new_loops) {
3993 for (const int loop : new_loops) {
3994 // If the node was already a loop, do not add it.
3995 // If it was not assigned, it becomes a loop.
3996 if (Path(loop) == kLoop) continue;
3997 changed_loops_.push_back(loop);
3998 }
3999}
4000
4001void PathState::Commit() {
4002 DCHECK(!IsInvalid());
4003 if (committed_nodes_.size() < num_nodes_threshold_) {
4004 IncrementalCommit();
4005 } else {
4006 FullCommit();
4007 }
4008}
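// IncrementalCommit() below only appends the changed paths at the end of
// committed_nodes_; once that buffer grows past num_nodes_threshold_,
// FullCommit() rewrites it so that every node appears exactly once again.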
4009
4010void PathState::Revert() {
4011 is_invalid_ = false;
4012 chains_.resize(num_paths_ + 1); // One per path + sentinel.
4013 for (const int path : changed_paths_) {
4014 paths_[path] = {path, path + 1};
4015 }
4016 changed_paths_.clear();
4017 changed_loops_.clear();
4018}
4019
4020void PathState::CopyNewPathAtEndOfNodes(int path) {
4021 // Copy path's nodes, chain by chain.
4022 const PathBounds path_bounds = paths_[path];
4023 for (int i = path_bounds.begin_index; i < path_bounds.end_index; ++i) {
4024 const ChainBounds chain_bounds = chains_[i];
4025 committed_nodes_.insert(committed_nodes_.end(),
4026 committed_nodes_.data() + chain_bounds.begin_index,
4027 committed_nodes_.data() + chain_bounds.end_index);
4028 if (committed_paths_[committed_nodes_.back()] == path) continue;
4029 for (int i = chain_bounds.begin_index; i < chain_bounds.end_index; ++i) {
4030 const int node = committed_nodes_[i];
4031 committed_paths_[node] = path;
4032 }
4033 }
4034}
4035
4036// TODO(user): Instead of copying paths at the end systematically,
4037// reuse some of the memory when possible.
4038void PathState::IncrementalCommit() {
4039 const int new_nodes_begin = committed_nodes_.size();
4040 for (const int path : ChangedPaths()) {
4041 const int chain_begin = committed_nodes_.size();
4042 CopyNewPathAtEndOfNodes(path);
4043 const int chain_end = committed_nodes_.size();
4044 chains_[path] = {chain_begin, chain_end};
4045 }
4046 // Re-index all copied nodes.
4047 const int new_nodes_end = committed_nodes_.size();
4048 for (int i = new_nodes_begin; i < new_nodes_end; ++i) {
4049 const int node = committed_nodes_[i];
4050 committed_index_[node] = i;
4051 }
4052 // New loops stay in place: only change their path to kLoop,
4053 // committed_index_ does not change.
4054 for (const int loop : ChangedLoops()) {
4055 committed_paths_[loop] = kLoop;
4056 }
4057 // Committed part of the state is set up, erase incremental changes.
4058 Revert();
4059}
4060
4061void PathState::FullCommit() {
4062 // Copy all paths at the end of committed_nodes_,
4063 // then remove all old committed_nodes_.
4064 const int old_num_nodes = committed_nodes_.size();
4065 for (int path = 0; path < num_paths_; ++path) {
4066 const int new_path_begin = committed_nodes_.size() - old_num_nodes;
4067 CopyNewPathAtEndOfNodes(path);
4068 const int new_path_end = committed_nodes_.size() - old_num_nodes;
4069 chains_[path] = {new_path_begin, new_path_end};
4070 }
4071 committed_nodes_.erase(committed_nodes_.begin(),
4072 committed_nodes_.begin() + old_num_nodes);
4073
4074 // Reindex path nodes, then loop nodes.
4075 constexpr int kUnindexed = -1;
4076 committed_index_.assign(num_nodes_, kUnindexed);
4077 int index = 0;
4078 for (const int node : committed_nodes_) {
4079 committed_index_[node] = index++;
4080 }
4081 for (int node = 0; node < num_nodes_; ++node) {
4082 if (committed_index_[node] != kUnindexed) continue;
4083 committed_index_[node] = index++;
4084 committed_nodes_.push_back(node);
4085 }
4086 for (const int loop : ChangedLoops()) {
4087 committed_paths_[loop] = kLoop;
4088 }
4089 // Committed part of the state is set up, erase incremental changes.
4090 Revert();
4091}
4092
4093namespace {
4094
4095class PathStateFilter : public LocalSearchFilter {
4096 public:
4097 std::string DebugString() const override { return "PathStateFilter"; }
4098 PathStateFilter(std::unique_ptr<PathState> path_state,
4099 const std::vector<IntVar*>& nexts);
4100 void Relax(const Assignment* delta, const Assignment* deltadelta) override;
4101 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
4102 return true;
4103 }
4104 void Synchronize(const Assignment*, const Assignment*) override {};
4105 void Commit(const Assignment* assignment, const Assignment* delta) override;
4106 void Revert() override;
4107 void Reset() override;
4108
4109 private:
4110 // Used in arc to chain translation, see below.
4111 struct TailHeadIndices {
4112 int tail_index;
4113 int head_index;
4114 };
4115 struct IndexArc {
4116 int index;
4117 int arc;
4118 bool operator<(const IndexArc& other) const { return index < other.index; }
4119 };
4120
4121 // Translate changed_arcs_ to chains, pass to underlying PathState.
4122 void CutChains();
4123 // From changed_paths_ and changed_arcs_, fill chains_ and paths_.
4124 // Selection-based algorithm in O(n^2), to use for small change sets.
4125 void MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm();
4126 // From changed_paths_ and changed_arcs_, fill chains_ and paths_.
4127 // Generic algorithm in O(sort(n)+n), to use for larger change sets.
4128 void MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm();
4129
4130 const std::unique_ptr<PathState> path_state_;
4131 // Map IntVar* index to node, offset by the min index in nexts.
4132 std::vector<int> variable_index_to_node_;
4133 int index_offset_;
4134
4135 // Used in CutChains(), class member status avoids reallocations.
4136 std::vector<int> changed_paths_;
4137 std::vector<bool> path_has_changed_;
4138 std::vector<std::pair<int, int>> changed_arcs_;
4139 std::vector<int> changed_loops_;
4140 std::vector<TailHeadIndices> tail_head_indices_;
4141 std::vector<IndexArc> arcs_by_tail_index_;
4142 std::vector<IndexArc> arcs_by_head_index_;
4143 std::vector<int> next_arc_;
4144 std::vector<PathState::ChainBounds> path_chains_;
4145};
4146
4147PathStateFilter::PathStateFilter(std::unique_ptr<PathState> path_state,
4148 const std::vector<IntVar*>& nexts)
4149 : path_state_(std::move(path_state)) {
4150 {
4151 int min_index = std::numeric_limits<int>::max();
4152 int max_index = std::numeric_limits<int>::min();
4153 for (const IntVar* next : nexts) {
4154 const int index = next->index();
4155 min_index = std::min<int>(min_index, index);
4156 max_index = std::max<int>(max_index, index);
4157 }
4158 variable_index_to_node_.resize(max_index - min_index + 1, -1);
4159 index_offset_ = min_index;
4160 }
4161
4162 for (int node = 0; node < nexts.size(); ++node) {
4163 const int index = nexts[node]->index() - index_offset_;
4164 variable_index_to_node_[index] = node;
4165 }
4166 path_has_changed_.assign(path_state_->NumPaths(), false);
4167}
4168
4169void PathStateFilter::Relax(const Assignment* delta, const Assignment*) {
4170 path_state_->Revert();
4171 changed_arcs_.clear();
4172 for (const IntVarElement& var_value : delta->IntVarContainer().elements()) {
4173 if (var_value.Var() == nullptr) continue;
4174 const int index = var_value.Var()->index() - index_offset_;
4175 if (index < 0 || variable_index_to_node_.size() <= index) continue;
4176 const int node = variable_index_to_node_[index];
4177 if (node == -1) continue;
4178 if (var_value.Bound()) {
4179 changed_arcs_.emplace_back(node, var_value.Value());
4180 } else {
4181 path_state_->Revert();
4182 path_state_->SetInvalid();
4183 return;
4184 }
4185 }
4186 CutChains();
4187}
4188
4189void PathStateFilter::Reset() { path_state_->Reset(); }
4190
4191// The solver does not guarantee that a given Commit() corresponds to
4192// the previous Relax() (or that there has been a call to Relax()),
4193// so we replay the full change call sequence.
4194void PathStateFilter::Commit(const Assignment* assignment,
4195 const Assignment* delta) {
4196 path_state_->Revert();
4197 if (delta == nullptr || delta->Empty()) {
4198 Relax(assignment, nullptr);
4199 } else {
4200 Relax(delta, nullptr);
4201 }
4202 path_state_->Commit();
4203}
4204
4205void PathStateFilter::Revert() { path_state_->Revert(); }
4206
4207void PathStateFilter::CutChains() {
4208 // Filter out unchanged arcs from changed_arcs_,
4209 // translate changed arcs to changed arc indices.
4210 // Fill changed_paths_ while we hold node_path.
4211 for (const int path : changed_paths_) path_has_changed_[path] = false;
4212 changed_paths_.clear();
4213 tail_head_indices_.clear();
4214 changed_loops_.clear();
4215 int num_changed_arcs = 0;
4216 for (const auto [node, next] : changed_arcs_) {
4217 const int node_index = path_state_->CommittedIndex(node);
4218 const int next_index = path_state_->CommittedIndex(next);
4219 const int node_path = path_state_->Path(node);
4220 if (next != node &&
4221 (next_index != node_index + 1 || node_path < 0)) { // New arc.
4222 tail_head_indices_.push_back({node_index, next_index});
4223 changed_arcs_[num_changed_arcs++] = {node, next};
4224 if (node_path >= 0 && !path_has_changed_[node_path]) {
4225 path_has_changed_[node_path] = true;
4226 changed_paths_.push_back(node_path);
4227 }
4228 } else if (node == next && node_path != PathState::kLoop) { // New loop.
4229 changed_loops_.push_back(node);
4230 }
4231 }
4232 changed_arcs_.resize(num_changed_arcs);
4233
4234 path_state_->ChangeLoops(changed_loops_);
4235 if (tail_head_indices_.size() + changed_paths_.size() <= 8) {
4236 MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm();
4237 } else {
4238 MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm();
4239 }
4240}
4241
4242void PathStateFilter::
4243 MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm() {
4244 int num_visited_changed_arcs = 0;
4245 const int num_changed_arcs = tail_head_indices_.size();
4246 // For every path, find all its chains.
4247 for (const int path : changed_paths_) {
4248 path_chains_.clear();
4249 const auto [start_index, end_index] = path_state_->CommittedPathRange(path);
4250 int current_index = start_index;
4251 while (true) {
4252 // Look for smallest non-visited tail_index that is no smaller than
4253 // current_index.
4254 int selected_arc = -1;
4255 int selected_tail_index = std::numeric_limits<int>::max();
4256 for (int i = num_visited_changed_arcs; i < num_changed_arcs; ++i) {
4257 const int tail_index = tail_head_indices_[i].tail_index;
4258 if (current_index <= tail_index && tail_index < selected_tail_index) {
4259 selected_arc = i;
4260 selected_tail_index = tail_index;
4261 }
4262 }
4263 // If there is no such tail index, or more generally if the next chain
4264 // would be cut by end of path,
4265 // stack {current_index, end_index} in chains_, and go to the next path.
4266 // Otherwise, stack {current_index, tail_index+1} in chains_,
4267 // set current_index = head_index, set pair to visited.
4268 if (start_index <= current_index && current_index < end_index &&
4269 end_index <= selected_tail_index) {
4270 path_chains_.emplace_back(current_index, end_index);
4271 break;
4272 } else {
4273 path_chains_.emplace_back(current_index, selected_tail_index + 1);
4274 current_index = tail_head_indices_[selected_arc].head_index;
4275 std::swap(tail_head_indices_[num_visited_changed_arcs],
4276 tail_head_indices_[selected_arc]);
4277 ++num_visited_changed_arcs;
4278 }
4279 }
4280 path_state_->ChangePath(path, path_chains_);
4281 }
4282}
4283
4284void PathStateFilter::MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm() {
4285 // TRICKY: For each changed path, we want to generate a sequence of chains
4286 // that represents the path in the changed state.
4287 // First, notice that if we add a fake end->start arc for each changed path,
4288 // then all chains will be from the head of an arc to the tail of an arc.
4289 // A way to generate the changed chains and paths would be, for each path,
4290 // to start from a fake arc's head (the path start), go down the path until
4291 // the tail of an arc, and go to the next arc until we return on the fake arc,
4292 // enqueuing the [head, tail] chains as we go.
4293 // In turn, to do that, we need to know which arc to go to.
4294 // If we sort all heads and tails by index in two separate arrays,
4295 // the head_index and tail_index at the same rank are such that
4296 // [head_index, tail_index] is a chain. Moreover, the arc that must be visited
4297 // after head_index's arc is tail_index's arc.
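// Worked example (illustrative): committed path indices [0, 4) hold nodes
// a, b, c, d and the only new arc is b->d (so c becomes a loop). With the
// fake arc d->a, the sorted heads are (a, fake), (d, b->d) and the sorted
// tails are (b, b->d), (d, fake), so next_arc_ maps fake -> b->d -> fake.
// Starting from the fake arc yields the chains [a, b] then [d], i.e. the
// new path a -> b -> d.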
4298
4299 // Add a fake end->start arc for each path.
4300 for (const int path : changed_paths_) {
4301 const auto [start_index, end_index] = path_state_->CommittedPathRange(path);
4302 tail_head_indices_.push_back({end_index - 1, start_index});
4303 }
4304
4305 // Generate pairs (tail_index, arc) and (head_index, arc) for all arcs,
4306 // sort those pairs by index.
4307 const int num_arc_indices = tail_head_indices_.size();
4308 arcs_by_tail_index_.resize(num_arc_indices);
4309 arcs_by_head_index_.resize(num_arc_indices);
4310 for (int i = 0; i < num_arc_indices; ++i) {
4311 arcs_by_tail_index_[i] = {tail_head_indices_[i].tail_index, i};
4312 arcs_by_head_index_[i] = {tail_head_indices_[i].head_index, i};
4313 }
4314 std::sort(arcs_by_tail_index_.begin(), arcs_by_tail_index_.end());
4315 std::sort(arcs_by_head_index_.begin(), arcs_by_head_index_.end());
4316 // Generate the map from arc to next arc in path.
4317 next_arc_.resize(num_arc_indices);
4318 for (int i = 0; i < num_arc_indices; ++i) {
4319 next_arc_[arcs_by_head_index_[i].arc] = arcs_by_tail_index_[i].arc;
4320 }
4321
4322 // Generate chains: for every changed path, start from its fake arc,
4323 // jump to next_arc_ until going back to fake arc,
4324 // enqueuing chains as we go.
4325 const int first_fake_arc = num_arc_indices - changed_paths_.size();
4326 for (int fake_arc = first_fake_arc; fake_arc < num_arc_indices; ++fake_arc) {
4327 path_chains_.clear();
4328 int32_t arc = fake_arc;
4329 do {
4330 const int chain_begin = tail_head_indices_[arc].head_index;
4331 arc = next_arc_[arc];
4332 const int chain_end = tail_head_indices_[arc].tail_index + 1;
4333 path_chains_.emplace_back(chain_begin, chain_end);
4334 } while (arc != fake_arc);
4335 const int path = changed_paths_[fake_arc - first_fake_arc];
4336 path_state_->ChangePath(path, path_chains_);
4337 }
4338}
4339
4340} // namespace
4341
4342LocalSearchFilter* MakePathStateFilter(Solver* solver,
4343    std::unique_ptr<PathState> path_state,
4344 const std::vector<IntVar*>& nexts) {
4345 PathStateFilter* filter = new PathStateFilter(std::move(path_state), nexts);
4346 return solver->RevAlloc(filter);
4347}
4348
4349namespace {
4350using EInterval = DimensionChecker::ExtendedInterval;
4351
4352constexpr int64_t kint64min = std::numeric_limits<int64_t>::min();
4353constexpr int64_t kint64max = std::numeric_limits<int64_t>::max();
4354
4355EInterval operator&(const EInterval& i1, const EInterval& i2) {
4356 return {std::max(i1.num_negative_infinity == 0 ? i1.min : kint64min,
4357 i2.num_negative_infinity == 0 ? i2.min : kint64min),
4358 std::min(i1.num_positive_infinity == 0 ? i1.max : kint64max,
4359 i2.num_positive_infinity == 0 ? i2.max : kint64max),
4360 std::min(i1.num_negative_infinity, i2.num_negative_infinity),
4361 std::min(i1.num_positive_infinity, i2.num_positive_infinity)};
4362}
4363
4364EInterval operator&=(EInterval& i1, const EInterval& i2) {
4365 i1 = i1 & i2;
4366 return i1;
4367}
4368
4369bool IsEmpty(const EInterval& interval) {
4370 const int64_t minimum_value =
4371 interval.num_negative_infinity == 0 ? interval.min : kint64min;
4372 const int64_t maximum_value =
4373 interval.num_positive_infinity == 0 ? interval.max : kint64max;
4374 return minimum_value > maximum_value;
4375}
4376
4377EInterval operator+(const EInterval& i1, const EInterval& i2) {
4378 return {CapAdd(i1.min, i2.min), CapAdd(i1.max, i2.max),
4379 i1.num_negative_infinity + i2.num_negative_infinity,
4380 i1.num_positive_infinity + i2.num_positive_infinity};
4381}
4382
4383EInterval& operator+=(EInterval& i1, const EInterval& i2) {
4384 i1 = i1 + i2;
4385 return i1;
4386}
4387
4388EInterval operator-(const EInterval& i1, const EInterval& i2) {
4389 return {CapSub(i1.min, i2.max), CapSub(i1.max, i2.min),
4390 i1.num_negative_infinity + i2.num_positive_infinity,
4391 i1.num_positive_infinity + i2.num_negative_infinity};
4392}
4393
4394// Return the interval delta such that from + delta = to.
4395// Note that the result is not the same as "to + (-from)".
4396EInterval Delta(const EInterval& from, const EInterval& to) {
4397 return {CapSub(to.min, from.min), CapSub(to.max, from.max),
4398 to.num_negative_infinity - from.num_negative_infinity,
4399 to.num_positive_infinity - from.num_positive_infinity};
4400}
4401
4402EInterval ToExtendedInterval(DimensionChecker::Interval interval) {
4403 const bool is_neg_infinity = interval.min == kint64min;
4404 const bool is_pos_infinity = interval.max == kint64max;
4405 return {is_neg_infinity ? 0 : interval.min,
4406 is_pos_infinity ? 0 : interval.max, is_neg_infinity ? 1 : 0,
4407 is_pos_infinity ? 1 : 0};
4408}
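// Illustrative examples of the extended-interval arithmetic above
// (hypothetical values):
// ToExtendedInterval({5, kint64max}) is {5, 0, 0, 1}, i.e. [5, +inf).
// Adding the finite interval {2, 3, 0, 0} gives {7, 3, 0, 1}, still unbounded
// above, and intersecting with the finite {0, 10, 0, 0} gives {7, 10, 0, 0}.
// With from = {0, 0, 0, 1} and to = {5, 7, 0, 1}, Delta(from, to) is
// {5, 7, 0, 0} = [5, 7], whereas to - from would be {5, 7, 1, 1}, i.e.
// (-inf, +inf): the infinity counters cancel in Delta but accumulate in '-'.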
4409
4410std::vector<EInterval> ToExtendedIntervals(
4411 absl::Span<const DimensionChecker::Interval> intervals) {
4412 std::vector<EInterval> extended_intervals;
4413 extended_intervals.reserve(intervals.size());
4414 for (const auto& interval : intervals) {
4415 extended_intervals.push_back(ToExtendedInterval(interval));
4416 }
4417 return extended_intervals;
4418}
4419} // namespace
4420
4421DimensionChecker::DimensionChecker(
4422    const PathState* path_state, std::vector<Interval> path_capacity,
4423 std::vector<int> path_class,
4424 std::vector<std::function<Interval(int64_t, int64_t)>>
4425 demand_per_path_class,
4426 std::vector<Interval> node_capacity, int min_range_size_for_riq)
4427 : path_state_(path_state),
4428 path_capacity_(ToExtendedIntervals(path_capacity)),
4429 path_class_(std::move(path_class)),
4430 demand_per_path_class_(std::move(demand_per_path_class)),
4431 node_capacity_(ToExtendedIntervals(node_capacity)),
4432 index_(path_state_->NumNodes(), 0),
4433 maximum_riq_layer_size_(std::max(
4434 16, 4 * path_state_->NumNodes())), // 16 and 4 are arbitrary.
4435 min_range_size_for_riq_(min_range_size_for_riq) {
4436 const int num_nodes = path_state_->NumNodes();
4437 cached_demand_.resize(num_nodes);
4438 const int num_paths = path_state_->NumPaths();
4439 DCHECK_EQ(num_paths, path_capacity_.size());
4440 DCHECK_EQ(num_paths, path_class_.size());
4441 const int maximum_riq_exponent = MostSignificantBitPosition32(num_nodes);
4442 riq_.resize(maximum_riq_exponent + 1);
4443 FullCommit();
4444}
4445
4446bool DimensionChecker::Check() const {
4447  if (path_state_->IsInvalid()) return true;
4448 for (const int path : path_state_->ChangedPaths()) {
4449 const EInterval path_capacity = path_capacity_[path];
4450 const int path_class = path_class_[path];
4451 // Loop invariant: except for the first chain, cumul represents the cumul
4452 // state of the last node of the previous chain, and it is nonempty.
4453 int prev_node = path_state_->Start(path);
4454 EInterval cumul = node_capacity_[prev_node] & path_capacity;
4455 if (IsEmpty(cumul)) return false;
4456
4457 for (const auto chain : path_state_->Chains(path)) {
4458 const int first_node = chain.First();
4459 const int last_node = chain.Last();
4460
4461 if (prev_node != first_node) {
4462 // Bring cumul state from last node of previous chain to first node of
4463 // current chain.
4464 const EInterval demand = ToExtendedInterval(
4465 demand_per_path_class_[path_class](prev_node, first_node));
4466 cumul += demand;
4467 cumul &= path_capacity;
4468 cumul &= node_capacity_[first_node];
4469 if (IsEmpty(cumul)) return false;
4470 prev_node = first_node;
4471 }
4472
4473 // Bring cumul state from first node to last node of the current chain.
4474 const int first_index = index_[first_node];
4475 const int last_index = index_[last_node];
4476 const int chain_path = path_state_->Path(first_node);
4477 const int chain_path_class =
4478 chain_path < 0 ? -1 : path_class_[chain_path];
4479 // Use a RIQ if the chain size is large enough;
4480 // the optimal size was found with the associated benchmark in tests,
4481 // in particular BM_DimensionChecker<ChangeSparsity::kSparse, *>.
4482 const bool chain_is_cached = chain_path_class == path_class;
4483 if (last_index - first_index > min_range_size_for_riq_ &&
4484 chain_is_cached) {
4485 UpdateCumulUsingChainRIQ(first_index, last_index, path_capacity, cumul);
4486 if (IsEmpty(cumul)) return false;
4487 prev_node = chain.Last();
4488 } else {
4489 for (const int node : chain.WithoutFirstNode()) {
4490 const EInterval demand =
4491 chain_is_cached
4492 ? cached_demand_[prev_node]
4493 : ToExtendedInterval(
4494 demand_per_path_class_[path_class](prev_node, node));
4495 cumul += demand;
4496 cumul &= node_capacity_[node];
4497 cumul &= path_capacity;
4498 if (IsEmpty(cumul)) return false;
4499 prev_node = node;
4500 }
4501 }
4502 }
4503 }
4504 return true;
4505}
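// A small illustration of the propagation in Check() above (hypothetical
// numbers): with path_capacity = [0, 100], a start node with capacity [0, 10]
// and a transition demand of [5, 7] to a node with capacity [0, 15], the cumul
// starts at [0, 10], becomes [5, 17] after adding the demand, and is narrowed
// to [5, 15] by the node capacity; the path is rejected as soon as such an
// intersection becomes empty.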
4506
4507void DimensionChecker::Commit() {
4508  const int current_layer_size = riq_[0].size();
4509 int change_size = path_state_->ChangedPaths().size();
4510 for (const int path : path_state_->ChangedPaths()) {
4511 for (const auto chain : path_state_->Chains(path)) {
4512 change_size += chain.NumNodes();
4513 }
4514 }
4515 if (current_layer_size + change_size <= maximum_riq_layer_size_) {
4516 IncrementalCommit();
4517 } else {
4518 FullCommit();
4519 }
4520}
4521
4522void DimensionChecker::IncrementalCommit() {
4523 for (const int path : path_state_->ChangedPaths()) {
4524 const int begin_index = riq_[0].size();
4525 AppendPathDemandsToSums(path);
4526 UpdateRIQStructure(begin_index, riq_[0].size());
4527 }
4528}
4529
4530void DimensionChecker::FullCommit() {
4531 // Clear all structures.
4532 for (auto& layer : riq_) layer.clear();
4533 // Append all paths.
4534 const int num_paths = path_state_->NumPaths();
4535 for (int path = 0; path < num_paths; ++path) {
4536 const int begin_index = riq_[0].size();
4537 AppendPathDemandsToSums(path);
4538 UpdateRIQStructure(begin_index, riq_[0].size());
4539 }
4540}
4541
4542void DimensionChecker::AppendPathDemandsToSums(int path) {
4543  // The demand sums stored in riq_[0] at a node's index must be the running
4544  // sum of all transition demands from the start of the path to that node.
4545 const int path_class = path_class_[path];
4546 EInterval demand_sum = {0, 0, 0, 0};
4547 int prev = path_state_->Start(path);
4548 int index = riq_[0].size();
4549 for (const int node : path_state_->Nodes(path)) {
4550 // Transition to current node.
4551 const EInterval demand =
4552 prev == node ? EInterval{0, 0, 0, 0}
4553 : ToExtendedInterval(
4554 demand_per_path_class_[path_class](prev, node));
4555 demand_sum += demand;
4556 cached_demand_[prev] = demand;
4557 prev = node;
4558 // Store all data of current node.
4559 index_[node] = index++;
4560 riq_[0].push_back({.cumuls_to_fst = node_capacity_[node],
4561 .tightest_tsum = demand_sum,
4562 .cumuls_to_lst = node_capacity_[node],
4563 .tsum_at_fst = demand_sum,
4564 .tsum_at_lst = demand_sum});
4565 }
4566 cached_demand_[path_state_->End(path)] = {0, 0, 0, 0};
4567}
4568
4569void DimensionChecker::UpdateRIQStructure(int begin_index, int end_index) {
4570 // The max layer is the one used by Range Intersection Query functions on
4571 // (begin_index, end_index - 1).
4572 const int max_layer =
4573 MostSignificantBitPosition32(end_index - begin_index - 1);
4574 for (int layer = 1, half_window = 1; layer <= max_layer;
4575 ++layer, half_window *= 2) {
4576 riq_[layer].resize(end_index);
4577 for (int i = begin_index + 2 * half_window - 1; i < end_index; ++i) {
4578 // The window covered by riq_[layer][i] goes from
4579 // first = i - 2 * half_window + 1 to last = i, inclusive.
4580 // Values are computed from two half-windows of the layer below,
4581 // the F-window = (i - 2 * half_window, i - half_window], and
4582      // the L-window = (i - half_window, i].
4583 const RIQNode& fw = riq_[layer - 1][i - half_window];
4584 const RIQNode& lw = riq_[layer - 1][i];
4585 const EInterval lst_to_lst = Delta(fw.tsum_at_lst, lw.tsum_at_lst);
4586 const EInterval fst_to_fst = Delta(fw.tsum_at_fst, lw.tsum_at_fst);
4587
4588 riq_[layer][i] = {
4589 .cumuls_to_fst = fw.cumuls_to_fst & lw.cumuls_to_fst - fst_to_fst,
4590 .tightest_tsum = fw.tightest_tsum & lw.tightest_tsum,
4591 .cumuls_to_lst = fw.cumuls_to_lst + lst_to_lst & lw.cumuls_to_lst,
4592 .tsum_at_fst = fw.tsum_at_fst,
4593 .tsum_at_lst = lw.tsum_at_lst};
4594 }
4595 }
4596}
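// For instance (hypothetical indices), at layer 2 (half_window = 2),
// riq_[2][i] summarizes the window [i - 3, i]: it combines riq_[1][i - 2],
// the F-window covering [i - 3, i - 2], with riq_[1][i], the L-window
// covering [i - 1, i], glued together by the Delta of their demand sums.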
4597
4598// The RIQ schema decomposes the request into two windows:
4599// - the F window covers indices [first_index, first_index + window)
4600// - the L window covers indices (last_index - window, last_index]
4601// The decomposition uses the first and last nodes of these windows.
4602void DimensionChecker::UpdateCumulUsingChainRIQ(
4603 int first_index, int last_index, const ExtendedInterval& path_capacity,
4604 ExtendedInterval& cumul) const {
4605 DCHECK_LE(0, first_index);
4606 DCHECK_LT(first_index, last_index);
4607 DCHECK_LT(last_index, riq_[0].size());
4608 const int layer = MostSignificantBitPosition32(last_index - first_index);
4609 const int window = 1 << layer;
4610 const RIQNode& fw = riq_[layer][first_index + window - 1];
4611 const RIQNode& lw = riq_[layer][last_index];
4612
4613 // Compute the set of cumul values that can reach the last node.
4614 cumul &= fw.cumuls_to_fst;
4615 cumul &= lw.cumuls_to_fst - Delta(fw.tsum_at_fst, lw.tsum_at_fst);
4616 cumul &= path_capacity -
4617 Delta(fw.tsum_at_fst, fw.tightest_tsum & lw.tightest_tsum);
4618
4619 // We need to check for emptiness before widening the interval with transit.
4620 if (IsEmpty(cumul)) return;
4621
4622 // Transit to last node.
4623 cumul += Delta(fw.tsum_at_fst, lw.tsum_at_lst);
4624
4625 // Compute the set of cumul values that are reached from first node.
4626 cumul &= fw.cumuls_to_lst + Delta(fw.tsum_at_lst, lw.tsum_at_lst);
4627 cumul &= lw.cumuls_to_lst;
4628}
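// For example (hypothetical indices), a query on [first_index, last_index] =
// [3, 10] uses layer = MostSignificantBitPosition32(7) = 2, so window = 4:
// fw = riq_[2][6] summarizes indices [3, 6] and lw = riq_[2][10] summarizes
// indices [7, 10]; together the two (possibly overlapping) windows cover the
// whole chain [3, 10].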
4629
4630namespace {
4631
4632class DimensionFilter : public LocalSearchFilter {
4633 public:
4634 std::string DebugString() const override { return name_; }
4635 DimensionFilter(std::unique_ptr<DimensionChecker> checker,
4636 absl::string_view dimension_name)
4637 : checker_(std::move(checker)),
4638 name_(absl::StrCat("DimensionFilter(", dimension_name, ")")) {}
4639
4640 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
4641 return checker_->Check();
4642 }
4643
4644 void Synchronize(const Assignment*, const Assignment*) override {
4645 checker_->Commit();
4646 }
4647
4648 private:
4649 std::unique_ptr<DimensionChecker> checker_;
4650 const std::string name_;
4651};
4652
4653} // namespace
4654
4655LocalSearchFilter* MakeDimensionFilter(
4656    Solver* solver, std::unique_ptr<DimensionChecker> checker,
4657 absl::string_view dimension_name) {
4658 DimensionFilter* filter =
4659 new DimensionFilter(std::move(checker), dimension_name);
4660 return solver->RevAlloc(filter);
4661}
4662
4663LightVehicleBreaksChecker::LightVehicleBreaksChecker(
4664    PathState* path_state, std::vector<PathData> path_data)
4665 : path_state_(path_state), path_data_(std::move(path_data)) {}
4666
4667void LightVehicleBreaksChecker::Relax() const {
4668  for (const int path : path_state_->ChangedPaths()) {
4669 path_data_[path].start_cumul.Relax();
4670 path_data_[path].end_cumul.Relax();
4671 path_data_[path].span.Relax();
4672 }
4673}
4674
4675bool LightVehicleBreaksChecker::Check() const {
4676  for (const int path : path_state_->ChangedPaths()) {
4677 if (!path_data_[path].span.Exists()) continue;
4678 const PathData& data = path_data_[path];
4679 const int64_t total_transit = data.total_transit.Min();
4680 int64_t lb_span = data.span.Min();
4681 // Improve bounds on span/start max/end min using time windows: breaks that
4682 // must occur inside the path have their duration accumulated into
4683    // lb_span_tw; they also widen [start_max, end_min).
4684 int64_t lb_span_tw = total_transit;
4685 int64_t start_max = data.start_cumul.Max();
4686 int64_t end_min = data.end_cumul.Min();
4687 for (const auto& br : data.vehicle_breaks) {
4688 if (!br.is_performed_min) continue;
4689 if (br.start_max < end_min && start_max < br.end_min) {
4690 CapAddTo(br.duration_min, &lb_span_tw);
4691 start_max = std::min(start_max, br.start_max);
4692 end_min = std::max(end_min, br.end_min);
4693 }
4694 }
4695 lb_span = std::max({lb_span, lb_span_tw, CapSub(end_min, start_max)});
4696 // Compute num_feasible_breaks = number of breaks that may fit into route,
4697    // and [break_start_min, break_end_max) = maximal coverage of the breaks.
4698 int64_t break_start_min = kint64max;
4699 int64_t break_end_max = kint64min;
4700 int64_t start_min = data.start_cumul.Min();
4701 start_min = std::max(start_min, CapSub(end_min, data.span.Max()));
4702 int64_t end_max = data.end_cumul.Max();
4703 end_max = std::min(end_max, CapAdd(start_max, data.span.Max()));
4704 int num_feasible_breaks = 0;
4705 for (const auto& br : data.vehicle_breaks) {
4706 if (start_min <= br.start_max && br.end_min <= end_max) {
4707 break_start_min = std::min(break_start_min, br.start_min);
4708 break_end_max = std::max(break_end_max, br.end_max);
4709 ++num_feasible_breaks;
4710 }
4711 }
4712 // Improve span/start min/end max using interbreak limits: there must be
4713 // enough breaks inside the path, so that for each limit, the union of
4714 // [br.start - max_interbreak, br.end + max_interbreak) covers [start, end),
4715 // or [start, end) is shorter than max_interbreak.
4716 for (const auto& [max_interbreak, min_break_duration] :
4717 data.interbreak_limits) {
4718 // Minimal number of breaks depends on total transit:
4719 // 0 breaks for 0 <= total transit <= limit,
4720 // 1 break for limit + 1 <= total transit <= 2 * limit,
4721 // i breaks for i * limit + 1 <= total transit <= (i+1) * limit, ...
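      // For example (hypothetical values), with max_interbreak = 100 and
      // total_transit = 250, at least (250 - 1) / 100 = 2 breaks must fit in
      // the route, and the span is at least 250 + 2 * min_break_duration.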
4722 if (max_interbreak == 0) {
4723 if (total_transit > 0) return false;
4724 continue;
4725 }
4726 int64_t min_num_breaks =
4727 std::max<int64_t>(0, (total_transit - 1) / max_interbreak);
4728 if (lb_span > max_interbreak) {
4729 min_num_breaks = std::max<int64_t>(min_num_breaks, 1);
4730 }
4731 if (min_num_breaks > num_feasible_breaks) return false;
4732 lb_span = std::max(
4733 lb_span,
4734 CapAdd(total_transit, CapProd(min_num_breaks, min_break_duration)));
4735 if (min_num_breaks > 0) {
4736 if (!data.start_cumul.SetMin(CapSub(break_start_min, max_interbreak))) {
4737 return false;
4738 }
4739 if (!data.end_cumul.SetMax(CapAdd(break_end_max, max_interbreak))) {
4740 return false;
4741 }
4742 }
4743 }
4744 if (!data.span.SetMin(lb_span)) return false;
4745 // Merge span lb information directly in start/end variables.
4746 start_max = std::min(start_max, CapSub(end_max, lb_span));
4747 if (!data.start_cumul.SetMax(start_max)) return false;
4748 end_min = std::max(end_min, CapAdd(start_min, lb_span));
4749 if (!data.end_cumul.SetMin(end_min)) return false;
4750 }
4751 return true;
4752}
4753
4754namespace {
4755
4756class LightVehicleBreaksFilter : public LocalSearchFilter {
4757 public:
4758 LightVehicleBreaksFilter(std::unique_ptr<LightVehicleBreaksChecker> checker,
4759 absl::string_view dimension_name)
4760 : checker_(std::move(checker)),
4761 name_(absl::StrCat("LightVehicleBreaksFilter(", dimension_name, ")")) {}
4762
4763 std::string DebugString() const override { return name_; }
4764
4765 void Relax(const Assignment*, const Assignment*) override {
4766 checker_->Relax();
4767 }
4768
4769 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
4770 return checker_->Check();
4771 }
4772
4773 void Synchronize(const Assignment*, const Assignment*) override {
4774 checker_->Check();
4775 }
4776
4777 private:
4778 std::unique_ptr<LightVehicleBreaksChecker> checker_;
4779 const std::string name_;
4780};
4781
4782} // namespace
4783
4784LocalSearchFilter* MakeLightVehicleBreaksFilter(
4785    Solver* solver, std::unique_ptr<LightVehicleBreaksChecker> checker,
4786 absl::string_view dimension_name) {
4787 LightVehicleBreaksFilter* filter =
4788 new LightVehicleBreaksFilter(std::move(checker), dimension_name);
4789 return solver->RevAlloc(filter);
4790}
4791
4792void WeightedWaveletTree::Clear() {
4793  elements_.clear();
4794 tree_location_.clear();
4795 nodes_.clear();
4796 for (auto& layer : tree_layers_) layer.clear();
4797}
4798
4799void WeightedWaveletTree::MakeTreeFromNewElements() {
4800  // New elements are elements_[i] for i in [begin_index, end_index).
4801 const int begin_index = tree_location_.size();
4802 const int end_index = elements_.size();
4803 DCHECK_LE(begin_index, end_index);
4804 if (begin_index >= end_index) return;
4805  // Gather all heights, then sort and deduplicate them; this yields the list
4806  // of pivot heights of the underlying tree, in inorder-traversal order.
4807 // TODO(user): investigate whether balancing the tree using the
4808 // number of occurrences of each height would be beneficial.
4809 // TODO(user): use a heap-like encoding for the binary search tree:
4810 // children of i at 2*i and 2*i+1. Better cache line utilization.
4811 const int old_node_size = nodes_.size();
4812 for (int i = begin_index; i < end_index; ++i) {
4813 nodes_.push_back({.pivot_height = elements_[i].height, .pivot_index = -1});
4814 }
4815 std::sort(nodes_.begin() + old_node_size, nodes_.end());
4816 nodes_.erase(std::unique(nodes_.begin() + old_node_size, nodes_.end()),
4817 nodes_.end());
4818
4819 // Remember location of the tree representation for this range of elements.
4820 // tree_location_ may be smaller than elements_, extend it if needed.
4821 const int new_node_size = nodes_.size();
4822 tree_location_.resize(end_index, {.node_begin = old_node_size,
4823 .node_end = new_node_size,
4824 .sequence_first = begin_index});
4825
4826 // Add and extend layers if needed.
4827  // The number of layers needed is 1 + ceil(log2(number of distinct heights)).
4828 const int num_layers =
4829 2 + MostSignificantBitPosition32(new_node_size - old_node_size - 1);
4830 if (tree_layers_.size() <= num_layers) tree_layers_.resize(num_layers);
4831 for (int l = 0; l < num_layers; ++l) {
4832 tree_layers_[l].resize(end_index,
4833 {.prefix_sum = 0, .left_index = -1, .is_left = 0});
4834 }
4835
4836 // Fill all relevant locations of the tree, and record tree navigation
4837 // information. This recursive function has at most num_layers call depth.
4838 const auto fill_subtree = [this](auto& fill_subtree, int layer,
4839 int node_begin, int node_end,
4840 int range_begin, int range_end) {
4841 DCHECK_LT(node_begin, node_end);
4842 DCHECK_LT(range_begin, range_end);
4843 // Precompute prefix sums of range [range_begin, range_end).
4844 int64_t sum = 0;
4845 for (int i = range_begin; i < range_end; ++i) {
4846 sum += elements_[i].weight;
4847 tree_layers_[layer][i].prefix_sum = sum;
4848 }
4849 if (node_begin + 1 == node_end) return;
4850 // Range has more than one height, partition it.
4851 // Record layer l -> l+1 sequence index mapping:
4852 // - if height < pivot, record where this element will be in layer l+1.
4853    // - if height >= pivot, record where the next height < pivot element will be.
4854 const int node_mid = node_begin + (node_end - node_begin) / 2;
4855 const int64_t pivot_height = nodes_[node_mid].pivot_height;
4856 int pivot_index = range_begin;
4857 for (int i = range_begin; i < range_end; ++i) {
4858 tree_layers_[layer][i].left_index = pivot_index;
4859 tree_layers_[layer][i].is_left = elements_[i].height < pivot_height;
4860 if (elements_[i].height < pivot_height) ++pivot_index;
4861 }
4862 nodes_[node_mid].pivot_index = pivot_index;
4863 // TODO(user): stable_partition allocates memory,
4864 // find a way to fill layers without this.
4865 std::stable_partition(
4866 elements_.begin() + range_begin, elements_.begin() + range_end,
4867 [pivot_height](const auto& el) { return el.height < pivot_height; });
4868
4869 fill_subtree(fill_subtree, layer + 1, node_begin, node_mid, range_begin,
4870 pivot_index);
4871 fill_subtree(fill_subtree, layer + 1, node_mid, node_end, pivot_index,
4872 range_end);
4873 };
4874 fill_subtree(fill_subtree, 0, old_node_size, new_node_size, begin_index,
4875 end_index);
4876}
4877
4878int64_t WeightedWaveletTree::RangeSumWithThreshold(int64_t threshold_height,
4879 int begin_index,
4880 int end_index) const {
4881 DCHECK_LE(begin_index, end_index); // Range can be empty, but not reversed.
4882 DCHECK_LE(end_index, tree_location_.size());
4883 DCHECK_EQ(tree_location_.size(), elements_.size()); // No pending elements.
4884 if (begin_index >= end_index) return 0;
4885 auto [node_begin, node_end, sequence_first_index] =
4886 tree_location_[begin_index];
4887 DCHECK_EQ(tree_location_[end_index - 1].sequence_first,
4888 sequence_first_index); // Range is included in a single sequence.
4889 ElementRange range{
4890 .range_first_index = begin_index,
4891 .range_last_index = end_index - 1,
4892 .range_first_is_node_first = begin_index == sequence_first_index};
4893 // Answer in O(1) for the common case where max(heights) < threshold.
4894 if (nodes_[node_end - 1].pivot_height < threshold_height) return 0;
4895
4896 int64_t sum = 0;
4897 int64_t min_height_of_current_node = nodes_[node_begin].pivot_height;
4898 for (int l = 0; !range.Empty(); ++l) {
4899 const ElementInfo* elements = tree_layers_[l].data();
4900 if (threshold_height <= min_height_of_current_node) {
4901 // Query or subquery threshold covers all elements of this node.
4902      // This makes the query O(1) when its threshold is <= min(heights).
4903 sum += range.Sum(elements);
4904 return sum;
4905 } else if (node_begin + 1 == node_end) {
4906 // This node is a leaf, its height is < threshold, stop descent here.
4907 return sum;
4908 }
4909
4910 const int node_mid = node_begin + (node_end - node_begin) / 2;
4911 const auto [pivot_height, pivot_index] = nodes_[node_mid];
4912 const ElementRange right = range.RightSubRange(elements, pivot_index);
4913 if (threshold_height < pivot_height) {
4914 // All elements of the right child have their height above the threshold,
4915      // so we can project the range to the right child and add its whole sum.
4916 if (!right.Empty()) sum += right.Sum(tree_layers_[l + 1].data());
4917 // Go to the left child.
4918 range = range.LeftSubRange(elements);
4919 node_end = node_mid;
4920 } else {
4921 // Go to the right child.
4922 range = right;
4923 node_begin = node_mid;
4924 min_height_of_current_node = pivot_height;
4925 }
4926 }
4927 return sum;
4928}
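// Usage sketch (hypothetical values): after pushing elements with
// (height, weight) = (3, 10), (1, 20), (5, 30), (3, 40) and calling
// MakeTreeFromNewElements(), RangeSumWithThreshold(3, 0, 4) returns
// 10 + 30 + 40 = 80, the sum of the weights whose height is >= 3, while
// RangeSumWithThreshold(6, 0, 4) returns 0.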
4929
4930PathEnergyCostChecker::PathEnergyCostChecker(
4931    const PathState* path_state, std::vector<int64_t> force_start_min,
4932 std::vector<int64_t> force_end_min, std::vector<int> force_class,
4933 std::vector<const std::function<int64_t(int64_t)>*> force_per_class,
4934 std::vector<int> distance_class,
4935 std::vector<const std::function<int64_t(int64_t, int64_t)>*>
4936 distance_per_class,
4937 std::vector<EnergyCost> path_energy_cost,
4938 std::vector<bool> path_has_cost_when_empty)
4939 : path_state_(path_state),
4940 force_start_min_(std::move(force_start_min)),
4941 force_end_min_(std::move(force_end_min)),
4942 force_class_(std::move(force_class)),
4943 distance_class_(std::move(distance_class)),
4944 force_per_class_(std::move(force_per_class)),
4945 distance_per_class_(std::move(distance_per_class)),
4946 path_energy_cost_(std::move(path_energy_cost)),
4947 path_has_cost_when_empty_(std::move(path_has_cost_when_empty)),
4948 maximum_range_query_size_(4 * path_state_->NumNodes()),
4949 force_rmq_index_of_node_(path_state_->NumNodes()),
4950 threshold_query_index_of_node_(path_state_->NumNodes()) {
4951 const int num_nodes = path_state_->NumNodes();
4952 cached_force_.resize(num_nodes);
4953 cached_distance_.resize(num_nodes);
4954 FullCacheAndPrecompute();
4955 committed_total_cost_ = 0;
4956 committed_path_cost_.assign(path_state_->NumPaths(), 0);
4957 const int num_paths = path_state_->NumPaths();
4958 for (int path = 0; path < num_paths; ++path) {
4959 committed_path_cost_[path] = ComputePathCost(path);
4960 CapAddTo(committed_path_cost_[path], &committed_total_cost_);
4961 }
4962 accepted_total_cost_ = committed_total_cost_;
4963}
4964
4965bool PathEnergyCostChecker::Check() {
4966  if (path_state_->IsInvalid()) return true;
4967 accepted_total_cost_ = committed_total_cost_;
4968 for (const int path : path_state_->ChangedPaths()) {
4969 accepted_total_cost_ =
4970 CapSub(accepted_total_cost_, committed_path_cost_[path]);
4971 CapAddTo(ComputePathCost(path), &accepted_total_cost_);
4972 if (accepted_total_cost_ == kint64max) return false;
4973 }
4974 return true;
4975}
4976
4977void PathEnergyCostChecker::CacheAndPrecomputeRangeQueriesOfPath(int path) {
4978 // Cache force and distance evaluations,
4979 // precompute force RMQ, energy/distance threshold queries.
4980 const auto& force_evaluator = *force_per_class_[force_class_[path]];
4981 const auto& distance_evaluator = *distance_per_class_[distance_class_[path]];
4982 int force_index = force_rmq_.TableSize();
4983 int threshold_index = energy_query_.TreeSize();
4984 int64_t total_force = 0;
4985
4986 const int start_node = path_state_->Start(path);
4987 int prev_node = start_node;
4988
4989 for (const int node : path_state_->Nodes(path)) {
4990 if (prev_node != node) {
4991 const int64_t distance = distance_evaluator(prev_node, node);
4992 cached_distance_[prev_node] = distance;
4993 energy_query_.PushBack(total_force, total_force * distance);
4994 distance_query_.PushBack(total_force, distance);
4995 prev_node = node;
4996 }
4997 threshold_query_index_of_node_[node] = threshold_index++;
4998 force_rmq_.PushBack(total_force);
4999 force_rmq_index_of_node_[node] = force_index++;
5000 const int64_t force = force_evaluator(node);
5001 cached_force_[node] = force;
5002 total_force += force;
5003 }
5004 force_rmq_.MakeTableFromNewElements();
5005 energy_query_.MakeTreeFromNewElements();
5006 distance_query_.MakeTreeFromNewElements();
5007}
5008
5009void PathEnergyCostChecker::IncrementalCacheAndPrecompute() {
5010 for (const int path : path_state_->ChangedPaths()) {
5011 CacheAndPrecomputeRangeQueriesOfPath(path);
5012 }
5013}
5014
5015void PathEnergyCostChecker::FullCacheAndPrecompute() {
5016 force_rmq_.Clear();
5017 // Rebuild all paths.
5018 const int num_paths = path_state_->NumPaths();
5019 for (int path = 0; path < num_paths; ++path) {
5020 CacheAndPrecomputeRangeQueriesOfPath(path);
5021 }
5022}
5023
5024void PathEnergyCostChecker::Commit() {
5025  int change_size = path_state_->ChangedPaths().size();
5026 for (const int path : path_state_->ChangedPaths()) {
5027 for (const auto chain : path_state_->Chains(path)) {
5028 change_size += chain.NumNodes();
5029 }
5030 committed_total_cost_ =
5031 CapSub(committed_total_cost_, committed_path_cost_[path]);
5032 committed_path_cost_[path] = ComputePathCost(path);
5033 CapAddTo(committed_path_cost_[path], &committed_total_cost_);
5034 }
5035
5036 const int current_layer_size = force_rmq_.TableSize();
5037 if (current_layer_size + change_size <= maximum_range_query_size_) {
5038 IncrementalCacheAndPrecompute();
5039 } else {
5040 FullCacheAndPrecompute();
5041 }
5042}
5043
5044int64_t PathEnergyCostChecker::ComputePathCost(int64_t path) const {
5045 const int path_force_class = force_class_[path];
5046 const auto& force_evaluator = *force_per_class_[path_force_class];
5047
5048 // Find minimal force at which to start.
5049 int64_t total_force = force_start_min_[path];
5050 int64_t min_force = total_force;
5051 int num_path_nodes = 0;
5052 int prev_node = path_state_->Start(path);
5053 for (const auto chain : path_state_->Chains(path)) {
5054 num_path_nodes += chain.NumNodes();
5055 // Add force needed to go from prev_node to chain.First() if needed.
5056 if (chain.First() != prev_node) {
5057 const int64_t force_to_node = force_evaluator(prev_node);
5058 CapAddTo(force_to_node, &total_force);
5059 min_force = std::min(min_force, total_force);
5060 prev_node = chain.First();
5061 }
5062
5063 // Add force needed to go from chain.First() to chain.Last().
5064 const int chain_path = path_state_->Path(chain.First());
5065 const int chain_force_class =
5066 chain_path < 0 ? -1 : force_class_[chain_path];
5067 const bool force_is_cached = chain_force_class == path_force_class;
5068 if (force_is_cached && chain.NumNodes() >= 2) {
5069 const int first_index = force_rmq_index_of_node_[chain.First()];
5070 const int last_index = force_rmq_index_of_node_[chain.Last()];
5071 // Get total force at first, last and lowest point of the chain.
5072 const int64_t first_total_force = force_rmq_.array()[first_index];
5073 const int64_t last_total_force = force_rmq_.array()[last_index];
5074 const int64_t min_total_force =
5075 force_rmq_.RangeMinimum(first_index, last_index);
5076 // Compute running minimum total force and total force at chain.Last().
5077 min_force = std::min(min_force,
5078 total_force - first_total_force + min_total_force);
5079 CapAddTo(last_total_force - first_total_force, &total_force);
5080 prev_node = chain.Last();
5081 } else {
5082 for (const int node : chain.WithoutFirstNode()) {
5083 const int64_t force = force_is_cached ? cached_force_[prev_node]
5084 : force_evaluator(prev_node);
5085 CapAddTo(force, &total_force);
5086 min_force = std::min(min_force, total_force);
5087 prev_node = node;
5088 }
5089 }
5090 }
5091 if (num_path_nodes == 2 && !path_has_cost_when_empty_[path]) return 0;
5092 // Force must be offset in order to be all of:
5093 // - >= force_start_min_[path] at start
5094 // - >= force_end_min_[path] at end
5095 // - >= 0 at all intermediate nodes
5096 // We set the accumulator to the minimal offset that allows this.
5097 total_force = std::max<int64_t>(
5098 {0, CapOpp(min_force), CapSub(force_end_min_[path], total_force)});
5099 CapAddTo(force_start_min_[path], &total_force);
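  // For example (hypothetical values): if the first pass started at
  // force_start_min_[path] = 2, dipped to min_force = -3 and ended at
  // total_force = 4 while force_end_min_[path] = 10, the offset is
  // max(0, 3, 10 - 4) = 6 and the pass below restarts from 2 + 6 = 8.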
5100
5101 // Compute energy, below and above threshold.
5102 const int path_distance_class = distance_class_[path];
5103 const auto& distance_evaluator = *distance_per_class_[path_distance_class];
5104 const EnergyCost& cost = path_energy_cost_[path];
5105 int64_t energy_below = 0;
5106 int64_t energy_above = 0;
5107 prev_node = path_state_->Start(path);
5108 for (const auto chain : path_state_->Chains(path)) {
5109 // Bring cost computation to first node of the chain.
5110 if (chain.First() != prev_node) {
5111 const int64_t distance = distance_evaluator(prev_node, chain.First());
5112 CapAddTo(force_evaluator(prev_node), &total_force);
5113 CapAddTo(CapProd(std::min(cost.threshold, total_force), distance),
5114 &energy_below);
5115 const int64_t force_above =
5116 std::max<int64_t>(0, CapSub(total_force, cost.threshold));
5117 CapAddTo(CapProd(force_above, distance), &energy_above);
5118 prev_node = chain.First();
5119 }
5120
5121 // Inside chain, try to reuse cached forces and distances instead of more
5122 // costly calls to evaluators.
5123 const int chain_path = path_state_->Path(chain.First());
5124 const int chain_force_class =
5125 chain_path < 0 ? -1 : force_class_[chain_path];
5126 const int chain_distance_class =
5127 chain_path < 0 ? -1 : distance_class_[chain_path];
5128 const bool force_is_cached = chain_force_class == path_force_class;
5129 const bool distance_is_cached = chain_distance_class == path_distance_class;
5130
5131 if (force_is_cached && distance_is_cached && chain.NumNodes() >= 2) {
5132 const int first_index = threshold_query_index_of_node_[chain.First()];
5133 const int last_index = threshold_query_index_of_node_[chain.Last()];
5134
5135 const int64_t zero_total_energy = energy_query_.RangeSumWithThreshold(
5136 kint64min, first_index, last_index);
5137 const int64_t total_distance = distance_query_.RangeSumWithThreshold(
5138 kint64min, first_index, last_index);
5139
5140 // In the following, zero_ values are those computed with the hypothesis
5141 // that the force at the start node is zero.
5142 // The total_force at chain.First() is in general not the same in the
5143 // candidate path and in the zero_ case. We can still query the energy and
5144 // distance totals incurred by transitions above the actual threshold
5145 // during the chain, by offsetting the queries to zero_threshold.
5146 const int64_t zero_total_force_first =
5147 force_rmq_.array()[force_rmq_index_of_node_[chain.First()]];
5148 const int64_t zero_threshold =
5149 CapSub(cost.threshold, CapSub(total_force, zero_total_force_first));
5150 // "High" transitions are those that occur with a force at or above the
5151 // threshold. "High" energy is the sum of energy values during high
5152 // transitions, same for "high" distance.
5153 const int64_t zero_high_energy = energy_query_.RangeSumWithThreshold(
5154 zero_threshold, first_index, last_index);
5155 const int64_t zero_high_distance = distance_query_.RangeSumWithThreshold(
5156 zero_threshold, first_index, last_index);
5157 // "Above" energy is the energy caused by total_force above the threshold.
5158 // Since "above" energy is only incurred during "high" transitions, it can
5159 // be computed from "high" energy knowing distance and threshold.
5160 const int64_t zero_energy_above =
5161 CapSub(zero_high_energy, CapProd(zero_high_distance, zero_threshold));
5162 // To compute the energy values of the candidate, the force dimension
5163 // must be offset back to the candidate's total force.
5164 // Only the "below" energy is changed by the offset, the zero_ energy
5165 // above the zero_ threshold was computed to be the same as the candidate
5166 // energy above the actual threshold.
5167 CapAddTo(zero_energy_above, &energy_above);
5168 CapAddTo(CapAdd(CapSub(zero_total_energy, zero_energy_above),
5169 CapProd(total_distance,
5170 CapSub(cost.threshold, zero_threshold))),
5171 &energy_below);
5172      // Reuse the partial sums stored in the force RMQ to compute the total
5173      // force incurred by the chain, bringing total_force to chain.Last().
5174 const int64_t zero_total_force_last =
5175 force_rmq_.array()[force_rmq_index_of_node_[chain.Last()]];
5176 CapAddTo(CapSub(zero_total_force_last, zero_total_force_first),
5177 &total_force);
5178 prev_node = chain.Last();
5179 } else {
5180 for (const int node : chain.WithoutFirstNode()) {
5181 const int64_t force = force_is_cached ? cached_force_[prev_node]
5182 : force_evaluator(prev_node);
5183 const int64_t distance = distance_is_cached
5184 ? cached_distance_[prev_node]
5185 : distance_evaluator(prev_node, node);
5186 CapAddTo(force, &total_force);
5187 CapAddTo(CapProd(std::min(cost.threshold, total_force), distance),
5188 &energy_below);
5189 const int64_t force_above =
5190 std::max<int64_t>(0, CapSub(total_force, cost.threshold));
5191 CapAddTo(CapProd(force_above, distance), &energy_above);
5192 prev_node = node;
5193 }
5194 }
5195 }
5196
5197 return CapAdd(CapProd(energy_below, cost.cost_per_unit_below_threshold),
5198 CapProd(energy_above, cost.cost_per_unit_above_threshold));
5199}
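// A numeric illustration of the zero_-offset queries above (hypothetical
// values): take a cached chain with three transitions whose committed force
// prefix sums (the query heights) are 0, 4, 9 and whose distances are 2, 1, 3,
// so the cached energy weights are 0, 4, 27. If the candidate enters the chain
// with total_force = 10 (so, with zero_total_force_first = 0, zero_threshold =
// 12 - 10 = 2 for cost.threshold = 12), the "high" transitions are those with
// height >= 2, giving zero_high_energy = 4 + 27 = 31 and zero_high_distance =
// 1 + 3 = 4, hence zero_energy_above = 31 - 4 * 2 = 23. Computing directly
// with forces 10, 14, 19 gives the same energy above the threshold:
// 0 * 2 + 2 * 1 + 7 * 3 = 23.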
5200
5201namespace {
5202
5203class PathEnergyCostFilter : public LocalSearchFilter {
5204 public:
5205 std::string DebugString() const override { return name_; }
5206 PathEnergyCostFilter(std::unique_ptr<PathEnergyCostChecker> checker,
5207 absl::string_view energy_name)
5208 : checker_(std::move(checker)),
5209 name_(absl::StrCat("PathEnergyCostFilter(", energy_name, ")")) {}
5210
5211 bool Accept(const Assignment*, const Assignment*, int64_t objective_min,
5212 int64_t objective_max) override {
5213 if (objective_max > kint64max / 2) return true;
5214 if (!checker_->Check()) return false;
5215 const int64_t cost = checker_->AcceptedCost();
5216 return objective_min <= cost && cost <= objective_max;
5217 }
5218
5219 void Synchronize(const Assignment*, const Assignment*) override {
5220 checker_->Commit();
5221 }
5222
5223 int64_t GetSynchronizedObjectiveValue() const override {
5224 return checker_->CommittedCost();
5225 }
5226 int64_t GetAcceptedObjectiveValue() const override {
5227 return checker_->AcceptedCost();
5228 }
5229
5230 private:
5231 std::unique_ptr<PathEnergyCostChecker> checker_;
5232 const std::string name_;
5233};
5234
5235} // namespace
5236
5237LocalSearchFilter* MakePathEnergyCostFilter(
5238    Solver* solver, std::unique_ptr<PathEnergyCostChecker> checker,
5239 absl::string_view dimension_name) {
5240 PathEnergyCostFilter* filter =
5241 new PathEnergyCostFilter(std::move(checker), dimension_name);
5242 return solver->RevAlloc(filter);
5243}
5244
5245// TODO(user): Implement same-vehicle filter. Could be merged with node
5246// precedence filter.
5247
5248MaxLinearExpressionEvaluator::MaxLinearExpressionEvaluator(
5249    const std::vector<std::vector<double>>& rows)
5250 : num_variables_(rows.empty() ? 0 : rows[0].size()),
5251 num_rows_(rows.size()) {
5252 if (num_variables_ == 0) return;
5253 // Map rows to blocks, see class documentation.
5254 const int64_t num_block_rows = (num_rows_ + kBlockSize - 1) / kBlockSize;
5255 blocks_.resize(num_block_rows * num_variables_);
5256 const int64_t num_full_block_rows = num_rows_ / kBlockSize;
5257 for (int64_t br = 0; br < num_full_block_rows; ++br) {
5258 for (int64_t v = 0; v < num_variables_; ++v) {
5259 Block& block = blocks_[br * num_variables_ + v];
5260 for (int c = 0; c < kBlockSize; ++c) {
5261 block.coefficients[c] = rows[br * kBlockSize + c][v];
5262 }
5263 }
5264 }
5265  // If the block representation holds more rows than the original matrix,
5266  // i.e. if num_rows_ % kBlockSize != 0, the last block row must be
5267  // initialized and its non-existent rows padded.
5268 if (num_full_block_rows == num_block_rows) return; // No padding needed.
5269 DCHECK_EQ(num_full_block_rows + 1, num_block_rows);
5270 Block* last_block_row = &blocks_[num_full_block_rows * num_variables_];
5271 const int first_coefficient_to_pad =
5272 num_rows_ - num_full_block_rows * kBlockSize;
5273 for (int64_t v = 0; v < num_variables_; ++v) {
5274 Block& block = last_block_row[v];
5275 // Copy original coefficients.
5276 for (int c = 0; c < first_coefficient_to_pad; ++c) {
5277 const int64_t r = num_full_block_rows * kBlockSize + c;
5278 block.coefficients[c] = rows[r][v];
5279 }
5280 // Pad coefficients with a copy of the first coefficient.
5281 for (int c = first_coefficient_to_pad; c < kBlockSize; ++c) {
5282 block.coefficients[c] = block.coefficients[0];
5283 }
5284 }
5285}
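// For example (kBlockSize = 4 is assumed here purely for illustration), a
// 10 x 3 matrix is stored as ceil(10 / 4) = 3 block rows of 3 blocks each;
// the last block row holds rows 8 and 9 and pads its two remaining slots with
// copies of row 8's coefficients, which leaves the row-wise maximum unchanged.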
5286
5287double MaxLinearExpressionEvaluator::Evaluate(
5288    absl::Span<const double> values) const {
5289 DCHECK_EQ(values.size(), num_variables_);
5290 constexpr double kInfinity = std::numeric_limits<double>::infinity();
5291 if (num_rows_ == 0) return -kInfinity;
5292 if (num_variables_ == 0) return 0.0;
5293 DCHECK_EQ(blocks_.size() % num_variables_, 0);
5294 Block maximums;
5295 absl::c_fill(maximums.coefficients, -kInfinity);
5296 for (auto row = blocks_.begin(); row != blocks_.end();
5297 row += num_variables_) {
5298 Block evaluations;
5299 absl::c_fill(evaluations.coefficients, 0.0);
5300 for (int64_t v = 0; v < num_variables_; ++v) {
5301 evaluations.BlockMultiplyAdd(row[v], values[v]);
5302 }
5303 maximums.MaximumWith(evaluations);
5304 }
5305 return maximums.Maximum();
5306}
5307
5308} // namespace operations_research