Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
routing_filters.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// Implementation of local search filters for routing models.
15
17
18#include <stddef.h>
19
20#include <algorithm>
21#include <cstdint>
22#include <deque>
23#include <functional>
24#include <limits>
25#include <memory>
26#include <optional>
27#include <string>
28#include <tuple>
29#include <utility>
30#include <vector>
31
32#include "absl/algorithm/container.h"
33#include "absl/container/btree_set.h"
34#include "absl/container/flat_hash_map.h"
35#include "absl/container/flat_hash_set.h"
36#include "absl/flags/flag.h"
37#include "absl/log/check.h"
38#include "absl/strings/str_cat.h"
39#include "absl/strings/string_view.h"
40#include "absl/types/span.h"
44#include "ortools/base/types.h"
49#include "ortools/constraint_solver/routing_parameters.pb.h"
51#include "ortools/util/bitset.h"
55
56ABSL_FLAG(bool, routing_strong_debug_checks, false,
57 "Run stronger checks in debug; these stronger tests might in "
58 "particular change the complexity of the code.");
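// Illustrative aside (not part of the original file): a flag defined with
// ABSL_FLAG as above is read with absl::GetFlag() and can be overridden in
// tests or tools, e.g.
//
//   #include "absl/flags/declare.h"
//   #include "absl/flags/flag.h"
//   ABSL_DECLARE_FLAG(bool, routing_strong_debug_checks);
//   ...
//   if (absl::GetFlag(FLAGS_routing_strong_debug_checks)) { /* extra checks */ }
//   absl::SetFlag(&FLAGS_routing_strong_debug_checks, true);
//
// or from the command line with --routing_strong_debug_checks=true.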
59
60namespace operations_research {
61
62namespace {
63// Route constraint filter
64
65class RouteConstraintFilter : public BasePathFilter {
66 public:
67 explicit RouteConstraintFilter(const RoutingModel& routing_model)
68 : BasePathFilter(routing_model.Nexts(),
69 routing_model.Size() + routing_model.vehicles(),
70 routing_model.GetPathsMetadata()),
71 routing_model_(routing_model),
72 current_vehicle_cost_(0),
73 delta_vehicle_cost_(0),
74 current_vehicle_costs_(routing_model.vehicles(), 0) {
75 start_to_vehicle_.resize(Size(), -1);
76 vehicle_to_start_.resize(routing_model.vehicles());
77 for (int v = 0; v < routing_model.vehicles(); v++) {
78 const int64_t start = routing_model.Start(v);
79 start_to_vehicle_[start] = v;
80 vehicle_to_start_[v] = start;
81 }
82 }
83 ~RouteConstraintFilter() override = default;
84 std::string DebugString() const override { return "RouteConstraintFilter"; }
85 int64_t GetSynchronizedObjectiveValue() const override {
86 return current_vehicle_cost_;
87 }
88 int64_t GetAcceptedObjectiveValue() const override {
89 return lns_detected() ? 0 : delta_vehicle_cost_;
90 }
91
92 private:
93 void OnSynchronizePathFromStart(int64_t start) override {
94 route_.clear();
95 int64_t node = start;
96 while (node < Size()) {
97 route_.push_back(node);
98 node = Value(node);
99 }
100 route_.push_back(node);
101 std::optional<int64_t> route_cost = routing_model_.GetRouteCost(route_);
102 DCHECK(route_cost.has_value());
103 current_vehicle_costs_[start_to_vehicle_[start]] = route_cost.value();
104 }
105 void OnAfterSynchronizePaths() override {
106 current_vehicle_cost_ = 0;
107 for (int vehicle = 0; vehicle < vehicle_to_start_.size(); vehicle++) {
108 const int64_t start = vehicle_to_start_[vehicle];
109 DCHECK_EQ(vehicle, start_to_vehicle_[start]);
110 if (!IsVarSynced(start)) {
111 return;
112 }
113 CapAddTo(current_vehicle_costs_[vehicle], &current_vehicle_cost_);
114 }
115 }
116 bool InitializeAcceptPath() override {
117 delta_vehicle_cost_ = current_vehicle_cost_;
118 return true;
119 }
120 bool AcceptPath(int64_t path_start, int64_t /*chain_start*/,
121 int64_t /*chain_end*/) override {
122 delta_vehicle_cost_ =
123 CapSub(delta_vehicle_cost_,
124 current_vehicle_costs_[start_to_vehicle_[path_start]]);
125 route_.clear();
126 int64_t node = path_start;
127 while (node < Size()) {
128 route_.push_back(node);
129 node = GetNext(node);
130 }
131 route_.push_back(node);
132 std::optional<int64_t> route_cost = routing_model_.GetRouteCost(route_);
133 if (!route_cost.has_value()) {
134 return false;
135 }
136 CapAddTo(route_cost.value(), &delta_vehicle_cost_);
137 return true;
138 }
139 bool FinalizeAcceptPath(int64_t /*objective_min*/,
140 int64_t objective_max) override {
141 return delta_vehicle_cost_ <= objective_max;
142 }
143
144 const RoutingModel& routing_model_;
145 int64_t current_vehicle_cost_;
146 int64_t delta_vehicle_cost_;
147 std::vector<int64_t> current_vehicle_costs_;
148 std::vector<int64_t> vehicle_to_start_;
149 std::vector<int> start_to_vehicle_;
150 std::vector<int64_t> route_;
151};
152
153} // namespace
154
155IntVarLocalSearchFilter* MakeRouteConstraintFilter(
156 const RoutingModel& routing_model) {
157 return routing_model.solver()->RevAlloc(
158 new RouteConstraintFilter(routing_model));
159}
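// Illustrative sketch (an assumed standalone helper, not part of this file):
// RouteConstraintFilter, like the other cost filters below, keeps a committed
// cost per path and, in Accept(), swaps the touched path's committed cost for
// its recomputed cost before comparing the total to the objective upper bound.
// CapSub/CapAddTo are the saturating helpers used throughout this file.
inline bool AcceptPathCostDeltaExample(int64_t committed_total_cost,
                                       int64_t committed_path_cost,
                                       int64_t new_path_cost,
                                       int64_t objective_max) {
  int64_t delta_cost = CapSub(committed_total_cost, committed_path_cost);
  CapAddTo(new_path_cost, &delta_cost);
  return delta_cost <= objective_max;
}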
160
161namespace {
162
163// Max active vehicles filter.
164
165class MaxActiveVehiclesFilter : public IntVarLocalSearchFilter {
166 public:
167 explicit MaxActiveVehiclesFilter(const RoutingModel& routing_model)
168 : IntVarLocalSearchFilter(routing_model.Nexts()),
169 routing_model_(routing_model),
170 is_active_(routing_model.vehicles(), false),
171 active_vehicles_(0) {}
172 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
173 int64_t /*objective_min*/, int64_t /*objective_max*/) override {
174 const int64_t kUnassigned = -1;
175 const Assignment::IntContainer& container = delta->IntVarContainer();
176 int current_active_vehicles = active_vehicles_;
177 for (const IntVarElement& new_element : container.elements()) {
178 IntVar* const var = new_element.Var();
179 int64_t index = kUnassigned;
180 if (FindIndex(var, &index) && routing_model_.IsStart(index)) {
181 if (new_element.Min() != new_element.Max()) {
182 // LNS detected.
183 return true;
184 }
185 const int vehicle = routing_model_.VehicleIndex(index);
186 const bool is_active =
187 (new_element.Min() != routing_model_.End(vehicle));
188 if (is_active && !is_active_[vehicle]) {
189 ++current_active_vehicles;
190 } else if (!is_active && is_active_[vehicle]) {
191 --current_active_vehicles;
192 }
193 }
194 }
195 return current_active_vehicles <=
196 routing_model_.GetMaximumNumberOfActiveVehicles();
197 }
198
199 private:
200 void OnSynchronize(const Assignment* /*delta*/) override {
201 active_vehicles_ = 0;
202 for (int i = 0; i < routing_model_.vehicles(); ++i) {
203 const int index = routing_model_.Start(i);
204 if (IsVarSynced(index) && Value(index) != routing_model_.End(i)) {
205 is_active_[i] = true;
206 ++active_vehicles_;
207 } else {
208 is_active_[i] = false;
209 }
210 }
211 }
212
213 const RoutingModel& routing_model_;
214 std::vector<bool> is_active_;
215 int active_vehicles_;
216};
217} // namespace
218
219IntVarLocalSearchFilter* MakeMaxActiveVehiclesFilter(
220 const RoutingModel& routing_model) {
221 return routing_model.solver()->RevAlloc(
222 new MaxActiveVehiclesFilter(routing_model));
223}
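// Illustrative sketch (an assumed standalone helper, not part of this file):
// the rule used by MaxActiveVehiclesFilter — a vehicle is active exactly when
// the next of its start node is not its own end node, i.e. it serves at least
// one visit.
inline bool VehicleIsActiveExample(
    const RoutingModel& routing_model,
    const std::function<int64_t(int64_t)>& next_accessor, int vehicle) {
  return next_accessor(routing_model.Start(vehicle)) !=
         routing_model.End(vehicle);
}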
224
225namespace {
226
227class ActiveNodeGroupFilter : public IntVarLocalSearchFilter {
228 public:
229 explicit ActiveNodeGroupFilter(const RoutingModel& routing_model)
230 : IntVarLocalSearchFilter(routing_model.Nexts()),
231 routing_model_(routing_model),
232 active_count_per_group_(routing_model.GetSameActivityGroupsCount(),
233 {.active = 0, .unknown = 0}),
234 node_is_active_(routing_model.Nexts().size(), false),
235 node_is_unknown_(routing_model.Nexts().size(), false) {}
236
237 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
238 int64_t /*objective_min*/, int64_t /*objective_max*/) override {
239 active_count_per_group_.Revert();
240 const Assignment::IntContainer& container = delta->IntVarContainer();
241 for (const IntVarElement& new_element : container.elements()) {
242 IntVar* const var = new_element.Var();
243 int64_t index = -1;
244 if (!FindIndex(var, &index)) continue;
245 const int group = routing_model_.GetSameActivityGroupOfIndex(index);
246 ActivityCounts counts = active_count_per_group_.Get(group);
247 // Change contribution to counts: remove old state, add new state.
248 if (node_is_unknown_[index]) --counts.unknown;
249 if (node_is_active_[index]) --counts.active;
250 if (new_element.Min() != new_element.Max()) {
251 ++counts.unknown;
252 } else if (new_element.Min() != index) {
253 ++counts.active;
254 }
255 active_count_per_group_.Set(group, counts);
256 }
257 for (const int group : active_count_per_group_.ChangedIndices()) {
258 const ActivityCounts counts = active_count_per_group_.Get(group);
259 const int group_size =
260 routing_model_.GetSameActivityIndicesOfGroup(group).size();
261 // The group constraint is respected iff either 0 or the group size lies
262 // inside the interval [num_active, num_active + num_unknown].
263 if (counts.active == 0) continue;
264 if (counts.active <= group_size &&
265 group_size <= counts.active + counts.unknown) {
266 continue;
267 }
268 return false;
269 }
270 return true;
271 }
272 std::string DebugString() const override { return "ActiveNodeGroupFilter"; }
273
274 private:
275 void OnSynchronize(const Assignment* /*delta*/) override {
276 const int num_groups = routing_model_.GetSameActivityGroupsCount();
277 for (int group = 0; group < num_groups; ++group) {
278 ActivityCounts counts = {.active = 0, .unknown = 0};
279 for (int node : routing_model_.GetSameActivityIndicesOfGroup(group)) {
280 if (IsVarSynced(node)) {
281 const bool is_active = (Value(node) != node);
282 node_is_active_[node] = is_active;
283 node_is_unknown_[node] = false;
284 counts.active += is_active ? 1 : 0;
285 } else {
286 ++counts.unknown;
287 node_is_unknown_[node] = true;
288 node_is_active_[node] = false;
289 }
290 }
291 active_count_per_group_.Set(group, counts);
292 }
293 active_count_per_group_.Commit();
294 }
295
296 const RoutingModel& routing_model_;
297 struct ActivityCounts {
298 int active;
299 int unknown;
300 };
301 CommittableVector<ActivityCounts> active_count_per_group_;
302 // node_is_active_[node] is true iff node was synced and active at last
303 // Synchronize().
304 std::vector<bool> node_is_active_;
305 // node_is_unknown_[node] is true iff node was not synced at last
306 // Synchronize().
307 std::vector<bool> node_is_unknown_;
308};
309
310} // namespace
311
312IntVarLocalSearchFilter* MakeActiveNodeGroupFilter(
313 const RoutingModel& routing_model) {
314 return routing_model.solver()->RevAlloc(
315 new ActiveNodeGroupFilter(routing_model));
316}
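// Illustrative sketch (an assumed standalone helper, not part of this file):
// the "same activity" condition checked by ActiveNodeGroupFilter. With `active`
// nodes already forced active and `unknown` nodes still undecided, the group of
// `group_size` nodes must still be able to end up all-inactive or all-active:
inline bool GroupCanBeSatisfiedExample(int active, int unknown, int group_size) {
  if (active == 0) return true;  // Everything may still become inactive.
  return active <= group_size && group_size <= active + unknown;
}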
317
318namespace {
319
320// Node disjunction filter class.
321class NodeDisjunctionFilter : public IntVarLocalSearchFilter {
322 public:
323 explicit NodeDisjunctionFilter(const RoutingModel& routing_model,
324 bool filter_cost)
325 : IntVarLocalSearchFilter(routing_model.Nexts()),
326 routing_model_(routing_model),
327 count_per_disjunction_(routing_model.GetNumberOfDisjunctions(),
328 {.active = 0, .inactive = 0}),
329 synchronized_objective_value_(std::numeric_limits<int64_t>::min()),
330 accepted_objective_value_(std::numeric_limits<int64_t>::min()),
331 filter_cost_(filter_cost),
332 has_mandatory_disjunctions_(routing_model.HasMandatoryDisjunctions()) {}
333
334 using Disjunction = RoutingModel::DisjunctionIndex;
335
336 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
337 int64_t /*objective_min*/, int64_t objective_max) override {
338 count_per_disjunction_.Revert();
339 bool lns_detected = false;
340 // Update the active/inactive counts of each modified disjunction.
341 for (const IntVarElement& element : delta->IntVarContainer().elements()) {
342 int64_t node = -1;
343 if (!FindIndex(element.Var(), &node)) continue;
344 lns_detected |= element.Min() != element.Max();
345 // Compute difference in how this node contributes to activity counts.
346 const bool is_var_synced = IsVarSynced(node);
347 const bool was_active = is_var_synced && Value(node) != node;
348 const bool is_active = node < element.Min() || element.Max() < node;
349 ActivityCount contribution_delta = {.active = 0, .inactive = 0};
350 if (is_var_synced) {
351 contribution_delta.active -= was_active;
352 contribution_delta.inactive -= !was_active;
353 }
354 contribution_delta.active += is_active;
355 contribution_delta.inactive += !is_active;
356 // Common shortcut: if the change is neutral, counts stay the same.
357 if (contribution_delta.active == 0 && contribution_delta.inactive == 0) {
358 continue;
359 }
360 // Change counts of all disjunctions affected by this node.
361 for (const Disjunction disjunction :
362 routing_model_.GetDisjunctionIndices(node)) {
363 ActivityCount new_count =
364 count_per_disjunction_.Get(disjunction.value());
365 new_count.active += contribution_delta.active;
366 new_count.inactive += contribution_delta.inactive;
367 count_per_disjunction_.Set(disjunction.value(), new_count);
368 }
369 }
370 // Check if any disjunction has too many active nodes.
371 for (const int index : count_per_disjunction_.ChangedIndices()) {
372 if (count_per_disjunction_.Get(index).active >
373 routing_model_.GetDisjunctionMaxCardinality(Disjunction(index))) {
374 return false;
375 }
376 }
377 if (lns_detected || (!filter_cost_ && !has_mandatory_disjunctions_)) {
378 accepted_objective_value_ = 0;
379 return true;
380 }
381 // Update penalty costs for disjunctions.
382 accepted_objective_value_ = synchronized_objective_value_;
383 for (const int index : count_per_disjunction_.ChangedIndices()) {
384 // If num inactives did not change, skip. Common shortcut.
385 const int old_inactives =
386 count_per_disjunction_.GetCommitted(index).inactive;
387 const int new_inactives = count_per_disjunction_.Get(index).inactive;
388 if (old_inactives == new_inactives) continue;
389 // If this disjunction has no penalty for inactive nodes, skip.
390 const Disjunction disjunction(index);
391 const int64_t penalty = routing_model_.GetDisjunctionPenalty(disjunction);
392 if (penalty == 0) continue;
393
394 // Compute the new cost of activity bound violations.
395 const int max_inactives =
396 routing_model_.GetDisjunctionNodeIndices(disjunction).size() -
397 routing_model_.GetDisjunctionMaxCardinality(disjunction);
398 int new_violation = std::max(0, new_inactives - max_inactives);
399 int old_violation = std::max(0, old_inactives - max_inactives);
400 // If nodes are mandatory, there can be no violation.
401 if (penalty < 0 && new_violation > 0) return false;
402 if (routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction) ==
403 RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE) {
404 new_violation = std::min(1, new_violation);
405 old_violation = std::min(1, old_violation);
406 }
407 CapAddTo(CapProd(penalty, (new_violation - old_violation)),
408 &accepted_objective_value_);
409 }
410 // Only compare to the max, as only a cost lower bound is computed.
411 return accepted_objective_value_ <= objective_max;
412 }
413 std::string DebugString() const override { return "NodeDisjunctionFilter"; }
414 int64_t GetSynchronizedObjectiveValue() const override {
415 return synchronized_objective_value_;
416 }
417 int64_t GetAcceptedObjectiveValue() const override {
418 return accepted_objective_value_;
419 }
420
421 private:
422 void OnSynchronize(const Assignment* /*delta*/) override {
423 synchronized_objective_value_ = 0;
424 count_per_disjunction_.Revert();
425 const int num_disjunctions = routing_model_.GetNumberOfDisjunctions();
426 for (Disjunction disjunction(0); disjunction < num_disjunctions;
427 ++disjunction) {
428 // Count number of active/inactive nodes of this disjunction.
429 ActivityCount count = {.active = 0, .inactive = 0};
430 const auto& nodes = routing_model_.GetDisjunctionNodeIndices(disjunction);
431 for (const int64_t node : nodes) {
432 if (!IsVarSynced(node)) continue;
433 const int is_active = Value(node) != node;
434 count.active += is_active;
435 count.inactive += !is_active;
436 }
437 count_per_disjunction_.Set(disjunction.value(), count);
438 // Add penalty of this disjunction to total cost.
439 if (!filter_cost_) continue;
440 const int64_t penalty = routing_model_.GetDisjunctionPenalty(disjunction);
441 const int max_actives =
442 routing_model_.GetDisjunctionMaxCardinality(disjunction);
443 int violation = count.inactive - (nodes.size() - max_actives);
444 if (violation > 0 && penalty > 0) {
445 if (routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction) ==
446 RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE) {
447 violation = std::min(1, violation);
448 }
449 CapAddTo(CapProd(penalty, violation), &synchronized_objective_value_);
450 }
451 }
452 count_per_disjunction_.Commit();
453 accepted_objective_value_ = synchronized_objective_value_;
454 }
455
456 const RoutingModel& routing_model_;
457 struct ActivityCount {
458 int active = 0;
459 int inactive = 0;
460 };
461 CommittableVector<ActivityCount> count_per_disjunction_;
462 int64_t synchronized_objective_value_;
463 int64_t accepted_objective_value_;
464 const bool filter_cost_;
465 const bool has_mandatory_disjunctions_;
466};
467} // namespace
468
469IntVarLocalSearchFilter* MakeNodeDisjunctionFilter(
470 const RoutingModel& routing_model, bool filter_cost) {
471 return routing_model.solver()->RevAlloc(
472 new NodeDisjunctionFilter(routing_model, filter_cost));
473}
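// Illustrative sketch (an assumed standalone helper, not part of this file):
// the penalty term maintained by NodeDisjunctionFilter for one disjunction. Up
// to size - max_cardinality nodes may be inactive for free; each extra inactive
// node costs `penalty` (or the penalty is charged once with PENALIZE_ONCE), and
// a negative penalty marks the nodes as mandatory.
inline int64_t DisjunctionPenaltyCostExample(int num_inactive, int size,
                                             int max_cardinality,
                                             int64_t penalty,
                                             bool penalize_once) {
  int violation = std::max(0, num_inactive - (size - max_cardinality));
  if (penalize_once) violation = std::min(1, violation);
  return CapProd(penalty, violation);
}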
474
475const int64_t BasePathFilter::kUnassigned = -1;
476
477BasePathFilter::BasePathFilter(const std::vector<IntVar*>& nexts,
478 int next_domain_size,
479 const PathsMetadata& paths_metadata)
480 : IntVarLocalSearchFilter(nexts),
481 paths_metadata_(paths_metadata),
482 node_path_starts_(next_domain_size, kUnassigned),
483 new_synchronized_unperformed_nodes_(nexts.size()),
484 new_nexts_(nexts.size(), kUnassigned),
485 touched_paths_(nexts.size()),
486 touched_path_chain_start_ends_(nexts.size(), {kUnassigned, kUnassigned}),
487 ranks_(next_domain_size, kUnassigned),
488 lns_detected_(false) {}
489
490bool BasePathFilter::Accept(const Assignment* delta,
491 const Assignment* /*deltadelta*/,
492 int64_t objective_min, int64_t objective_max) {
493 lns_detected_ = false;
494 for (const int touched : delta_touched_) {
495 new_nexts_[touched] = kUnassigned;
496 }
497 delta_touched_.clear();
498 const Assignment::IntContainer& container = delta->IntVarContainer();
499 delta_touched_.reserve(container.Size());
500 // Determining touched paths and their touched chain start and ends (a node is
501 // touched if it corresponds to an element of delta or if an element of
502 // delta points to it).
503 // The start and end of a touched path subchain will have remained on the same
504 // path and will correspond to the min and max ranks of touched nodes in the
505 // current assignment.
506 for (int64_t touched_path : touched_paths_.PositionsSetAtLeastOnce()) {
507 touched_path_chain_start_ends_[touched_path] = {kUnassigned, kUnassigned};
508 }
509 touched_paths_.SparseClearAll();
510
511 const auto update_touched_path_chain_start_end = [this](int64_t index) {
512 const int64_t start = node_path_starts_[index];
513 if (start == kUnassigned) return;
514 touched_paths_.Set(start);
515
516 int64_t& chain_start = touched_path_chain_start_ends_[start].first;
517 if (chain_start == kUnassigned || paths_metadata_.IsStart(index) ||
518 ranks_[index] < ranks_[chain_start]) {
519 chain_start = index;
520 }
521
522 int64_t& chain_end = touched_path_chain_start_ends_[start].second;
523 if (chain_end == kUnassigned || paths_metadata_.IsEnd(index) ||
524 ranks_[index] > ranks_[chain_end]) {
525 chain_end = index;
526 }
527 };
528
529 for (const IntVarElement& new_element : container.elements()) {
530 IntVar* const var = new_element.Var();
531 int64_t index = kUnassigned;
532 if (FindIndex(var, &index)) {
533 if (!new_element.Bound()) {
534 // LNS detected
535 lns_detected_ = true;
536 return true;
537 }
538 new_nexts_[index] = new_element.Value();
539 delta_touched_.push_back(index);
540 update_touched_path_chain_start_end(index);
541 update_touched_path_chain_start_end(new_nexts_[index]);
542 }
543 }
544 // Checking feasibility of touched paths.
545 if (!InitializeAcceptPath()) return false;
546 for (const int64_t touched_start : touched_paths_.PositionsSetAtLeastOnce()) {
547 const std::pair<int64_t, int64_t> start_end =
548 touched_path_chain_start_ends_[touched_start];
549 if (!AcceptPath(touched_start, start_end.first, start_end.second)) {
550 return false;
551 }
552 }
553 // NOTE: FinalizeAcceptPath() is only called if InitializeAcceptPath() is true
554 // and all paths are accepted.
555 return FinalizeAcceptPath(objective_min, objective_max);
556}
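// Illustrative sketch (an assumed standalone helper, not part of this file):
// how Accept() above narrows the touched subchain of a path. Each touched node
// extends the chain so that it runs from the lowest-ranked to the highest-ranked
// touched node of that path, ranks being those of the last Synchronize().
struct TouchedChainExample {
  int64_t start = -1;
  int64_t end = -1;
};
inline void UpdateTouchedChainExample(int64_t node, const std::vector<int>& rank,
                                      TouchedChainExample* chain) {
  if (chain->start == -1 || rank[node] < rank[chain->start]) chain->start = node;
  if (chain->end == -1 || rank[node] > rank[chain->end]) chain->end = node;
}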
557
558void BasePathFilter::ComputePathStarts(std::vector<int64_t>* path_starts,
559 std::vector<int>* index_to_path) {
560 path_starts->clear();
561 const int nexts_size = Size();
562 index_to_path->assign(nexts_size, kUnassigned);
563 Bitset64<> has_prevs(nexts_size);
564 for (int i = 0; i < nexts_size; ++i) {
565 if (!IsVarSynced(i)) {
566 has_prevs.Set(i);
567 } else {
568 const int next = Value(i);
569 if (next < nexts_size) {
570 has_prevs.Set(next);
571 }
572 }
573 }
574 for (int i = 0; i < nexts_size; ++i) {
575 if (!has_prevs[i]) {
576 (*index_to_path)[i] = path_starts->size();
577 path_starts->push_back(i);
578 }
579 }
580}
581
582void BasePathFilter::SynchronizeFullAssignment() {
583 for (int64_t index = 0; index < Size(); index++) {
584 if (IsVarSynced(index) && Value(index) == index &&
585 node_path_starts_[index] != kUnassigned) {
586 // index was performed before and is now unperformed.
587 new_synchronized_unperformed_nodes_.Set(index);
588 }
589 }
590 // Marking inactive nodes (which are not on a path).
591 node_path_starts_.assign(node_path_starts_.size(), kUnassigned);
592 // Marking nodes on a path and storing next values.
593 const int nexts_size = Size();
594 for (int path = 0; path < NumPaths(); ++path) {
595 const int64_t start = Start(path);
596 node_path_starts_[start] = start;
597 if (IsVarSynced(start)) {
598 int64_t next = Value(start);
599 while (next < nexts_size) {
600 int64_t node = next;
601 node_path_starts_[node] = start;
602 DCHECK(IsVarSynced(node));
603 next = Value(node);
604 }
605 node_path_starts_[next] = start;
606 }
607 node_path_starts_[End(path)] = start;
608 }
609 for (const int touched : delta_touched_) {
610 new_nexts_[touched] = kUnassigned;
611 }
612 delta_touched_.clear();
613 OnBeforeSynchronizePaths(/*synchronizing_all_paths=*/true);
614 UpdateAllRanks();
615 OnAfterSynchronizePaths();
616}
617
618void BasePathFilter::OnSynchronize(const Assignment* delta) {
619 new_synchronized_unperformed_nodes_.ClearAll();
620 if (delta == nullptr || delta->Empty() ||
621 absl::c_all_of(ranks_, [](int rank) { return rank == kUnassigned; })) {
622 SynchronizeFullAssignment();
623 return;
624 }
625 const Assignment::IntContainer& container = delta->IntVarContainer();
626 touched_paths_.SparseClearAll();
627 for (const IntVarElement& new_element : container.elements()) {
628 int64_t index = kUnassigned;
629 if (FindIndex(new_element.Var(), &index)) {
630 const int64_t start = node_path_starts_[index];
631 if (start != kUnassigned) {
632 touched_paths_.Set(start);
633 if (Value(index) == index) {
634 // New unperformed node (its previous start isn't unassigned).
635 DCHECK_LT(index, new_nexts_.size());
636 new_synchronized_unperformed_nodes_.Set(index);
637 node_path_starts_[index] = kUnassigned;
638 }
639 }
640 }
641 }
642 for (const int touched : delta_touched_) {
643 new_nexts_[touched] = kUnassigned;
644 }
645 delta_touched_.clear();
646 OnBeforeSynchronizePaths(/*synchronizing_all_paths=*/false);
647 for (const int64_t touched_start : touched_paths_.PositionsSetAtLeastOnce()) {
648 int64_t node = touched_start;
649 while (node < Size()) {
650 node_path_starts_[node] = touched_start;
651 node = Value(node);
652 }
653 node_path_starts_[node] = touched_start;
654 UpdatePathRanksFromStart(touched_start);
655 OnSynchronizePathFromStart(touched_start);
656 }
657 OnAfterSynchronizePaths();
658}
659
660void BasePathFilter::UpdateAllRanks() {
661 ranks_.assign(ranks_.size(), kUnassigned);
662 for (int r = 0; r < NumPaths(); ++r) {
663 const int64_t start = Start(r);
664 if (!IsVarSynced(start)) continue;
665 UpdatePathRanksFromStart(start);
666 OnSynchronizePathFromStart(start);
667 }
668}
669
670void BasePathFilter::UpdatePathRanksFromStart(int start) {
671 int rank = 0;
672 int64_t node = start;
673 while (node < Size()) {
674 ranks_[node] = rank;
675 rank++;
676 node = Value(node);
677 }
678 ranks_[node] = rank;
679}
680
681namespace {
682
683class VehicleAmortizedCostFilter : public BasePathFilter {
684 public:
685 explicit VehicleAmortizedCostFilter(const RoutingModel& routing_model);
686 ~VehicleAmortizedCostFilter() override = default;
687 std::string DebugString() const override {
688 return "VehicleAmortizedCostFilter";
689 }
690 int64_t GetSynchronizedObjectiveValue() const override {
691 return current_vehicle_cost_;
692 }
693 int64_t GetAcceptedObjectiveValue() const override {
694 return lns_detected() ? 0 : delta_vehicle_cost_;
695 }
696
697 private:
698 void OnSynchronizePathFromStart(int64_t start) override;
699 void OnAfterSynchronizePaths() override;
700 bool InitializeAcceptPath() override;
701 bool AcceptPath(int64_t path_start, int64_t chain_start,
702 int64_t chain_end) override;
703 bool FinalizeAcceptPath(int64_t objective_min,
704 int64_t objective_max) override;
705
706 int64_t current_vehicle_cost_;
707 int64_t delta_vehicle_cost_;
708 std::vector<int> current_route_lengths_;
709 std::vector<int64_t> start_to_end_;
710 std::vector<int> start_to_vehicle_;
711 std::vector<int64_t> vehicle_to_start_;
712 const std::vector<int64_t>& linear_cost_factor_of_vehicle_;
713 const std::vector<int64_t>& quadratic_cost_factor_of_vehicle_;
714};
715
716VehicleAmortizedCostFilter::VehicleAmortizedCostFilter(
717 const RoutingModel& routing_model)
718 : BasePathFilter(routing_model.Nexts(),
719 routing_model.Size() + routing_model.vehicles(),
720 routing_model.GetPathsMetadata()),
721 current_vehicle_cost_(0),
722 delta_vehicle_cost_(0),
723 current_route_lengths_(Size(), -1),
724 linear_cost_factor_of_vehicle_(
725 routing_model.GetAmortizedLinearCostFactorOfVehicles()),
726 quadratic_cost_factor_of_vehicle_(
727 routing_model.GetAmortizedQuadraticCostFactorOfVehicles()) {
728 start_to_end_.resize(Size(), -1);
729 start_to_vehicle_.resize(Size(), -1);
730 vehicle_to_start_.resize(routing_model.vehicles());
731 for (int v = 0; v < routing_model.vehicles(); v++) {
732 const int64_t start = routing_model.Start(v);
733 start_to_vehicle_[start] = v;
734 start_to_end_[start] = routing_model.End(v);
735 vehicle_to_start_[v] = start;
736 }
737}
738
739void VehicleAmortizedCostFilter::OnSynchronizePathFromStart(int64_t start) {
740 const int64_t end = start_to_end_[start];
741 CHECK_GE(end, 0);
742 const int route_length = Rank(end) - 1;
743 CHECK_GE(route_length, 0);
744 current_route_lengths_[start] = route_length;
745}
746
747void VehicleAmortizedCostFilter::OnAfterSynchronizePaths() {
748 current_vehicle_cost_ = 0;
749 for (int vehicle = 0; vehicle < vehicle_to_start_.size(); vehicle++) {
750 const int64_t start = vehicle_to_start_[vehicle];
751 DCHECK_EQ(vehicle, start_to_vehicle_[start]);
752 if (!IsVarSynced(start)) {
753 return;
754 }
755 const int route_length = current_route_lengths_[start];
756 DCHECK_GE(route_length, 0);
757
758 if (route_length == 0) {
759 // The path is empty.
760 continue;
761 }
762
763 const int64_t linear_cost_factor = linear_cost_factor_of_vehicle_[vehicle];
764 const int64_t route_length_cost =
765 CapProd(quadratic_cost_factor_of_vehicle_[vehicle],
766 route_length * route_length);
767
768 CapAddTo(CapSub(linear_cost_factor, route_length_cost),
769 &current_vehicle_cost_);
770 }
771}
772
773bool VehicleAmortizedCostFilter::InitializeAcceptPath() {
774 delta_vehicle_cost_ = current_vehicle_cost_;
775 return true;
776}
777
778bool VehicleAmortizedCostFilter::AcceptPath(int64_t path_start,
779 int64_t chain_start,
780 int64_t chain_end) {
781 // Number of nodes previously between chain_start and chain_end
782 const int previous_chain_nodes = Rank(chain_end) - 1 - Rank(chain_start);
783 CHECK_GE(previous_chain_nodes, 0);
784 int new_chain_nodes = 0;
785 int64_t node = GetNext(chain_start);
786 while (node != chain_end) {
787 new_chain_nodes++;
788 node = GetNext(node);
789 }
790
791 const int previous_route_length = current_route_lengths_[path_start];
792 CHECK_GE(previous_route_length, 0);
793 const int new_route_length =
794 previous_route_length - previous_chain_nodes + new_chain_nodes;
795
796 const int vehicle = start_to_vehicle_[path_start];
797 CHECK_GE(vehicle, 0);
798 DCHECK_EQ(path_start, vehicle_to_start_[vehicle]);
799
800 // Update the cost related to used vehicles.
801 // TODO(user): Handle possible overflows.
802 if (previous_route_length == 0) {
803 // The route was empty before, it is no longer the case (changed path).
804 CHECK_GT(new_route_length, 0);
805 CapAddTo(linear_cost_factor_of_vehicle_[vehicle], &delta_vehicle_cost_);
806 } else if (new_route_length == 0) {
807 // The route is now empty.
808 delta_vehicle_cost_ =
809 CapSub(delta_vehicle_cost_, linear_cost_factor_of_vehicle_[vehicle]);
810 }
811
812 // Update the cost related to the sum of the squares of the route lengths.
813 const int64_t quadratic_cost_factor =
814 quadratic_cost_factor_of_vehicle_[vehicle];
815 CapAddTo(CapProd(quadratic_cost_factor,
816 previous_route_length * previous_route_length),
817 &delta_vehicle_cost_);
818 delta_vehicle_cost_ = CapSub(
819 delta_vehicle_cost_,
820 CapProd(quadratic_cost_factor, new_route_length * new_route_length));
821
822 return true;
823}
824
825bool VehicleAmortizedCostFilter::FinalizeAcceptPath(int64_t /*objective_min*/,
826 int64_t objective_max) {
827 return delta_vehicle_cost_ <= objective_max;
828}
829
830} // namespace
831
832IntVarLocalSearchFilter* MakeVehicleAmortizedCostFilter(
833 const RoutingModel& routing_model) {
834 return routing_model.solver()->RevAlloc(
835 new VehicleAmortizedCostFilter(routing_model));
836}
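// Illustrative sketch (an assumed standalone helper, not part of this file):
// the per-vehicle amortized cost tracked by VehicleAmortizedCostFilter. An
// empty route costs nothing; a route serving route_length nodes contributes
// its linear factor minus quadratic_factor * route_length^2.
inline int64_t AmortizedVehicleCostExample(int route_length,
                                           int64_t linear_cost_factor,
                                           int64_t quadratic_cost_factor) {
  if (route_length == 0) return 0;
  return CapSub(linear_cost_factor,
                CapProd(quadratic_cost_factor,
                        static_cast<int64_t>(route_length) * route_length));
}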
837
838namespace {
839
840class TypeRegulationsFilter : public BasePathFilter {
841 public:
842 explicit TypeRegulationsFilter(const RoutingModel& model);
843 ~TypeRegulationsFilter() override = default;
844 std::string DebugString() const override { return "TypeRegulationsFilter"; }
845
846 private:
847 void OnSynchronizePathFromStart(int64_t start) override;
848 bool AcceptPath(int64_t path_start, int64_t chain_start,
849 int64_t chain_end) override;
850
851 bool HardIncompatibilitiesRespected(int vehicle, int64_t chain_start,
852 int64_t chain_end);
853
854 const RoutingModel& routing_model_;
855 std::vector<int> start_to_vehicle_;
856 // The following vector is used to keep track of the type counts for hard
857 // incompatibilities.
858 std::vector<std::vector<int>> hard_incompatibility_type_counts_per_vehicle_;
859 // Used to verify the temporal incompatibilities and requirements.
860 TypeIncompatibilityChecker temporal_incompatibility_checker_;
861 TypeRequirementChecker requirement_checker_;
862};
863
864TypeRegulationsFilter::TypeRegulationsFilter(const RoutingModel& model)
865 : BasePathFilter(model.Nexts(), model.Size() + model.vehicles(),
866 model.GetPathsMetadata()),
867 routing_model_(model),
868 start_to_vehicle_(model.Size(), -1),
869 temporal_incompatibility_checker_(model,
870 /*check_hard_incompatibilities*/ false),
871 requirement_checker_(model) {
872 const int num_vehicles = model.vehicles();
873 const bool has_hard_type_incompatibilities =
874 model.HasHardTypeIncompatibilities();
875 if (has_hard_type_incompatibilities) {
876 hard_incompatibility_type_counts_per_vehicle_.resize(num_vehicles);
877 }
878 const int num_visit_types = model.GetNumberOfVisitTypes();
879 for (int vehicle = 0; vehicle < num_vehicles; vehicle++) {
880 const int64_t start = model.Start(vehicle);
881 start_to_vehicle_[start] = vehicle;
882 if (has_hard_type_incompatibilities) {
883 hard_incompatibility_type_counts_per_vehicle_[vehicle].resize(
884 num_visit_types, 0);
885 }
886 }
887}
888
889void TypeRegulationsFilter::OnSynchronizePathFromStart(int64_t start) {
890 if (!routing_model_.HasHardTypeIncompatibilities()) return;
891
892 const int vehicle = start_to_vehicle_[start];
893 CHECK_GE(vehicle, 0);
894 std::vector<int>& type_counts =
895 hard_incompatibility_type_counts_per_vehicle_[vehicle];
896 std::fill(type_counts.begin(), type_counts.end(), 0);
897 const int num_types = type_counts.size();
898
899 int64_t node = start;
900 while (node < Size()) {
901 DCHECK(IsVarSynced(node));
902 const int type = routing_model_.GetVisitType(node);
903 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
904 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
905 CHECK_LT(type, num_types);
906 type_counts[type]++;
907 }
908 node = Value(node);
909 }
910}
911
912bool TypeRegulationsFilter::HardIncompatibilitiesRespected(int vehicle,
913 int64_t chain_start,
914 int64_t chain_end) {
915 if (!routing_model_.HasHardTypeIncompatibilities()) return true;
916
917 const std::vector<int>& previous_type_counts =
918 hard_incompatibility_type_counts_per_vehicle_[vehicle];
919
920 absl::flat_hash_map</*type*/ int, /*new_count*/ int> new_type_counts;
921 absl::flat_hash_set<int> types_to_check;
922
923 // Go through the new nodes on the path and increment their type counts.
924 int64_t node = GetNext(chain_start);
925 while (node != chain_end) {
926 const int type = routing_model_.GetVisitType(node);
927 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
928 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
929 DCHECK_LT(type, previous_type_counts.size());
930 int& type_count = gtl::LookupOrInsert(&new_type_counts, type,
931 previous_type_counts[type]);
932 if (type_count++ == 0) {
933 // New type on the route, mark to check its incompatibilities.
934 types_to_check.insert(type);
935 }
936 }
937 node = GetNext(node);
938 }
939
940 // Update new_type_counts by decrementing the occurrence of the types of the
941 // nodes no longer on the route.
942 if (IsVarSynced(chain_start)) {
943 node = Value(chain_start);
944 while (node != chain_end) {
945 const int type = routing_model_.GetVisitType(node);
946 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
947 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
948 DCHECK_LT(type, previous_type_counts.size());
949 int& type_count = gtl::LookupOrInsert(&new_type_counts, type,
950 previous_type_counts[type]);
951 CHECK_GE(type_count, 1);
952 type_count--;
953 }
954 node = Value(node);
955 }
956 }
957
958 // Check the incompatibilities for types in types_to_check.
959 for (int type : types_to_check) {
960 for (int incompatible_type :
961 routing_model_.GetHardTypeIncompatibilitiesOfType(type)) {
962 if (gtl::FindWithDefault(new_type_counts, incompatible_type,
963 previous_type_counts[incompatible_type]) > 0) {
964 return false;
965 }
966 }
967 }
968 return true;
969}
970
971bool TypeRegulationsFilter::AcceptPath(int64_t path_start, int64_t chain_start,
972 int64_t chain_end) {
973 const int vehicle = start_to_vehicle_[path_start];
974 CHECK_GE(vehicle, 0);
975 const auto next_accessor = [this](int64_t node) { return GetNext(node); };
976 return HardIncompatibilitiesRespected(vehicle, chain_start, chain_end) &&
977 temporal_incompatibility_checker_.CheckVehicle(vehicle,
978 next_accessor) &&
979 requirement_checker_.CheckVehicle(vehicle, next_accessor);
980}
981
982} // namespace
983
984IntVarLocalSearchFilter* MakeTypeRegulationsFilter(
985 const RoutingModel& routing_model) {
986 return routing_model.solver()->RevAlloc(
987 new TypeRegulationsFilter(routing_model));
988}
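// Illustrative sketch (an assumed standalone helper, not part of this file):
// the hard-incompatibility rule enforced by TypeRegulationsFilter — a route is
// infeasible as soon as some visit type and one of its hard-incompatible types
// both have a positive count on it.
inline bool HardIncompatibilityViolatedExample(
    const std::vector<int>& type_counts_on_route, int type,
    absl::Span<const int> incompatible_types) {
  if (type_counts_on_route[type] == 0) return false;
  for (const int other : incompatible_types) {
    if (type_counts_on_route[other] > 0) return true;
  }
  return false;
}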
989
990namespace {
991
992// ChainCumul filter. Version of dimension path filter which is O(delta) rather
993// than O(length of touched paths). Currently only supports dimensions without
994// costs (no global or local span cost, no soft bounds) and with unconstrained
995// cumul variables, except overall capacity and the cumul variables of path ends.
996
997class ChainCumulFilter : public BasePathFilter {
998 public:
999 ChainCumulFilter(const RoutingModel& routing_model,
1000 const RoutingDimension& dimension);
1001 ~ChainCumulFilter() override = default;
1002 std::string DebugString() const override {
1003 return "ChainCumulFilter(" + name_ + ")";
1004 }
1005
1006 private:
1007 void OnSynchronizePathFromStart(int64_t start) override;
1008 bool AcceptPath(int64_t path_start, int64_t chain_start,
1009 int64_t chain_end) override;
1010
1011 const std::vector<IntVar*> cumuls_;
1012 std::vector<int64_t> start_to_vehicle_;
1013 std::vector<int64_t> start_to_end_;
1014 std::vector<const RoutingModel::TransitCallback2*> evaluators_;
1015 const std::vector<int64_t> vehicle_capacities_;
1016 std::vector<int64_t> current_path_cumul_mins_;
1017 std::vector<int64_t> current_max_of_path_end_cumul_mins_;
1018 std::vector<int64_t> old_nexts_;
1019 std::vector<int> old_vehicles_;
1020 std::vector<int64_t> current_transits_;
1021 const std::string name_;
1022};
1023
1024ChainCumulFilter::ChainCumulFilter(const RoutingModel& routing_model,
1025 const RoutingDimension& dimension)
1026 : BasePathFilter(routing_model.Nexts(), dimension.cumuls().size(),
1027 routing_model.GetPathsMetadata()),
1028 cumuls_(dimension.cumuls()),
1029 evaluators_(routing_model.vehicles(), nullptr),
1030 vehicle_capacities_(dimension.vehicle_capacities()),
1031 current_path_cumul_mins_(dimension.cumuls().size(), 0),
1032 current_max_of_path_end_cumul_mins_(dimension.cumuls().size(), 0),
1033 old_nexts_(routing_model.Size(), kUnassigned),
1034 old_vehicles_(routing_model.Size(), kUnassigned),
1035 current_transits_(routing_model.Size(), 0),
1036 name_(dimension.name()) {
1037 start_to_vehicle_.resize(Size(), -1);
1038 start_to_end_.resize(Size(), -1);
1039 for (int i = 0; i < routing_model.vehicles(); ++i) {
1040 start_to_vehicle_[routing_model.Start(i)] = i;
1041 start_to_end_[routing_model.Start(i)] = routing_model.End(i);
1042 evaluators_[i] = &dimension.transit_evaluator(i);
1043 }
1044}
1045
1046// On synchronization, maintain "propagated" cumul mins and max level of cumul
1047// from each node to the end of the path; to be used by AcceptPath to
1048// incrementally check feasibility.
1049void ChainCumulFilter::OnSynchronizePathFromStart(int64_t start) {
1050 const int vehicle = start_to_vehicle_[start];
1051 std::vector<int64_t> path_nodes;
1052 int64_t node = start;
1053 int64_t cumul = cumuls_[node]->Min();
1054 while (node < Size()) {
1055 path_nodes.push_back(node);
1056 current_path_cumul_mins_[node] = cumul;
1057 const int64_t next = Value(node);
1058 if (next != old_nexts_[node] || vehicle != old_vehicles_[node]) {
1059 old_nexts_[node] = next;
1060 old_vehicles_[node] = vehicle;
1061 current_transits_[node] = (*evaluators_[vehicle])(node, next);
1062 }
1063 CapAddTo(current_transits_[node], &cumul);
1064 cumul = std::max(cumuls_[next]->Min(), cumul);
1065 node = next;
1066 }
1067 path_nodes.push_back(node);
1068 current_path_cumul_mins_[node] = cumul;
1069 int64_t max_cumuls = cumul;
1070 for (int i = path_nodes.size() - 1; i >= 0; --i) {
1071 const int64_t node = path_nodes[i];
1072 max_cumuls = std::max(max_cumuls, current_path_cumul_mins_[node]);
1073 current_max_of_path_end_cumul_mins_[node] = max_cumuls;
1074 }
1075}
1076
1077// The complexity of the method is O(size of the chain (chain_start...chain_end)).
1078bool ChainCumulFilter::AcceptPath(int64_t path_start, int64_t chain_start,
1079 int64_t chain_end) {
1080 const int vehicle = start_to_vehicle_[path_start];
1081 const int64_t capacity = vehicle_capacities_[vehicle];
1082 int64_t node = chain_start;
1083 int64_t cumul = current_path_cumul_mins_[node];
1084 while (node != chain_end) {
1085 const int64_t next = GetNext(node);
1086 if (IsVarSynced(node) && next == Value(node) &&
1087 vehicle == old_vehicles_[node]) {
1088 CapAddTo(current_transits_[node], &cumul);
1089 } else {
1090 CapAddTo((*evaluators_[vehicle])(node, next), &cumul);
1091 }
1092 cumul = std::max(cumuls_[next]->Min(), cumul);
1093 if (cumul > capacity) return false;
1094 node = next;
1095 }
1096 const int64_t end = start_to_end_[path_start];
1097 const int64_t end_cumul_delta =
1098 CapSub(current_path_cumul_mins_[end], current_path_cumul_mins_[node]);
1099 const int64_t after_chain_cumul_delta =
1100 CapSub(current_max_of_path_end_cumul_mins_[node],
1101 current_path_cumul_mins_[node]);
1102 return CapAdd(cumul, after_chain_cumul_delta) <= capacity &&
1103 CapAdd(cumul, end_cumul_delta) <= cumuls_[end]->Max();
1104}
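// Illustrative sketch (an assumed standalone helper, not part of this file):
// the forward step used in AcceptPath() above. Moving along the chain, the
// earliest feasible cumul at the next node is the current cumul plus the
// transit, pushed up to that node's own lower bound, and it must stay within
// the vehicle capacity.
inline bool PropagateOneTransitExample(int64_t transit, int64_t next_cumul_min,
                                       int64_t capacity, int64_t* cumul) {
  CapAddTo(transit, cumul);
  *cumul = std::max(next_cumul_min, *cumul);
  return *cumul <= capacity;
}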
1105
1106} // namespace
1107
1108bool PropagateLightweightVehicleBreaks(
1109 int path, DimensionValues& dimension_values,
1110 absl::Span<const std::pair<int64_t, int64_t>> interbreaks) {
1111 using Interval = DimensionValues::Interval;
1112 using VehicleBreak = DimensionValues::VehicleBreak;
1113 const int64_t total_travel = dimension_values.TravelSums(path).back();
1114 // Improve bounds on span/start max/end min using time windows: breaks that
1115 // must occur inside the path have their duration accumulated into
1116 // lb_span_tw; they also widen [start_max, end_min).
1117 int64_t lb_span_tw = total_travel;
1118 const absl::Span<Interval> cumuls = dimension_values.MutableCumuls(path);
1119 Interval& start = cumuls.front();
1120 Interval& end = cumuls.back();
1121 // TODO(user): consider adding deductions from the path to the break.
1122 for (const VehicleBreak& br : dimension_values.MutableVehicleBreaks(path)) {
1123 // If the break may be performed before the path, after the path, or if it
1124 // is not performed at all, ignore the break.
1125 if (br.is_performed.min == 0) continue;
1126 if (br.end.min <= start.max || end.min <= br.start.max) continue;
1127 // This break must be performed inside the path: widen the path.
1128 CapAddTo(br.duration.min, &lb_span_tw);
1129 if (!start.DecreaseMax(br.start.max)) return false;
1130 if (!end.IncreaseMin(br.end.min)) return false;
1131 }
1132 Interval& span = dimension_values.MutableSpan(path);
1133 if (!span.IncreaseMin(std::max(lb_span_tw, CapSub(end.min, start.max)))) {
1134 return false;
1135 }
1136 // Compute num_feasible_breaks = number of breaks that may fit into the route,
1137 // and [break_start_min, break_end_max) = max coverage of breaks.
1138 int64_t break_start_min = kint64max;
1139 int64_t break_end_max = kint64min;
1140
1141 if (!start.IncreaseMin(CapSub(end.min, span.max))) return false;
1142 if (!end.DecreaseMax(CapAdd(start.max, span.max))) return false;
1143
1144 int num_feasible_breaks = 0;
1145 for (const VehicleBreak& br : dimension_values.MutableVehicleBreaks(path)) {
1146 if (start.min <= br.start.max && br.end.min <= end.max) {
1147 break_start_min = std::min(break_start_min, br.start.min);
1148 break_end_max = std::max(break_end_max, br.end.max);
1149 ++num_feasible_breaks;
1150 }
1151 }
1152 // Improve span/start min/end max using interbreak limits: there must be
1153 // enough breaks inside the path, so that for each limit, the union of
1154 // [br.start - max_interbreak, br.end + max_interbreak) covers [start, end),
1155 // or [start, end) is shorter than max_interbreak.
1156 for (const auto& [max_interbreak, min_break_duration] : interbreaks) {
1157 // Minimal number of breaks depends on total travel:
1158 // 0 breaks for 0 <= total travel <= limit,
1159 // 1 break for limit + 1 <= total travel <= 2 * limit,
1160 // i breaks for i * limit + 1 <= total travel <= (i+1) * limit, ...
1161 if (max_interbreak == 0) {
1162 if (total_travel > 0) return false;
1163 continue;
1164 }
1165 int64_t min_num_breaks =
1166 std::max<int64_t>(0, (total_travel - 1) / max_interbreak);
1167 if (span.min > max_interbreak) {
1168 min_num_breaks = std::max<int64_t>(min_num_breaks, 1);
1169 }
1170 if (min_num_breaks > num_feasible_breaks) return false;
1171 if (!span.IncreaseMin(CapAdd(
1172 total_travel, CapProd(min_num_breaks, min_break_duration)))) {
1173 return false;
1174 }
1175 if (min_num_breaks > 0) {
1176 if (!start.IncreaseMin(CapSub(break_start_min, max_interbreak))) {
1177 return false;
1178 }
1179 if (!end.DecreaseMax(CapAdd(break_end_max, max_interbreak))) {
1180 return false;
1181 }
1182 }
1183 }
1184 return start.DecreaseMax(CapSub(end.max, span.min)) &&
1185 end.IncreaseMin(CapAdd(start.min, span.min));
1186}
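// Illustrative sketch (an assumed standalone helper, not part of this file):
// the minimum break count used above. For instance, with an interbreak limit of
// 10 and a total travel of 25, at least (25 - 1) / 10 = 2 breaks must interrupt
// the route, so the span min is raised to the travel plus two minimum break
// durations.
inline int64_t MinNumBreaksExample(int64_t total_travel,
                                   int64_t max_interbreak /* assumed > 0 */) {
  return std::max<int64_t>(0, (total_travel - 1) / max_interbreak);
}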
1187
1188namespace {
1189
1190class PathCumulFilter : public BasePathFilter {
1191 public:
1192 PathCumulFilter(const RoutingModel& routing_model,
1193 const RoutingDimension& dimension,
1194 bool propagate_own_objective_value,
1195 bool filter_objective_cost, bool may_use_optimizers);
1196 ~PathCumulFilter() override = default;
1197 std::string DebugString() const override {
1198 return "PathCumulFilter(" + name_ + ")";
1199 }
1200 int64_t GetSynchronizedObjectiveValue() const override {
1201 return propagate_own_objective_value_ ? synchronized_objective_value_ : 0;
1202 }
1203 int64_t GetAcceptedObjectiveValue() const override {
1204 return lns_detected() || !propagate_own_objective_value_
1205 ? 0
1206 : accepted_objective_value_;
1207 }
1208 bool UsesDimensionOptimizers() {
1209 if (!may_use_optimizers_) return false;
1210 for (int vehicle = 0; vehicle < routing_model_.vehicles(); ++vehicle) {
1211 if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) return true;
1212 }
1213 return false;
1214 }
1215
1216 private:
1217 using Interval = DimensionValues::Interval;
1218 struct SoftBound {
1219 int64_t bound = -1;
1220 int64_t coefficient = 0;
1221 };
1222
1223 // Data extractors used in constructor.
1224 std::vector<Interval> ExtractInitialCumulIntervals();
1225 std::vector<Interval> ExtractInitialSlackIntervals();
1226 std::vector<std::vector<RoutingDimension::NodePrecedence>>
1227 ExtractNodeIndexToPrecedences() const;
1228 std::vector<SoftBound> ExtractCumulSoftUpperBounds() const;
1229 std::vector<SoftBound> ExtractCumulSoftLowerBounds() const;
1230 std::vector<const PiecewiseLinearFunction*> ExtractCumulPiecewiseLinearCosts()
1231 const;
1232 std::vector<const RoutingModel::TransitCallback2*> ExtractEvaluators() const;
1233 using VehicleBreak = DimensionValues::VehicleBreak;
1234 std::vector<std::vector<VehicleBreak>> ExtractInitialVehicleBreaks() const;
1235
1236 bool InitializeAcceptPath() override {
1237 accepted_objective_value_ = synchronized_objective_value_;
1238 dimension_values_.Revert();
1239 cost_of_path_.Revert();
1240 global_span_cost_.Revert();
1241 location_of_node_.Revert();
1242 return true;
1243 }
1244 bool AcceptPath(int64_t path_start, int64_t chain_start,
1245 int64_t chain_end) override;
1246 bool FinalizeAcceptPath(int64_t objective_min,
1247 int64_t objective_max) override;
1248
1249 void OnBeforeSynchronizePaths(bool synchronizing_all_paths) override;
1250 void OnSynchronizePathFromStart(int64_t start) override;
1251 void OnAfterSynchronizePaths() override;
1252
1253 // TODO(user): consider making those methods external functions.
1254 // Fills the data of path in dimension_values_: nodes, cumuls, travels,
1255 // transits, travel_sums, span limits.
1256 bool FillDimensionValues(int path);
1257 // Propagates the transit constraint, cumul[r] + transit[r] == cumul[r+1],
1258 // in dimension_values_'s current path data.
1259 bool PropagateTransits(int path);
1260 // Propagates the transit constraint supposing that there are no forbidden
1261 // intervals for cumuls. This is faster than considering the intervals.
1262 bool PropagateTransitsWithoutForbiddenIntervals(int path);
1263 // Propagates both the transit constraint and cumul forbidden intervals.
1264 bool PropagateTransitsWithForbiddenIntervals(int path);
1265 // Propagates the span constraint, cumul[start] + span == cumul[end].
1266 bool PropagateSpan(int path);
1267 // Propagates cumul[delivery] <= cumul[pickup] + pickup_to_delivery_limit.
1268 bool PropagatePickupToDeliveryLimits(int path);
1269 // Propagates the vehicle breaks constraint.
1270 bool PropagateVehicleBreaks(int path);
1271
1272 // Returns true iff there are any precedence constraints to enforce.
1273 bool FilterPrecedences() const { return !node_index_to_precedences_.empty(); }
1274 // Returns true iff there is a global span cost.
1275 bool FilterGlobalSpanCost() const {
1276 return global_span_cost_coefficient_ != 0;
1277 }
1278 // Returns true iff the given path has vehicle breaks.
1279 bool FilterVehicleBreaks(int path) const {
1280 return dimension_.HasBreakConstraints() &&
1281 (!dimension_.GetBreakIntervalsOfVehicle(path).empty() ||
1282 !dimension_.GetBreakDistanceDurationOfVehicle(path).empty());
1283 }
1284 // Returns true iff the given path has a linear soft span cost.
1285 bool FilterSoftSpanCost(int path) const {
1286 return dimension_.HasSoftSpanUpperBounds() &&
1287 dimension_.GetSoftSpanUpperBoundForVehicle(path).cost > 0;
1288 }
1289 // Returns true iff the given path has a quadratic soft span cost.
1290 bool FilterSoftSpanQuadraticCost(int path) const {
1291 if (!dimension_.HasQuadraticCostSoftSpanUpperBounds()) return false;
1292 const auto [bound, cost] =
1293 dimension_.GetQuadraticCostSoftSpanUpperBoundForVehicle(path);
1294 return bound < kint64max && cost != 0;
1295 }
1296 // Returns true iff an LP/MP optimizer should be used for the given vehicle.
1297 bool FilterWithDimensionCumulOptimizerForVehicle(int vehicle) const {
1298 if (!may_use_optimizers_) return false;
1299 if (!cumul_piecewise_linear_costs_.empty()) return false;
1300
1301 int num_linear_constraints = 0;
1302 if (dimension_.GetSpanCostCoefficientForVehicle(vehicle) > 0 ||
1303 dimension_.GetSlackCostCoefficientForVehicle(vehicle) > 0) {
1304 ++num_linear_constraints;
1305 }
1306 if (FilterSoftSpanCost(vehicle)) ++num_linear_constraints;
1307 if (!cumul_soft_upper_bounds_.empty()) ++num_linear_constraints;
1308 if (!cumul_soft_lower_bounds_.empty()) ++num_linear_constraints;
1309 if (path_span_upper_bounds_[vehicle] < kint64max) {
1310 ++num_linear_constraints;
1311 }
1312 const bool has_breaks = FilterVehicleBreaks(vehicle);
1313 if (has_breaks) ++num_linear_constraints;
1314 // The DimensionCumulOptimizer is used to compute a more precise value of
1315 // the cost related to the cumul values (soft bounds and span/slack costs).
1316 // It is also used to guarantee feasibility with complex mixes of
1317 // constraints and in particular in the presence of break requests along
1318 // other constraints. Therefore, without breaks, we only use the optimizer
1319 // when the costs are actually used to filter the solutions, i.e. when
1320 // filter_objective_cost_ is true.
1321 return num_linear_constraints >= 2 &&
1322 (has_breaks || filter_objective_cost_);
1323 }
1324
1325 // Data about the routing problem, used as read-only input.
1326 const RoutingModel& routing_model_;
1327 const RoutingDimension& dimension_;
1328 const std::vector<Interval> initial_cumul_;
1329 const std::vector<Interval> initial_slack_;
1330 const std::vector<std::vector<VehicleBreak>> initial_vehicle_breaks_;
1331 // Maps vehicle/path to their values, values are always present.
1332 const std::vector<const RoutingModel::TransitCallback2*> evaluators_;
1333 const std::vector<int64_t> path_capacities_;
1334 const std::vector<int64_t> path_span_upper_bounds_;
1335 const std::vector<int64_t> path_total_slack_cost_coefficients_;
1336
1337 // Global span cost data.
1338 // Global span cost coefficient: the global span interval is
1339 // [min_{vehicle} cumul(start), max_{vehicle} cumul(end)),
1340 // over vehicles that are nonempty or used when empty.
1341 // The global span cost is the length of the interval times this coefficient.
1342 //
1343 // This class keeps data about active paths at Synchronize(): paths sorted
1344 // by decreasing span min, decreasing end min, and increasing start max.
1345 // At Accept(), it first propagates all pathwise constraints in changed
1346 // paths. Then it computes:
1347 // - active_start_max = min_{active vehicle} cumul_[vehicle].front().max
1348 // - active_end_min = max_{active vehicle} cumul_[vehicle].back().min
1349 // - active_span_min = max_{active vehicle} span_[vehicle].min,
1350 // which allow to compute a lower bound on the global span, as
1351 // max(active_end_min - active_start_max, active_span_min, 0).
1352 //
1353 // To find the active_ values, this class merges bounds from the active paths,
1354 // among changed paths and unchanged paths:
1355 // - changed paths have their new bounds in dimension_values_.
1356 // - since committed paths have their bounds sorted in the vectors below,
1357 // scanning for the first path that is unchanged yields the min or max
1358 // value for all unchanged paths.
1359 // Both types of scan are done in O(#changed paths) time.
1360 const int64_t global_span_cost_coefficient_;
1361 struct ValuedPath {
1362 int64_t value;
1363 int path;
1364 bool operator<(const ValuedPath& other) const {
1365 return std::tie(value, path) < std::tie(other.value, other.path);
1366 }
1367 bool operator>(const ValuedPath& other) const {
1368 return std::tie(value, path) > std::tie(other.value, other.path);
1369 }
1370 };
1371 // Committed active paths, sorted by decreasing span min.
1372 std::vector<ValuedPath> paths_by_decreasing_span_min_;
1373 // Committed active paths, sorted by decreasing end min.
1374 std::vector<ValuedPath> paths_by_decreasing_end_min_;
1375 // Committed active paths, sorted by increasing start max.
1376 std::vector<ValuedPath> paths_by_increasing_start_max_;
1377 // Holds committed/current global span cost.
1378 CommittableValue<int64_t> global_span_cost_;
1379
1380 // Empty if there are no soft bounds, otherwise maps each node to its bound.
1381 const std::vector<SoftBound> cumul_soft_upper_bounds_;
1382 // Empty if there are no soft bounds, otherwise maps each node to its bound.
1383 const std::vector<SoftBound> cumul_soft_lower_bounds_;
1384 // Empty if there are no linear costs, otherwise maps each node to its cost.
1385 const std::vector<const PiecewiseLinearFunction*>
1386 cumul_piecewise_linear_costs_;
1387 // True iff some cumul has forbidden intervals.
1388 const bool has_forbidden_intervals_;
1389
1390 // Data reflecting information on path variables for the committed and the
1391 // current solution.
1392 DimensionValues dimension_values_;
1393 // Maps each path to the sum of its path-only costs: span/slack cost,
1394 // soft cumul costs, soft span limits.
1395 CommittableVector<int64_t> cost_of_path_;
1396 int64_t synchronized_objective_value_;
1397 int64_t accepted_objective_value_;
1398
1399 struct RankAndIndex {
1400 int rank = -1;
1401 int index = -1;
1402 };
1403 // When examining a path for pickup to delivery limits, allows finding whether
1404 // some pickup alternative is on the path, and at what rank (position).
1405 // Only the Revert() method of CommittableVector is used, to reset all
1406 // locations to the default dummy value before examining a path.
1407 CommittableVector<RankAndIndex> pickup_rank_and_alternative_index_of_pair_;
1408
1409 // node_index_to_precedences_[node_index] contains all NodePrecedence elements
1410 // with node_index as either "first_node" or "second_node".
1411 // This vector is empty if there are no precedences on the dimension_.
1412 const std::vector<std::vector<RoutingDimension::NodePrecedence>>
1413 node_index_to_precedences_;
1414 struct PathAndRank {
1415 int path = -1;
1416 int rank = -1;
1417 };
1418 // Maps the location of each node in the committed and current solutions.
1419 // This is used when enforcing precedence constraints.
1420 CommittableVector<PathAndRank> location_of_node_;
1421
1422 // Name of the dimension, for debugging/profiling purposes.
1423 const std::string name_;
1424
1425 LocalDimensionCumulOptimizer* lp_optimizer_;
1426 LocalDimensionCumulOptimizer* mp_optimizer_;
1427 const std::function<int64_t(int64_t)> path_accessor_;
1428 const bool filter_objective_cost_;
1429 // True iff this object may use an LP or MP optimizer to solve sub-problems.
1430 const bool may_use_optimizers_;
1431 const bool propagate_own_objective_value_;
1432};
1433
1434namespace {
1435template <typename T>
1436std::vector<T> SumOfVectors(const std::vector<T>& v1,
1437 const std::vector<T>& v2) {
1438 DCHECK_EQ(v1.size(), v2.size());
1439 std::vector<T> sum(v1.size());
1440 absl::c_transform(v1, v2, sum.begin(), [](T a, T b) { return CapAdd(a, b); });
1441 return sum;
1442}
1443} // namespace
1444
1445std::vector<PathCumulFilter::Interval>
1446PathCumulFilter::ExtractInitialCumulIntervals() {
1447 std::vector<Interval> intervals;
1448 intervals.reserve(dimension_.cumuls().size());
1449 for (const IntVar* cumul : dimension_.cumuls()) {
1450 intervals.push_back({cumul->Min(), cumul->Max()});
1451 }
1452 return intervals;
1453}
1454
1455std::vector<PathCumulFilter::Interval>
1456PathCumulFilter::ExtractInitialSlackIntervals() {
1457 std::vector<Interval> intervals;
1458 intervals.reserve(dimension_.slacks().size());
1459 for (const IntVar* slack : dimension_.slacks()) {
1460 intervals.push_back({slack->Min(), slack->Max()});
1461 }
1462 return intervals;
1463}
1464
1465std::vector<PathCumulFilter::SoftBound>
1466PathCumulFilter::ExtractCumulSoftUpperBounds() const {
1467 const int num_cumuls = dimension_.cumuls().size();
1468 std::vector<SoftBound> bounds(num_cumuls,
1469 {.bound = kint64max, .coefficient = 0});
1470 bool has_some_bound = false;
1471 for (int i = 0; i < num_cumuls; ++i) {
1472 if (!dimension_.HasCumulVarSoftUpperBound(i)) continue;
1473 const int64_t bound = dimension_.GetCumulVarSoftUpperBound(i);
1474 const int64_t coeff = dimension_.GetCumulVarSoftUpperBoundCoefficient(i);
1475 bounds[i] = {.bound = bound, .coefficient = coeff};
1476 has_some_bound |= bound < kint64max && coeff != 0;
1477 }
1478 if (!has_some_bound) bounds.clear();
1479 return bounds;
1480}
1481
1482std::vector<PathCumulFilter::SoftBound>
1483PathCumulFilter::ExtractCumulSoftLowerBounds() const {
1484 const int num_cumuls = dimension_.cumuls().size();
1485 std::vector<SoftBound> bounds(num_cumuls, {.bound = 0, .coefficient = 0});
1486 bool has_some_bound = false;
1487 for (int i = 0; i < num_cumuls; ++i) {
1488 if (!dimension_.HasCumulVarSoftLowerBound(i)) continue;
1489 const int64_t bound = dimension_.GetCumulVarSoftLowerBound(i);
1490 const int64_t coeff = dimension_.GetCumulVarSoftLowerBoundCoefficient(i);
1491 bounds[i] = {.bound = bound, .coefficient = coeff};
1492 has_some_bound |= bound > 0 && coeff != 0;
1493 }
1494 if (!has_some_bound) bounds.clear();
1495 return bounds;
1496}
1497
1498std::vector<const PiecewiseLinearFunction*>
1499PathCumulFilter::ExtractCumulPiecewiseLinearCosts() const {
1500 const int num_cumuls = dimension_.cumuls().size();
1501 std::vector<const PiecewiseLinearFunction*> costs(num_cumuls, nullptr);
1502 bool has_some_cost = false;
1503 for (int i = 0; i < dimension_.cumuls().size(); ++i) {
1504 if (!dimension_.HasCumulVarPiecewiseLinearCost(i)) continue;
1505 const PiecewiseLinearFunction* const cost =
1506 dimension_.GetCumulVarPiecewiseLinearCost(i);
1507 if (cost == nullptr) continue;
1508 has_some_cost = true;
1509 costs[i] = cost;
1510 }
1511 if (!has_some_cost) costs.clear();
1512 return costs;
1513}
1514
1515std::vector<const RoutingModel::TransitCallback2*>
1516PathCumulFilter::ExtractEvaluators() const {
1517 const int num_paths = NumPaths();
1518 std::vector<const RoutingModel::TransitCallback2*> evaluators(num_paths);
1519 for (int i = 0; i < num_paths; ++i) {
1520 evaluators[i] = &dimension_.transit_evaluator(i);
1521 }
1522 return evaluators;
1523}
1524
1525std::vector<std::vector<RoutingDimension::NodePrecedence>>
1526PathCumulFilter::ExtractNodeIndexToPrecedences() const {
1527 std::vector<std::vector<RoutingDimension::NodePrecedence>>
1528 node_index_to_precedences;
1529 const std::vector<RoutingDimension::NodePrecedence>& node_precedences =
1530 dimension_.GetNodePrecedences();
1531 if (!node_precedences.empty()) {
1532 node_index_to_precedences.resize(initial_cumul_.size());
1533 for (const auto& node_precedence : node_precedences) {
1534 node_index_to_precedences[node_precedence.first_node].push_back(
1535 node_precedence);
1536 node_index_to_precedences[node_precedence.second_node].push_back(
1537 node_precedence);
1538 }
1539 }
1540 return node_index_to_precedences;
1541}
1542
1543std::vector<std::vector<DimensionValues::VehicleBreak>>
1544PathCumulFilter::ExtractInitialVehicleBreaks() const {
1545 const int num_vehicles = routing_model_.vehicles();
1546 std::vector<std::vector<VehicleBreak>> vehicle_breaks(num_vehicles);
1547 if (!dimension_.HasBreakConstraints()) return vehicle_breaks;
1548 for (int v = 0; v < num_vehicles; ++v) {
1549 for (const IntervalVar* br : dimension_.GetBreakIntervalsOfVehicle(v)) {
1550 vehicle_breaks[v].push_back(
1551 {.start = {.min = br->StartMin(), .max = br->StartMax()},
1552 .end = {.min = br->EndMin(), .max = br->EndMax()},
1553 .duration = {.min = br->DurationMin(), .max = br->DurationMax()},
1554 .is_performed = {.min = br->MustBePerformed(),
1555 .max = br->MayBePerformed()}});
1556 }
1557 }
1558 return vehicle_breaks;
1559}
1560
1561PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model,
1562 const RoutingDimension& dimension,
1563 bool propagate_own_objective_value,
1564 bool filter_objective_cost,
1565 bool may_use_optimizers)
1566 : BasePathFilter(routing_model.Nexts(), dimension.cumuls().size(),
1567 routing_model.GetPathsMetadata()),
1568 routing_model_(routing_model),
1569 dimension_(dimension),
1570 initial_cumul_(ExtractInitialCumulIntervals()),
1571 initial_slack_(ExtractInitialSlackIntervals()),
1572 initial_vehicle_breaks_(ExtractInitialVehicleBreaks()),
1573 evaluators_(ExtractEvaluators()),
1574
1575 path_capacities_(dimension.vehicle_capacities()),
1576 path_span_upper_bounds_(dimension.vehicle_span_upper_bounds()),
1577 path_total_slack_cost_coefficients_(
1578 SumOfVectors(dimension.vehicle_span_cost_coefficients(),
1579 dimension.vehicle_slack_cost_coefficients())),
1580 global_span_cost_coefficient_(dimension.global_span_cost_coefficient()),
1581 global_span_cost_(0),
1582 cumul_soft_upper_bounds_(ExtractCumulSoftUpperBounds()),
1583 cumul_soft_lower_bounds_(ExtractCumulSoftLowerBounds()),
1584 cumul_piecewise_linear_costs_(ExtractCumulPiecewiseLinearCosts()),
1585 has_forbidden_intervals_(
1586 absl::c_any_of(dimension.forbidden_intervals(),
1587 [](const SortedDisjointIntervalList& intervals) {
1588 return intervals.NumIntervals() > 0;
1589 })),
1590
1591 dimension_values_(routing_model.vehicles(), dimension.cumuls().size()),
1592 cost_of_path_(NumPaths(), 0),
1593 synchronized_objective_value_(0),
1594 accepted_objective_value_(0),
1595 pickup_rank_and_alternative_index_of_pair_(
1596 routing_model_.GetPickupAndDeliveryPairs().size(), {-1, -1}),
1597 node_index_to_precedences_(ExtractNodeIndexToPrecedences()),
1598 location_of_node_(dimension.cumuls().size(), {-1, -1}),
1599 name_(dimension.name()),
1600 lp_optimizer_(routing_model.GetMutableLocalCumulLPOptimizer(dimension)),
1601 mp_optimizer_(routing_model.GetMutableLocalCumulMPOptimizer(dimension)),
1602 path_accessor_([this](int64_t node) { return GetNext(node); }),
1603 filter_objective_cost_(filter_objective_cost),
1604 may_use_optimizers_(may_use_optimizers),
1605 propagate_own_objective_value_(propagate_own_objective_value) {
1606#ifndef NDEBUG
1607 for (int vehicle = 0; vehicle < routing_model.vehicles(); vehicle++) {
1608 if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) {
1609 DCHECK_NE(lp_optimizer_, nullptr);
1610 DCHECK_NE(mp_optimizer_, nullptr);
1611 }
1612 }
1613#endif // NDEBUG
1614}
1615
1616bool PathCumulFilter::PropagateTransits(int path) {
1617 if (has_forbidden_intervals_) {
1618 return PropagateTransitsWithForbiddenIntervals(path);
1619 } else {
1620 return PropagateTransitsWithoutForbiddenIntervals(path);
1621 }
1622}
1623
1624bool PathCumulFilter::PropagateTransitsWithForbiddenIntervals(int path) {
1625 DCHECK_LT(0, dimension_values_.NumNodes(path));
1626 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1627 absl::Span<const Interval> transits = dimension_values_.Transits(path);
1628 absl::Span<const int> nodes = dimension_values_.Nodes(path);
1629 const int num_nodes = dimension_values_.NumNodes(path);
1630 auto reduce_to_allowed_interval = [this](Interval& interval,
1631 int node) -> bool {
1632 DCHECK(!interval.IsEmpty());
1633 interval.min = dimension_.GetFirstPossibleGreaterOrEqualValueForNode(
1634 node, interval.min);
1635 if (interval.IsEmpty()) return false;
1636 interval.max =
1637 dimension_.GetLastPossibleLessOrEqualValueForNode(node, interval.max);
1638 return !interval.IsEmpty();
1639 };
1640 // Propagate from start to end.
1641 if (!reduce_to_allowed_interval(cumuls[0], nodes[0])) return false;
1642 Interval cumul = cumuls[0];
1643 for (int r = 1; r < num_nodes; ++r) {
1644 cumul.Add(transits[r - 1]);
1645 if (!cumul.IntersectWith(cumuls[r])) return false;
1646 if (!reduce_to_allowed_interval(cumul, nodes[r])) return false;
1647 cumuls[r] = cumul;
1648 }
1649 // Propagate from end to start.
1650 for (int r = num_nodes - 2; r >= 0; --r) {
1651 cumul.Subtract(transits[r]);
1652 if (!cumul.IntersectWith(cumuls[r])) return false;
1653 if (!reduce_to_allowed_interval(cumul, nodes[r])) return false;
1654 cumuls[r] = cumul;
1655 }
1656 return true;
1657}
1658
1659bool PathCumulFilter::PropagateTransitsWithoutForbiddenIntervals(int path) {
1660 DCHECK_LT(0, dimension_values_.NumNodes(path));
1661 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1662 absl::Span<const Interval> transits = dimension_values_.Transits(path);
1663 const int num_nodes = dimension_values_.NumNodes(path);
1664 // Propagate from start to end.
1665 Interval cumul = cumuls.front();
1666 for (int r = 1; r < num_nodes; ++r) {
1667 cumul.Add(transits[r - 1]);
1668 if (!cumul.IntersectWith(cumuls[r])) return false;
1669 cumuls[r] = cumul;
1670 }
1671 // Propagate from end to start.
1672 for (int r = num_nodes - 2; r >= 0; --r) {
1673 cumul.Subtract(transits[r]);
1674 if (!cumul.IntersectWith(cumuls[r])) return false;
1675 cumuls[r] = cumul;
1676 }
1677 return true;
1678}
1679
1680bool PathCumulFilter::PropagateSpan(int path) {
1681 absl::Span<const int64_t> travel_sums = dimension_values_.TravelSums(path);
1682 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1683 // Copy values to make it clear to the compiler that we are working on
1684 // different values; we'll commit them back at the end of the modifications.
1685 Interval start = cumuls.front();
1686 Interval end = cumuls.back();
1687 Interval span = dimension_values_.Span(path);
1688 if (!span.IncreaseMin(travel_sums.back())) return false;
1689 // We propagate equation: end - start - span = 0.
1690 // In the <= 0 part of this equation, consider two of the terms at their min
1691 // and filter the max of the last term.
1692 if (!end.DecreaseMax(CapAdd(start.max, span.max))) return false;
1693 if (!start.IncreaseMin(CapSub(end.min, span.max))) return false;
1694 if (!span.IncreaseMin(CapSub(end.min, start.max))) return false;
1695 // In the >= 0 part of this equation, consider two of the terms at their max
1696 // and filter the min of the last term.
1697 if (!span.DecreaseMax(CapSub(end.max, start.min))) return false;
1698 if (!end.IncreaseMin(CapAdd(start.min, span.min))) return false;
1699 if (!start.DecreaseMax(CapSub(end.max, span.min))) return false;
1700 // Commit back to input data structures.
1701 cumuls.front() = start;
1702 cumuls.back() = end;
1703 dimension_values_.MutableSpan(path) = span;
1704 return true;
1705}
1706
1707bool PathCumulFilter::FillDimensionValues(int path) {
1708 // Fill nodes.
1709 int node = Start(path);
1710 dimension_values_.PushNode(node);
1711 while (node < Size()) {
1712 const int next = GetNext(node);
1713 dimension_values_.PushNode(next);
1714 node = next;
1715 }
1716 dimension_values_.MakePathFromNewNodes(path);
1717 // Copy cumul min/max data from cumul variables.
1718 const int num_nodes = dimension_values_.NumNodes(path);
1719 absl::Span<const int> nodes = dimension_values_.Nodes(path);
1720 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1721 const int64_t capacity = path_capacities_[path];
1722 for (int r = 0; r < num_nodes; ++r) {
1723 const int node = nodes[r];
1724 Interval cumul = initial_cumul_[node];
1725 if (!cumul.DecreaseMax(capacity)) return false;
1726 cumuls[r] = cumul;
1727 }
1728 // Extract travel and transit data, fill partial travel sums.
1729 // TODO(user): use evaluator class to copy travel values from
1730 // committed travels, instead of expensive calls to evaluator.
1731 absl::Span<int64_t> travels = dimension_values_.MutableTravels(path);
1732 absl::Span<Interval> transits = dimension_values_.MutableTransits(path);
1733 absl::Span<int64_t> travel_sums = dimension_values_.MutableTravelSums(path);
1734 const auto& evaluator = *evaluators_[path];
1735 int64_t total_travel = 0;
1736 travel_sums[0] = 0;
1737 for (int r = 1; r < num_nodes; ++r) {
1738 const int node = nodes[r - 1];
1739 const int64_t travel = evaluator(node, nodes[r]);
1740 travels[r - 1] = travel;
1741 CapAddTo(travel, &total_travel);
1742 travel_sums[r] = total_travel;
1743 Interval transit{.min = travel, .max = travel};
1744 transit.Add(initial_slack_[node]);
1745 transits[r - 1] = transit;
1746 }
1747 if (travel_sums.back() > path_span_upper_bounds_[path]) return false;
1748 dimension_values_.MutableSpan(path) = {.min = travel_sums.back(),
1749 .max = path_span_upper_bounds_[path]};
1750 dimension_values_.MutableVehicleBreaks(path) = initial_vehicle_breaks_[path];
1751 return true;
1752}
1753
1754bool PathCumulFilter::PropagatePickupToDeliveryLimits(int path) {
1755 const int num_nodes = dimension_values_.NumNodes(path);
1756 absl::Span<const int> nodes = dimension_values_.Nodes(path);
1757 absl::Span<const int64_t> travel_sums = dimension_values_.TravelSums(path);
1758 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1759
1760 // For each paired node, find whether it has a sibling.
1761 // This does not check whether the pickup-delivery constraint is satisfied,
1762 // only that if some pickup alternative and delivery alternative of a pair
1763 // appear in the same path, they do respect the pickup-delivery limit.
1764 // First, reset all pair locations to the default dummy value.
1765 pickup_rank_and_alternative_index_of_pair_.Revert();
1766 for (int rank = 1; rank < num_nodes - 1; ++rank) {
1767 const int node = nodes[rank];
1768 const std::optional<RoutingModel::PickupDeliveryPosition> pickup_pos =
1769 routing_model_.GetPickupPosition(node);
1770 // If node is a pickup, record its position and continue.
1771 if (pickup_pos.has_value()) {
1772 const auto [pair_index, pickup_alternative_index] = *pickup_pos;
1773 pickup_rank_and_alternative_index_of_pair_.Set(
1774 pair_index, {.rank = rank, .index = pickup_alternative_index});
1775 continue;
1776 }
1777 // Propagate the limit if:
1778 // - node is a delivery,
1779 // - a corresponding pickup is in the path,
1780 // - and there is a nontrivial limit.
1781 const std::optional<RoutingModel::PickupDeliveryPosition> delivery_pos =
1782 routing_model_.GetDeliveryPosition(node);
1783 if (!delivery_pos.has_value()) continue;
1784 const auto [pair_index, delivery_alt_index] = *delivery_pos;
1785 const auto [pickup_rank, pickup_alt_index] =
1786 pickup_rank_and_alternative_index_of_pair_.Get(pair_index);
1787 if (pickup_rank == -1) continue;
1788
1789 const int64_t limit = dimension_.GetPickupToDeliveryLimitForPair(
1790 pair_index, pickup_alt_index, delivery_alt_index);
1791 if (limit == kint64max) continue;
1792
1793 // If the total travel between pickup and delivery is larger than the
1794 // limit, we can reject immediately.
1795 const int64_t total_travel =
1796 CapSub(travel_sums[rank], travel_sums[pickup_rank]);
1797 if (total_travel > limit) return false;
1798
1799 // Otherwise, propagate cumul[rank] <= cumul[pickup_rank] + limit.
1800 if (!cumuls[rank].DecreaseMax(CapAdd(cumuls[pickup_rank].max, limit))) {
1801 return false;
1802 }
1803 if (!cumuls[pickup_rank].IncreaseMin(CapSub(cumuls[rank].min, limit))) {
1804 return false;
1805 }
1806 }
1807 return true;
1808}
1809
1810 bool PathCumulFilter::PropagateVehicleBreaks(int path) {
1811 return PropagateLightweightVehicleBreaks(
1812 path, dimension_values_,
1813 dimension_.GetBreakDistanceDurationOfVehicle(path));
1814}
1815
1816bool PathCumulFilter::AcceptPath(int64_t path_start, int64_t /*chain_start*/,
1817 int64_t /*chain_end*/) {
1818 const int path = GetPath(path_start);
1819 if (!FillDimensionValues(path)) return false;
1820
1821 // Filter feasibility: cumul windows, transit, span, breaks.
1822 // The first PropagateSpan() is mostly used to check feasibility of total
1823 // travel within span max, the second can tighten all start/end/span bounds.
1824 if (!PropagateSpan(path) || !PropagateTransits(path) ||
1825 !PropagateSpan(path)) {
1826 return false;
1827 }
1828 if (dimension_.HasPickupToDeliveryLimits()) {
1829 if (!PropagatePickupToDeliveryLimits(path)) return false;
1830 // Re-propagate span and transits.
1831 if (!PropagateSpan(path) || !PropagateTransits(path) ||
1832 !PropagateSpan(path)) {
1833 return false;
1834 }
1835 }
1836 if (FilterVehicleBreaks(path) && !PropagateVehicleBreaks(path)) return false;
1837
1838 // Filter costs: span (linear/quadratic/piecewise),
1839 // soft cumul windows (linear/piecewise).
1840 if (!filter_objective_cost_) return true;
1841
1842 CapSubFrom(cost_of_path_.Get(path), &accepted_objective_value_);
1843 if (routing_model_.IsEnd(GetNext(path_start)) &&
1844 !routing_model_.IsVehicleUsedWhenEmpty(path)) {
1845 cost_of_path_.Set(path, 0);
1846 return true;
1847 }
1848
1849 const absl::Span<const Interval> cumuls = dimension_values_.Cumuls(path);
1850 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
1851 const Interval span = dimension_values_.Span(path);
1852 const int64_t total_travel = dimension_values_.TravelSums(path).back();
1853 const int64_t min_total_slack = CapSub(span.min, total_travel);
1854
1855 int64_t new_path_cost = 0;
1856 // Add span and slack costs.
1857 CapAddTo(CapProd(path_total_slack_cost_coefficients_[path], min_total_slack),
1858 &new_path_cost);
1859 // Add soft span costs.
1860 if (dimension_.HasSoftSpanUpperBounds()) {
1861 const auto [bound, cost] = dimension_.GetSoftSpanUpperBoundForVehicle(path);
1862 CapAddTo(CapProd(cost, std::max<int64_t>(0, CapSub(span.min, bound))),
1863 &new_path_cost);
1864 }
1865 if (dimension_.HasQuadraticCostSoftSpanUpperBounds()) {
1866 const auto [bound, cost] =
1867 dimension_.GetQuadraticCostSoftSpanUpperBoundForVehicle(path);
1868 const int64_t violation = std::max<int64_t>(0, CapSub(span.min, bound));
1869 CapAddTo(CapProd(cost, CapProd(violation, violation)), &new_path_cost);
1870 }
1871 // Add soft cumul lower bound costs.
1872 const int num_path_nodes = dimension_values_.NumNodes(path);
1873 if (!cumul_soft_lower_bounds_.empty()) {
1874 for (int r = 0; r < num_path_nodes; ++r) {
1875 const auto [bound, coef] = cumul_soft_lower_bounds_[nodes[r]];
1876 CapAddTo(
1877 CapProd(coef, std::max<int64_t>(0, CapSub(bound, cumuls[r].max))),
1878 &new_path_cost);
1879 }
1880 }
1881 // Add soft cumul upper bound costs.
1882 if (!cumul_soft_upper_bounds_.empty()) {
1883 for (int r = 0; r < num_path_nodes; ++r) {
1884 const auto [bound, coef] = cumul_soft_upper_bounds_[nodes[r]];
1885 CapAddTo(
1886 CapProd(coef, std::max<int64_t>(0, CapSub(cumuls[r].min, bound))),
1887 &new_path_cost);
1888 }
1889 }
1890 // Add piecewise linear costs.
1891 // TODO(user): this assumes the function is increasing; make it correct in
1892 // other cases, at least for decreasing functions.
1893 if (!cumul_piecewise_linear_costs_.empty()) {
1894 for (int r = 0; r < num_path_nodes; ++r) {
1895 const PiecewiseLinearFunction* cost =
1896 cumul_piecewise_linear_costs_[nodes[r]];
1897 if (cost == nullptr) continue;
1898 CapAddTo(cost->Value(cumuls[r].min), &new_path_cost);
1899 }
1900 }
1901 // Replace the committed cost of this path with the new cost.
1902 CapAddTo(new_path_cost, &accepted_objective_value_);
1903 cost_of_path_.Set(path, new_path_cost);
1904 return true;
1905}
1906
1907bool PathCumulFilter::FinalizeAcceptPath(int64_t /*objective_min*/,
1908 int64_t objective_max) {
1909 if (lns_detected()) return true;
1910 if (FilterPrecedences()) {
1911 // Find location of all nodes: remove committed nodes of changed paths,
1912 // then add nodes of changed paths. This removes nodes that became loops.
1913 for (const int path : dimension_values_.ChangedPaths()) {
1914 for (const int node : dimension_values_.CommittedNodes(path)) {
1915 location_of_node_.Set(node, {.path = -1, .rank = -1});
1916 }
1917 }
1918 for (const int path : dimension_values_.ChangedPaths()) {
1919 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
1920 const int num_nodes = nodes.size();
1921 for (int rank = 0; rank < num_nodes; ++rank) {
1922 location_of_node_.Set(nodes[rank], {.path = path, .rank = rank});
1923 }
1924 }
1925 // For every node in changed path, check precedences.
1926 for (const int path : dimension_values_.ChangedPaths()) {
1927 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
1928 const int num_nodes = nodes.size();
1929 for (int rank = 0; rank < num_nodes; ++rank) {
1930 const int node = nodes[rank];
1931 for (const auto [first_node, second_node, offset,
1932 performed_constraint] :
1933 node_index_to_precedences_[node]) {
1934 const auto [path1, rank1] = location_of_node_.Get(first_node);
1935 const auto [path2, rank2] = location_of_node_.Get(second_node);
1936 if (path1 == -1 && !IsVarSynced(first_node)) continue;
1937 if (path2 == -1 && !IsVarSynced(second_node)) continue;
1938 switch (RoutingDimension::GetPrecedenceStatus(
1939 path1 == -1, path2 == -1, performed_constraint)) {
1940 case RoutingDimension::PrecedenceStatus::kActive:
1941 break;
1942 case RoutingDimension::PrecedenceStatus::kInactive:
1943 continue;
1944 case RoutingDimension::PrecedenceStatus::kInvalid:
1945 return false;
1946 }
1947 DCHECK(node == first_node || node == second_node);
1948 DCHECK_EQ(first_node, dimension_values_.Nodes(path1)[rank1]);
1949 DCHECK_EQ(second_node, dimension_values_.Nodes(path2)[rank2]);
1950 // Check that cumul1 + offset <= cumul2 is feasible.
1951 if (CapAdd(dimension_values_.Cumuls(path1)[rank1].min, offset) >
1952 dimension_values_.Cumuls(path2)[rank2].max)
1953 return false;
1954 }
1955 }
1956 }
1957 }
1958 if (filter_objective_cost_ && FilterGlobalSpanCost()) {
1959 int64_t global_end_min = kint64min;
1960 int64_t global_start_max = kint64max;
1961 int64_t global_span_min = 0;
1962 // Find global values of changed paths.
1963 for (const int path : dimension_values_.ChangedPaths()) {
1964 if (dimension_values_.NumNodes(path) == 0) continue;
1965 if (dimension_values_.NumNodes(path) == 2 &&
1966 !routing_model_.IsVehicleUsedWhenEmpty(path)) {
1967 continue;
1968 }
1969 global_span_min =
1970 std::max(global_span_min, dimension_values_.Span(path).min);
1971 const int64_t end_min = dimension_values_.Cumuls(path).back().min;
1972 global_end_min = std::max(global_end_min, end_min);
1973 const int64_t start_max = dimension_values_.Cumuls(path).front().max;
1974 global_start_max = std::min(global_start_max, start_max);
1975 }
1976 // Find global values of unchanged paths, merge with those of changed paths.
1977 for (const auto& [span_min, path] : paths_by_decreasing_span_min_) {
1978 if (dimension_values_.PathHasChanged(path)) continue;
1979 global_span_min = std::max(global_span_min, span_min);
1980 break;
1981 }
1982 for (const auto& [end_min, path] : paths_by_decreasing_end_min_) {
1983 if (dimension_values_.PathHasChanged(path)) continue;
1984 global_end_min = std::max(global_end_min, end_min);
1985 break;
1986 }
1987 for (const auto& [start_max, path] : paths_by_increasing_start_max_) {
1988 if (dimension_values_.PathHasChanged(path)) continue;
1989 global_start_max = std::min(global_start_max, start_max);
1990 break;
1991 }
1992 // Compute global span min.
1993 global_span_min =
1994 std::max(global_span_min, CapSub(global_end_min, global_start_max));
1995 const int64_t global_span_cost =
1996 CapProd(global_span_min, global_span_cost_coefficient_);
1997 CapSubFrom(global_span_cost_.Get(), &accepted_objective_value_);
1998 global_span_cost_.Set(global_span_cost);
1999 CapAddTo(global_span_cost, &accepted_objective_value_);
2000 }
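// Note: the global span min above is the largest per-path span min, and at
// least global_end_min - global_start_max; e.g. with path end mins {40, 55}
// and path start maxes {10, 5}, the global span is at least 55 - 5 = 50.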
2001
2002 // Filtering on objective value, calling LPs and MIPs if needed.
2003 if (may_use_optimizers_ && lp_optimizer_ != nullptr &&
2004 accepted_objective_value_ <= objective_max) {
2005 std::vector<int> paths_requiring_mp_optimizer;
2006 // TODO(user): Further optimize the LPs when we find feasible-only
2007 // solutions with the original time shares, if there's time left in the end.
2008 int solve_duration_shares = dimension_values_.ChangedPaths().size();
2009 for (const int vehicle : dimension_values_.ChangedPaths()) {
2010 if (!FilterWithDimensionCumulOptimizerForVehicle(vehicle)) {
2011 continue;
2012 }
2013 int64_t path_cost_with_lp = 0;
2014 const DimensionSchedulingStatus status =
2015 lp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
2016 vehicle, /*solve_duration_ratio=*/1.0 / solve_duration_shares,
2017 path_accessor_, /*resource=*/nullptr,
2018 filter_objective_cost_ ? &path_cost_with_lp : nullptr);
2019 solve_duration_shares--;
2020 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2021 return false;
2022 }
2023 // Replace previous path cost with the LP optimizer cost.
2024 if (filter_objective_cost_ &&
2025 (status == DimensionSchedulingStatus::OPTIMAL ||
2026 status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) &&
2027 path_cost_with_lp > cost_of_path_.Get(vehicle)) {
2028 CapSubFrom(cost_of_path_.Get(vehicle), &accepted_objective_value_);
2029 CapAddTo(path_cost_with_lp, &accepted_objective_value_);
2030 if (accepted_objective_value_ > objective_max) return false;
2031 cost_of_path_.Set(vehicle, path_cost_with_lp);
2032 }
2033 // Mark whether the cost could be improved by using mp_optimizer.
2034 DCHECK_NE(mp_optimizer_, nullptr);
2035 if (FilterVehicleBreaks(vehicle) ||
2036 (filter_objective_cost_ &&
2037 (FilterSoftSpanQuadraticCost(vehicle) ||
2038 status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY))) {
2039 paths_requiring_mp_optimizer.push_back(vehicle);
2040 }
2041 }
2042
2043 DCHECK_LE(accepted_objective_value_, objective_max);
2044
2045 solve_duration_shares = paths_requiring_mp_optimizer.size();
2046 for (const int vehicle : paths_requiring_mp_optimizer) {
2047 int64_t path_cost_with_mp = 0;
2048 const DimensionSchedulingStatus status =
2049 mp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
2050 vehicle, /*solve_duration_ratio=*/1.0 / solve_duration_shares,
2051 path_accessor_, /*resource=*/nullptr,
2052 filter_objective_cost_ ? &path_cost_with_mp : nullptr);
2053 solve_duration_shares--;
2054 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2055 return false;
2056 }
2057 if (filter_objective_cost_ &&
2058 status == DimensionSchedulingStatus::OPTIMAL &&
2059 path_cost_with_mp > cost_of_path_.Get(vehicle)) {
2060 CapSubFrom(cost_of_path_.Get(vehicle), &accepted_objective_value_);
2061 CapAddTo(path_cost_with_mp, &accepted_objective_value_);
2062 if (accepted_objective_value_ > objective_max) return false;
2063 cost_of_path_.Set(vehicle, path_cost_with_mp);
2064 }
2065 }
2066 }
2067 return accepted_objective_value_ <= objective_max;
2068}
2069
2070void PathCumulFilter::OnBeforeSynchronizePaths(bool synchronizing_all_paths) {
2071 if (synchronizing_all_paths) {
2072 // All paths are being synchronized, so we can reset all the data and let
2073 // OnSynchronizePathFromStart() calls recompute everything. Otherwise we let
2074 // the InitializeAcceptPath() call below revert the data structures.
2075 dimension_values_.Reset();
2076 cost_of_path_.Revert();
2077 cost_of_path_.SetAllAndCommit(0);
2078 location_of_node_.Revert();
2079 location_of_node_.SetAllAndCommit({-1, -1});
2080 global_span_cost_.SetAndCommit(0);
2081 synchronized_objective_value_ = 0; // Accept() relies on this value.
2082 }
2083
2084 accepted_objective_value_ = synchronized_objective_value_;
2085 if (HasAnySyncedPath()) {
2086 // Revert the data structures that are used to compute the bounds and
2087 // pathwise costs of the new solution incrementally.
2088 InitializeAcceptPath();
2089 }
2090}
2091
2092void PathCumulFilter::OnSynchronizePathFromStart(int64_t start) {
2093 DCHECK(IsVarSynced(start));
2094 // Using AcceptPath() to compute the bounds and pathwise costs of the new
2095 // solution incrementally.
2096 const int path = GetPath(start);
2097 const bool is_accepted = AcceptPath(start, start, End(path));
2098 DCHECK(is_accepted);
2099}
2100
2101void PathCumulFilter::OnAfterSynchronizePaths() {
2102 if (filter_objective_cost_ && FilterGlobalSpanCost()) {
2103 // TODO(user): check whether this could go into FinalizeAcceptPath(),
2104 // with a Commit() on some data structure.
2105 paths_by_decreasing_span_min_.clear();
2106 paths_by_decreasing_end_min_.clear();
2107 paths_by_increasing_start_max_.clear();
2108 for (int path = 0; path < NumPaths(); ++path) {
2109 const int num_path_nodes = dimension_values_.Nodes(path).size();
2110 if (num_path_nodes == 0 ||
2111 (num_path_nodes == 2 &&
2112 !routing_model_.IsVehicleUsedWhenEmpty(path))) {
2113 continue;
2114 }
2115 paths_by_decreasing_span_min_.push_back(
2116 {.value = dimension_values_.Span(path).min, .path = path});
2117 paths_by_decreasing_end_min_.push_back(
2118 {.value = dimension_values_.Cumuls(path).back().min, .path = path});
2119 paths_by_increasing_start_max_.push_back(
2120 {.value = dimension_values_.Cumuls(path)[0].max, .path = path});
2121 }
2122 absl::c_sort(paths_by_decreasing_span_min_, std::greater<ValuedPath>());
2123 absl::c_sort(paths_by_decreasing_end_min_, std::greater<ValuedPath>());
2124 absl::c_sort(paths_by_increasing_start_max_);
2125 }
2126 // Using FinalizeAcceptPath() to commit all data structures to this state.
2127 FinalizeAcceptPath(kint64min, kint64max);
2128 dimension_values_.Commit();
2129 cost_of_path_.Commit();
2130 global_span_cost_.Commit();
2131 location_of_node_.Commit();
2132 synchronized_objective_value_ = accepted_objective_value_;
2133}
2134
2135} // namespace
2136
2137IntVarLocalSearchFilter* MakePathCumulFilter(const RoutingDimension& dimension,
2138 bool propagate_own_objective_value,
2139 bool filter_objective_cost,
2140 bool may_use_optimizers) {
2141 RoutingModel& model = *dimension.model();
2142 return model.solver()->RevAlloc(
2143 new PathCumulFilter(model, dimension, propagate_own_objective_value,
2144 filter_objective_cost, may_use_optimizers));
2145}
2146
2147namespace {
2148
2149bool DimensionHasCumulCost(const RoutingDimension& dimension) {
2150 if (dimension.global_span_cost_coefficient() != 0) return true;
2151 if (dimension.HasSoftSpanUpperBounds()) return true;
2152 if (dimension.HasQuadraticCostSoftSpanUpperBounds()) return true;
2153 if (absl::c_any_of(dimension.vehicle_span_cost_coefficients(),
2154 [](int64_t coefficient) { return coefficient != 0; })) {
2155 return true;
2156 }
2157 if (absl::c_any_of(dimension.vehicle_slack_cost_coefficients(),
2158 [](int64_t coefficient) { return coefficient != 0; })) {
2159 return true;
2160 }
2161 for (int i = 0; i < dimension.cumuls().size(); ++i) {
2162 if (dimension.HasCumulVarSoftUpperBound(i)) return true;
2163 if (dimension.HasCumulVarSoftLowerBound(i)) return true;
2164 if (dimension.HasCumulVarPiecewiseLinearCost(i)) return true;
2165 }
2166 return false;
2167}
2168
2169bool DimensionHasPathCumulConstraint(const RoutingDimension& dimension) {
2170 if (dimension.HasBreakConstraints()) return true;
2171 if (dimension.HasPickupToDeliveryLimits()) return true;
2172 if (absl::c_any_of(
2173 dimension.vehicle_span_upper_bounds(), [](int64_t upper_bound) {
2174 return upper_bound != std::numeric_limits<int64_t>::max();
2175 })) {
2176 return true;
2177 }
2178 if (absl::c_any_of(dimension.slacks(),
2179 [](IntVar* slack) { return slack->Min() > 0; })) {
2180 return true;
2181 }
2182 const std::vector<IntVar*>& cumuls = dimension.cumuls();
2183 for (int i = 0; i < cumuls.size(); ++i) {
2184 IntVar* const cumul_var = cumuls[i];
2185 if (cumul_var->Min() > 0 &&
2186 cumul_var->Max() < std::numeric_limits<int64_t>::max() &&
2187 !dimension.model()->IsEnd(i)) {
2188 return true;
2189 }
2190 if (dimension.forbidden_intervals()[i].NumIntervals() > 0) return true;
2191 }
2192 return false;
2193}
2194
2195} // namespace
2196
2197 void AppendLightWeightDimensionFilters(
2198 const PathState* path_state,
2199 const std::vector<RoutingDimension*>& dimensions,
2200 std::vector<LocalSearchFilterManager::FilterEvent>* filters) {
2201 using Interval = DimensionChecker::Interval;
2202 // Add a DimensionChecker for every dimension; the checker verifies vehicle
2203 // capacities and node cumul bounds against the dimension's transits.
2204 for (const RoutingDimension* dimension : dimensions) {
2205 // Fill path capacities and classes.
2206 const int num_vehicles = dimension->model()->vehicles();
2207 std::vector<Interval> path_capacity(num_vehicles);
2208 std::vector<int> path_class(num_vehicles);
2209 for (int v = 0; v < num_vehicles; ++v) {
2210 const auto& vehicle_capacities = dimension->vehicle_capacities();
2211 path_capacity[v] = {0, vehicle_capacities[v]};
2212 path_class[v] = dimension->vehicle_to_class(v);
2213 }
2214 // For each class, retrieve the demands of each node.
2215 // Dimensions store evaluators with a double indirection for compactness:
2216 // vehicle -> vehicle_class -> evaluator_index.
2217 // We replicate this in DimensionChecker,
2218 // except we expand evaluator_index to an array of values for all nodes.
2219 const int num_vehicle_classes =
2220 1 + *std::max_element(path_class.begin(), path_class.end());
2221 const int num_cumuls = dimension->cumuls().size();
2222 const int num_slacks = dimension->slacks().size();
2223 std::vector<std::function<Interval(int64_t, int64_t)>> transits(
2224 num_vehicle_classes, nullptr);
2225 for (int vehicle = 0; vehicle < num_vehicles; ++vehicle) {
2226 const int vehicle_class = path_class[vehicle];
2227 if (transits[vehicle_class] != nullptr) continue;
2228 const auto& unary_evaluator =
2229 dimension->GetUnaryTransitEvaluator(vehicle);
2230 if (unary_evaluator != nullptr) {
2231 transits[vehicle_class] = [&unary_evaluator, dimension, num_slacks](
2232 int64_t node, int64_t) -> Interval {
2233 if (node >= num_slacks) return {0, 0};
2234 const int64_t min_transit = unary_evaluator(node);
2235 const int64_t max_transit =
2236 CapAdd(min_transit, dimension->SlackVar(node)->Max());
2237 return {min_transit, max_transit};
2238 };
2239 } else {
2240 const auto& binary_evaluator =
2241 dimension->GetBinaryTransitEvaluator(vehicle);
2242
2243 transits[vehicle_class] = [&binary_evaluator, dimension, num_slacks](
2244 int64_t node, int64_t next) -> Interval {
2245 if (node >= num_slacks) return {0, 0};
2246 const int64_t min_transit = binary_evaluator(node, next);
2247 const int64_t max_transit =
2248 CapAdd(min_transit, dimension->SlackVar(node)->Max());
2249 return {min_transit, max_transit};
2250 };
2251 }
2252 }
2253 // Fill node capacities.
2254 std::vector<Interval> node_capacity(num_cumuls);
2255 for (int node = 0; node < num_cumuls; ++node) {
2256 const IntVar* cumul = dimension->CumulVar(node);
2257 node_capacity[node] = {cumul->Min(), cumul->Max()};
2258 }
2259 // Make the dimension checker and pass ownership to the filter.
2260 auto checker = std::make_unique<DimensionChecker>(
2261 path_state, std::move(path_capacity), std::move(path_class),
2262 std::move(transits), std::move(node_capacity));
2264 LocalSearchFilter* filter = MakeDimensionFilter(
2265 dimension->model()->solver(), std::move(checker), dimension->name());
2266 filters->push_back({filter, kAccept});
2267 }
2268}
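// Example of the transit intervals built above: for a unary evaluator
// returning 4 at a node whose slack max is 2, the DimensionChecker sees the
// transit interval [4, 6]; nodes at or past num_slacks (vehicle ends) get [0, 0].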
2269
2270 void AppendDimensionCumulFilters(
2271 const std::vector<RoutingDimension*>& dimensions,
2272 const RoutingSearchParameters& parameters, bool filter_objective_cost,
2273 bool use_chain_cumul_filter,
2274 std::vector<LocalSearchFilterManager::FilterEvent>* filters) {
2276 // Filter priority depth increases with complexity of filtering.
2277 // - Dimensions without any cumul-related costs or constraints will have a
2278 // ChainCumulFilter, lowest priority depth.
2279 // - Dimensions with cumul costs or constraints, but no global span cost
2280 // and/or precedences will have a PathCumulFilter.
2281 // - Dimensions with a global span cost coefficient and/or precedences will
2282 // have a global LP filter.
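// For example, a dimension with only per-vehicle span costs gets a
// PathCumulFilter, while one with a positive global span cost coefficient
// additionally gets a global LP filter (when the objective is filtered and
// scheduling optimizers are not disabled).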
2283 const int num_dimensions = dimensions.size();
2284
2285 const bool has_dimension_optimizers =
2286 !parameters.disable_scheduling_beware_this_may_degrade_performance();
2287 std::vector<bool> use_path_cumul_filter(num_dimensions);
2288 std::vector<bool> use_cumul_bounds_propagator_filter(num_dimensions);
2289 std::vector<bool> use_global_lp_filter(num_dimensions);
2290 std::vector<bool> use_resource_assignment_filter(num_dimensions);
2291 for (int d = 0; d < num_dimensions; d++) {
2292 const RoutingDimension& dimension = *dimensions[d];
2293 const bool has_cumul_cost = DimensionHasCumulCost(dimension);
2294 use_path_cumul_filter[d] =
2295 has_cumul_cost || DimensionHasPathCumulConstraint(dimension);
2296
2297 const int num_dimension_resource_groups =
2298 dimension.model()->GetDimensionResourceGroupIndices(&dimension).size();
2299 const bool can_use_cumul_bounds_propagator_filter =
2300 !dimension.HasBreakConstraints() &&
2301 num_dimension_resource_groups == 0 &&
2302 (!filter_objective_cost || !has_cumul_cost);
2303 const bool has_precedences = !dimension.GetNodePrecedences().empty();
2304 use_global_lp_filter[d] =
2305 has_dimension_optimizers &&
2306 ((has_precedences && !can_use_cumul_bounds_propagator_filter) ||
2307 (filter_objective_cost &&
2308 dimension.global_span_cost_coefficient() > 0) ||
2309 num_dimension_resource_groups > 1);
2310
2311 use_cumul_bounds_propagator_filter[d] =
2312 has_precedences && !use_global_lp_filter[d];
2313
2314 use_resource_assignment_filter[d] =
2315 has_dimension_optimizers && num_dimension_resource_groups > 0;
2316 }
2317
2318 for (int d = 0; d < num_dimensions; d++) {
2319 const RoutingDimension& dimension = *dimensions[d];
2320 const RoutingModel& model = *dimension.model();
2321 // NOTE: We always add the [Chain|Path]CumulFilter to filter each route's
2322 // feasibility separately to try and cut bad decisions earlier in the
2323 // search, but we don't propagate the computed cost if the LPCumulFilter is
2324 // already doing it.
2325 const bool use_global_lp = use_global_lp_filter[d];
2326 const bool filter_resource_assignment = use_resource_assignment_filter[d];
2327 if (use_path_cumul_filter[d]) {
2328 PathCumulFilter* filter = model.solver()->RevAlloc(new PathCumulFilter(
2329 model, dimension, /*propagate_own_objective_value*/ !use_global_lp &&
2330 !filter_resource_assignment,
2331 filter_objective_cost, has_dimension_optimizers));
2332 const int priority = filter->UsesDimensionOptimizers() ? 1 : 0;
2333 filters->push_back({filter, kAccept, priority});
2334 } else if (use_chain_cumul_filter) {
2335 filters->push_back(
2336 {model.solver()->RevAlloc(new ChainCumulFilter(model, dimension)),
2337 kAccept, /*priority*/ 0});
2338 }
2339
2340 if (use_cumul_bounds_propagator_filter[d]) {
2341 DCHECK(!use_global_lp);
2342 DCHECK(!filter_resource_assignment);
2343 filters->push_back({MakeCumulBoundsPropagatorFilter(dimension), kAccept,
2344 /*priority*/ 2});
2345 }
2346
2347 if (filter_resource_assignment) {
2348 filters->push_back({MakeResourceAssignmentFilter(
2349 model.GetMutableLocalCumulLPOptimizer(dimension),
2350 model.GetMutableLocalCumulMPOptimizer(dimension),
2351 /*propagate_own_objective_value*/ !use_global_lp,
2352 filter_objective_cost),
2353 kAccept, /*priority*/ 3});
2354 }
2355
2356 if (use_global_lp) {
2357 filters->push_back({MakeGlobalLPCumulFilter(
2358 model.GetMutableGlobalCumulLPOptimizer(dimension),
2359 model.GetMutableGlobalCumulMPOptimizer(dimension),
2360 filter_objective_cost),
2361 kAccept, /*priority*/ 4});
2362 }
2363 }
2364}
2365
2366namespace {
2367
2368// Filter for pickup/delivery precedences.
2369class PickupDeliveryFilter : public LocalSearchFilter {
2370 public:
2371 PickupDeliveryFilter(const PathState* path_state,
2372 absl::Span<const PickupDeliveryPair> pairs,
2373 const std::vector<RoutingModel::PickupAndDeliveryPolicy>&
2374 vehicle_policies);
2375 ~PickupDeliveryFilter() override = default;
2376 bool Accept(const Assignment* /*delta*/, const Assignment* /*deltadelta*/,
2377 int64_t /*objective_min*/, int64_t /*objective_max*/) override;
2378
2379 void Reset() override;
2380 void Synchronize(const Assignment* /*assignment*/,
2381 const Assignment* /*delta*/) override;
2382 std::string DebugString() const override { return "PickupDeliveryFilter"; }
2383
2384 private:
2385 template <bool check_assigned_pairs>
2386 bool AcceptPathDispatch();
2387 template <bool check_assigned_pairs>
2388 bool AcceptPathDefault(int path);
2389 template <bool lifo, bool check_assigned_pairs>
2390 bool AcceptPathOrdered(int path);
2391 void AssignAllVisitedPairsAndLoopNodes();
2392
2393 const PathState* const path_state_;
2394 struct PairInfo {
2395 // @TODO(user): Use default member initializers once we drop C++17
2396 // support on github.
2397 bool is_paired : 1;
2398 bool is_pickup : 1;
2399 int pair_index : 30;
2400 PairInfo() : is_paired(false), pair_index(-1) {}
2401 PairInfo(bool is_paired, bool is_pickup, int pair_index)
2402 : is_paired(is_paired), is_pickup(is_pickup), pair_index(pair_index) {}
2403 };
2404 std::vector<PairInfo> pair_info_of_node_;
2405 struct PairStatus {
2406 // @TODO(user): Use default member initializers once we drop C++17
2407 // support on github.
2408 bool pickup : 1;
2409 bool delivery : 1;
2410 PairStatus() : pickup(false), delivery(false) {}
2411 };
2412 CommittableVector<PairStatus> assigned_status_of_pair_;
2413 SparseBitset<int> pair_is_open_;
2414 CommittableValue<int> num_assigned_pairs_;
2415 std::deque<int> visited_deque_;
2416 const std::vector<RoutingModel::PickupAndDeliveryPolicy> vehicle_policies_;
2417};
2418
2419PickupDeliveryFilter::PickupDeliveryFilter(
2420 const PathState* path_state, absl::Span<const PickupDeliveryPair> pairs,
2421 const std::vector<RoutingModel::PickupAndDeliveryPolicy>& vehicle_policies)
2422 : path_state_(path_state),
2423 pair_info_of_node_(path_state->NumNodes()),
2424 assigned_status_of_pair_(pairs.size(), {}),
2425 pair_is_open_(pairs.size()),
2426 num_assigned_pairs_(0),
2427 vehicle_policies_(vehicle_policies) {
2428 for (int pair_index = 0; pair_index < pairs.size(); ++pair_index) {
2429 const auto& [pickups, deliveries] = pairs[pair_index];
2430 for (const int pickup : pickups) {
2431 pair_info_of_node_[pickup] =
2432 // @TODO(user): Use aggregate initialization once we drop C++17
2433 // support on github.
2434 PairInfo{/*is_paired=*/true, /*is_pickup=*/true,
2435 /*pair_index=*/pair_index};
2436 }
2437 for (const int delivery : deliveries) {
2438 pair_info_of_node_[delivery] =
2439 // @TODO(user): Use aggregate initialization once we drop C++17
2440 // support on github.
2441 PairInfo{/*is_paired=*/true, /*is_pickup=*/false,
2442 /*pair_index=*/pair_index};
2443 }
2444 }
2445}
2446
2447void PickupDeliveryFilter::Reset() {
2448 assigned_status_of_pair_.Revert();
2449 assigned_status_of_pair_.SetAllAndCommit({});
2450 num_assigned_pairs_.SetAndCommit(0);
2451}
2452
2453void PickupDeliveryFilter::AssignAllVisitedPairsAndLoopNodes() {
2454 assigned_status_of_pair_.Revert();
2455 num_assigned_pairs_.Revert();
2456 int num_assigned_pairs = num_assigned_pairs_.Get();
2457 if (num_assigned_pairs == assigned_status_of_pair_.Size()) return;
2458 // If node is a pickup or delivery, this sets the assigned_status_of_pair_
2459 // status to true, and returns true if the whole pair *became* assigned.
2460 auto assign_node = [this](int node) -> bool {
2461 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
2462 if (!is_paired) return false;
2463 bool assigned_pair = false;
2464 PairStatus assigned_status = assigned_status_of_pair_.Get(pair_index);
2465 if (is_pickup && !assigned_status.pickup) {
2466 assigned_pair = assigned_status.delivery;
2467 assigned_status.pickup = true;
2468 assigned_status_of_pair_.Set(pair_index, assigned_status);
2469 }
2470 if (!is_pickup && !assigned_status.delivery) {
2471 assigned_pair = assigned_status.pickup;
2472 assigned_status.delivery = true;
2473 assigned_status_of_pair_.Set(pair_index, assigned_status);
2474 }
2475 return assigned_pair;
2476 };
2477 for (const int path : path_state_->ChangedPaths()) {
2478 for (const int node : path_state_->Nodes(path)) {
2479 num_assigned_pairs += assign_node(node) ? 1 : 0;
2480 }
2481 }
2482 for (const int loop : path_state_->ChangedLoops()) {
2483 num_assigned_pairs += assign_node(loop) ? 1 : 0;
2484 }
2485 num_assigned_pairs_.Set(num_assigned_pairs);
2486}
2487
2488void PickupDeliveryFilter::Synchronize(const Assignment* /*assignment*/,
2489 const Assignment* /*delta*/) {
2490 AssignAllVisitedPairsAndLoopNodes();
2491 assigned_status_of_pair_.Commit();
2492 num_assigned_pairs_.Commit();
2493}
2494
2495bool PickupDeliveryFilter::Accept(const Assignment* /*delta*/,
2496 const Assignment* /*deltadelta*/,
2497 int64_t /*objective_min*/,
2498 int64_t /*objective_max*/) {
2499 if (path_state_->IsInvalid()) return true; // Protect against CP-LNS.
2500 AssignAllVisitedPairsAndLoopNodes();
2501 const bool check_assigned_pairs =
2502 num_assigned_pairs_.Get() < assigned_status_of_pair_.Size();
2503 if (check_assigned_pairs) {
2504 return AcceptPathDispatch<true>();
2505 } else {
2506 return AcceptPathDispatch<false>();
2507 }
2508}
2509
2510template <bool check_assigned_pairs>
2511bool PickupDeliveryFilter::AcceptPathDispatch() {
2512 for (const int path : path_state_->ChangedPaths()) {
2513 switch (vehicle_policies_[path]) {
2514 case RoutingModel::PICKUP_AND_DELIVERY_NO_ORDER:
2515 if (!AcceptPathDefault<check_assigned_pairs>(path)) return false;
2516 break;
2517 case RoutingModel::PICKUP_AND_DELIVERY_LIFO:
2518 if (!AcceptPathOrdered<true, check_assigned_pairs>(path)) return false;
2519 break;
2520 case RoutingModel::PICKUP_AND_DELIVERY_FIFO:
2521 if (!AcceptPathOrdered<false, check_assigned_pairs>(path)) return false;
2522 break;
2523 default:
2524 continue;
2525 }
2526 }
2527 return true;
2528}
2529
2530template <bool check_assigned_pairs>
2531bool PickupDeliveryFilter::AcceptPathDefault(int path) {
2532 pair_is_open_.SparseClearAll();
2533 int num_opened_pairs = 0;
2534 for (const int node : path_state_->Nodes(path)) {
2535 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
2536 if (!is_paired) continue;
2537 if constexpr (check_assigned_pairs) {
2538 const PairStatus status = assigned_status_of_pair_.Get(pair_index);
2539 if (!status.pickup || !status.delivery) continue;
2540 }
2541 if (is_pickup) {
2542 pair_is_open_.Set(pair_index);
2543 ++num_opened_pairs;
2544 } else {
2545 if (!pair_is_open_[pair_index]) return false;
2546 pair_is_open_.Clear(pair_index);
2547 --num_opened_pairs;
2548 }
2549 }
2550 // For all visited pickup/delivery pairs where both sides are assigned,
2551 // the whole pair must be visited.
2552 if (num_opened_pairs > 0) return false;
2553 pair_is_open_.NotifyAllClear();
2554 return true;
2555}
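// For example, when both sides of each pair are assigned, the node sequence
// p1 p2 d2 d1 is accepted (every delivery closes an open pickup), while
// p1 d2 is rejected as soon as d2 is reached with its pair not open.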
2556
2557template <bool lifo, bool check_assigned_pairs>
2558bool PickupDeliveryFilter::AcceptPathOrdered(int path) {
2559 visited_deque_.clear();
2560 for (const int node : path_state_->Nodes(path)) {
2561 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
2562 if (!is_paired) continue;
2563 if constexpr (check_assigned_pairs) {
2564 const PairStatus status = assigned_status_of_pair_.Get(pair_index);
2565 if (!status.pickup || !status.delivery) continue;
2566 }
2567 if (is_pickup) {
2568 visited_deque_.emplace_back(pair_index);
2569 } else {
2570 if (visited_deque_.empty()) return false;
2571 if constexpr (lifo) {
2572 const int last_pair_index = visited_deque_.back();
2573 if (last_pair_index != pair_index) return false;
2574 visited_deque_.pop_back();
2575 } else {
2576 const int first_pair_index = visited_deque_.front();
2577 if (first_pair_index != pair_index) return false;
2578 visited_deque_.pop_front();
2579 }
2580 }
2581 }
2582 return visited_deque_.empty();
2583}
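// For example, under LIFO the sequence p1 p2 d2 d1 is accepted while
// p1 p2 d1 d2 is rejected (d1 does not match the most recent open pickup p2);
// under FIFO it is the other way around.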
2584
2585} // namespace
2586
2587 LocalSearchFilter* MakePickupDeliveryFilter(
2588 const RoutingModel& routing_model, const PathState* path_state,
2589 const std::vector<PickupDeliveryPair>& pairs,
2590 const std::vector<RoutingModel::PickupAndDeliveryPolicy>&
2591 vehicle_policies) {
2592 return routing_model.solver()->RevAlloc(
2593 new PickupDeliveryFilter(path_state, pairs, vehicle_policies));
2594}
2595
2596namespace {
2597
2598// Vehicle variable filter
2599class VehicleVarFilter : public LocalSearchFilter {
2600 public:
2601 VehicleVarFilter(const RoutingModel& routing_model,
2602 const PathState* path_state);
2603 bool Accept(const Assignment* delta, const Assignment* deltadelta,
2604 int64_t objective_min, int64_t objective_max) override;
2605 void Synchronize(const Assignment* /*assignment*/,
2606 const Assignment* /*delta*/) override;
2607 std::string DebugString() const override { return "VehicleVariableFilter"; }
2608
2609 private:
2610 bool HasConstrainedVehicleVars() const;
2611
2612 const PathState* path_state_;
2613 std::vector<IntVar*> vehicle_vars_;
2614 const int num_vehicles_;
2615 bool is_disabled_;
2616};
2617
2618VehicleVarFilter::VehicleVarFilter(const RoutingModel& routing_model,
2619 const PathState* path_state)
2620 : path_state_(path_state),
2621 vehicle_vars_(routing_model.VehicleVars()),
2622 num_vehicles_(routing_model.vehicles()),
2623 is_disabled_(!HasConstrainedVehicleVars()) {}
2624
2625bool VehicleVarFilter::HasConstrainedVehicleVars() const {
2626 for (const IntVar* var : vehicle_vars_) {
2627 const int unconstrained_size = num_vehicles_ + ((var->Min() >= 0) ? 0 : 1);
2628 if (var->Size() != unconstrained_size) return true;
2629 }
2630 return false;
2631}
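// Note: an unconstrained vehicle variable over v vehicles has v + 1 values
// (vehicles 0..v-1 plus -1 for "unperformed"), or v values when -1 has already
// been removed; any smaller domain means some vehicle has been forbidden for
// that node, so the filter must stay active.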
2632
2633void VehicleVarFilter::Synchronize(const Assignment* /*assignment*/,
2634 const Assignment* /*delta*/) {
2635 is_disabled_ = !HasConstrainedVehicleVars();
2636}
2637
2638bool VehicleVarFilter::Accept(const Assignment* /*delta*/,
2639 const Assignment* /*deltadelta*/,
2640 int64_t /*objective_min*/,
2641 int64_t /*objective_max*/) {
2642 if (is_disabled_) return true;
2643 for (const int path : path_state_->ChangedPaths()) {
2644 // The first and last chains are committed on the vehicle, no need to check.
2645 for (const PathState::Chain chain :
2646 path_state_->Chains(path).DropFirstChain().DropLastChain()) {
2647 for (const int node : chain) {
2648 if (!vehicle_vars_[node]->Contains(path)) return false;
2649 }
2650 }
2651 }
2652 return true;
2653}
2654
2655} // namespace
2656
2657LocalSearchFilter* MakeVehicleVarFilter(const RoutingModel& routing_model,
2658 const PathState* path_state) {
2659 return routing_model.solver()->RevAlloc(
2660 new VehicleVarFilter(routing_model, path_state));
2661}
2662
2663namespace {
2664
2665class CumulBoundsPropagatorFilter : public IntVarLocalSearchFilter {
2666 public:
2667 explicit CumulBoundsPropagatorFilter(const RoutingDimension& dimension);
2668 bool Accept(const Assignment* delta, const Assignment* deltadelta,
2669 int64_t objective_min, int64_t objective_max) override;
2670 std::string DebugString() const override {
2671 return "CumulBoundsPropagatorFilter(" + propagator_.dimension().name() +
2672 ")";
2673 }
2674
2675 private:
2676 CumulBoundsPropagator propagator_;
2677 const int64_t cumul_offset_;
2678 SparseBitset<int64_t> delta_touched_;
2679 std::vector<int64_t> delta_nexts_;
2680};
2681
2682CumulBoundsPropagatorFilter::CumulBoundsPropagatorFilter(
2683 const RoutingDimension& dimension)
2684 : IntVarLocalSearchFilter(dimension.model()->Nexts()),
2685 propagator_(&dimension),
2686 cumul_offset_(dimension.GetGlobalOptimizerOffset()),
2687 delta_touched_(Size()),
2688 delta_nexts_(Size()) {}
2689
2690bool CumulBoundsPropagatorFilter::Accept(const Assignment* delta,
2691 const Assignment* /*deltadelta*/,
2692 int64_t /*objective_min*/,
2693 int64_t /*objective_max*/) {
2694 delta_touched_.ClearAll();
2695 for (const IntVarElement& delta_element :
2696 delta->IntVarContainer().elements()) {
2697 int64_t index = -1;
2698 if (FindIndex(delta_element.Var(), &index)) {
2699 if (!delta_element.Bound()) {
2700 // LNS detected
2701 return true;
2702 }
2703 delta_touched_.Set(index);
2704 delta_nexts_[index] = delta_element.Value();
2705 }
2706 }
2707 const auto& next_accessor = [this](int64_t index) {
2708 return delta_touched_[index] ? delta_nexts_[index] : Value(index);
2709 };
2710
2711 return propagator_.PropagateCumulBounds(next_accessor, cumul_offset_);
2712}
2713
2714} // namespace
2715
2716 IntVarLocalSearchFilter* MakeCumulBoundsPropagatorFilter(
2717 const RoutingDimension& dimension) {
2718 return dimension.model()->solver()->RevAlloc(
2719 new CumulBoundsPropagatorFilter(dimension));
2720}
2721
2722namespace {
2723
2724class LPCumulFilter : public IntVarLocalSearchFilter {
2725 public:
2726 LPCumulFilter(const std::vector<IntVar*>& nexts,
2727 GlobalDimensionCumulOptimizer* optimizer,
2728 GlobalDimensionCumulOptimizer* mp_optimizer,
2729 bool filter_objective_cost);
2730 bool Accept(const Assignment* delta, const Assignment* deltadelta,
2731 int64_t objective_min, int64_t objective_max) override;
2732 int64_t GetAcceptedObjectiveValue() const override;
2733 void OnSynchronize(const Assignment* delta) override;
2734 int64_t GetSynchronizedObjectiveValue() const override;
2735 std::string DebugString() const override {
2736 return "LPCumulFilter(" + lp_optimizer_.dimension()->name() + ")";
2737 }
2738
2739 private:
2740 GlobalDimensionCumulOptimizer& lp_optimizer_;
2741 GlobalDimensionCumulOptimizer& mp_optimizer_;
2742 const bool filter_objective_cost_;
2743 int64_t synchronized_cost_without_transit_;
2744 int64_t delta_cost_without_transit_;
2745 SparseBitset<int64_t> delta_touched_;
2746 std::vector<int64_t> delta_nexts_;
2747};
2748
2749LPCumulFilter::LPCumulFilter(const std::vector<IntVar*>& nexts,
2750 GlobalDimensionCumulOptimizer* lp_optimizer,
2751 GlobalDimensionCumulOptimizer* mp_optimizer,
2752 bool filter_objective_cost)
2753 : IntVarLocalSearchFilter(nexts),
2754 lp_optimizer_(*lp_optimizer),
2755 mp_optimizer_(*mp_optimizer),
2756 filter_objective_cost_(filter_objective_cost),
2757 synchronized_cost_without_transit_(-1),
2758 delta_cost_without_transit_(-1),
2759 delta_touched_(Size()),
2760 delta_nexts_(Size()) {}
2761
2762bool LPCumulFilter::Accept(const Assignment* delta,
2763 const Assignment* /*deltadelta*/,
2764 int64_t /*objective_min*/, int64_t objective_max) {
2765 delta_touched_.ClearAll();
2766 for (const IntVarElement& delta_element :
2767 delta->IntVarContainer().elements()) {
2768 int64_t index = -1;
2769 if (FindIndex(delta_element.Var(), &index)) {
2770 if (!delta_element.Bound()) {
2771 // LNS detected
2772 return true;
2773 }
2774 delta_touched_.Set(index);
2775 delta_nexts_[index] = delta_element.Value();
2776 }
2777 }
2778 const auto& next_accessor = [this](int64_t index) {
2779 return delta_touched_[index] ? delta_nexts_[index] : Value(index);
2780 };
2781
2782 if (!filter_objective_cost_) {
2783 // No need to compute the cost of the LP, only verify its feasibility.
2784 delta_cost_without_transit_ = 0;
2785 DimensionSchedulingStatus status = lp_optimizer_.ComputeCumuls(
2786 next_accessor, {}, nullptr, nullptr, nullptr);
2787 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) {
2788 status = mp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr,
2789 nullptr);
2790 }
2791 DCHECK(status != DimensionSchedulingStatus::FEASIBLE)
2792 << "FEASIBLE without filtering objective cost should be OPTIMAL";
2793 return status == DimensionSchedulingStatus::OPTIMAL;
2794 }
2795
2796 DimensionSchedulingStatus status =
2797 lp_optimizer_.ComputeCumulCostWithoutFixedTransits(
2798 next_accessor, &delta_cost_without_transit_);
2799
2800 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY ||
2801 status == DimensionSchedulingStatus::FEASIBLE) {
2802 const DimensionSchedulingStatus lp_status = status;
2803 int64_t mp_cost;
2804 status = mp_optimizer_.ComputeCumulCostWithoutFixedTransits(next_accessor,
2805 &mp_cost);
2806 if (lp_status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY &&
2807 status == DimensionSchedulingStatus::OPTIMAL) {
2808 // TRICKY: If the MP is only feasible, the computed cost isn't a lower
2809 // bound to the problem, so we keep the LP relaxation's lower bound
2810 // found by Glop.
2811 delta_cost_without_transit_ = mp_cost;
2812 } else if (lp_status == DimensionSchedulingStatus::FEASIBLE &&
2813 status != DimensionSchedulingStatus::INFEASIBLE) {
2814 // TRICKY: Since feasible costs are not lower bounds, we keep the lowest
2815 // of the costs between the LP-feasible and CP-SAT (feasible or optimal).
2816 delta_cost_without_transit_ =
2817 std::min(delta_cost_without_transit_, mp_cost);
2818 }
2819 }
2820
2821 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2822 delta_cost_without_transit_ = std::numeric_limits<int64_t>::max();
2823 return false;
2824 }
2825 return delta_cost_without_transit_ <= objective_max;
2826}
2827
2828int64_t LPCumulFilter::GetAcceptedObjectiveValue() const {
2829 return delta_cost_without_transit_;
2830}
2831
2832void LPCumulFilter::OnSynchronize(const Assignment* /*delta*/) {
2833 // TODO(user): Try to optimize this so the LP is not called when the last
2834 // computed delta cost corresponds to the solution being synchronized.
2835 const RoutingModel& model = *lp_optimizer_.dimension()->model();
2836 const auto& next_accessor = [this, &model](int64_t index) {
2837 return IsVarSynced(index) ? Value(index)
2838 : model.IsStart(index) ? model.End(model.VehicleIndex(index))
2839 : index;
2840 };
2841
2842 if (!filter_objective_cost_) {
2843 synchronized_cost_without_transit_ = 0;
2844 }
2845 DimensionSchedulingStatus status =
2846 filter_objective_cost_
2847 ? lp_optimizer_.ComputeCumulCostWithoutFixedTransits(
2848 next_accessor, &synchronized_cost_without_transit_)
2849 : lp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr,
2850 nullptr);
2851 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) {
2852 status = filter_objective_cost_
2853 ? mp_optimizer_.ComputeCumulCostWithoutFixedTransits(
2854 next_accessor, &synchronized_cost_without_transit_)
2855 : mp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr,
2856 nullptr, nullptr);
2857 }
2858 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2859 // TODO(user): This should only happen if the LP/MIP solver times out.
2860 // DCHECK the fail wasn't due to an infeasible model.
2861 synchronized_cost_without_transit_ = 0;
2862 }
2863}
2864
2865int64_t LPCumulFilter::GetSynchronizedObjectiveValue() const {
2866 return synchronized_cost_without_transit_;
2867}
2868
2869} // namespace
2870
2871IntVarLocalSearchFilter* MakeGlobalLPCumulFilter(
2872 GlobalDimensionCumulOptimizer* lp_optimizer,
2873 GlobalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost) {
2874 DCHECK_NE(lp_optimizer, nullptr);
2875 DCHECK_NE(mp_optimizer, nullptr);
2876 const RoutingModel& model = *lp_optimizer->dimension()->model();
2877 return model.solver()->RevAlloc(new LPCumulFilter(
2878 model.Nexts(), lp_optimizer, mp_optimizer, filter_objective_cost));
2879}
2880
2881namespace {
2882
2883using ResourceGroup = RoutingModel::ResourceGroup;
2884
2885class ResourceGroupAssignmentFilter : public BasePathFilter {
2886 public:
2887 ResourceGroupAssignmentFilter(const std::vector<IntVar*>& nexts,
2888 const ResourceGroup* resource_group,
2889 LocalDimensionCumulOptimizer* lp_optimizer,
2890 LocalDimensionCumulOptimizer* mp_optimizer,
2891 bool filter_objective_cost);
2892 bool InitializeAcceptPath() override;
2893 bool AcceptPath(int64_t path_start, int64_t, int64_t) override;
2894 bool FinalizeAcceptPath(int64_t objective_min,
2895 int64_t objective_max) override;
2896 void OnBeforeSynchronizePaths(bool synchronizing_all_paths) override;
2897 void OnSynchronizePathFromStart(int64_t start) override;
2898 void OnAfterSynchronizePaths() override;
2899
2900 int64_t GetAcceptedObjectiveValue() const override {
2901 return lns_detected() ? 0 : delta_cost_without_transit_;
2902 }
2903 int64_t GetSynchronizedObjectiveValue() const override {
2904 return synchronized_cost_without_transit_;
2905 }
2906 std::string DebugString() const override {
2907 return "ResourceGroupAssignmentFilter(" + dimension_.name() + ")";
2908 }
2909
2910 private:
2911 using RCIndex = RoutingModel::ResourceClassIndex;
2912
2913 bool VehicleRequiresResourceAssignment(
2914 int vehicle, const std::function<int64_t(int64_t)>& next_accessor,
2915 bool* is_infeasible);
2916 int64_t ComputeRouteCumulCostWithoutResourceAssignment(
2917 int vehicle, const std::function<int64_t(int64_t)>& next_accessor) const;
2918
2919 const RoutingModel& model_;
2920 const RoutingDimension& dimension_;
2921 const ResourceGroup& resource_group_;
2922 LocalDimensionCumulOptimizer* lp_optimizer_;
2923 LocalDimensionCumulOptimizer* mp_optimizer_;
2924 const bool filter_objective_cost_;
2925 bool current_synch_failed_;
2926 int64_t synchronized_cost_without_transit_;
2927 int64_t delta_cost_without_transit_;
2928 std::vector<std::vector<int64_t>> vehicle_to_resource_class_assignment_costs_;
2929 std::vector<int> vehicles_requiring_resource_assignment_;
2930 std::vector<bool> vehicle_requires_resource_assignment_;
2931 std::vector<std::vector<int64_t>>
2932 delta_vehicle_to_resource_class_assignment_costs_;
2933 std::vector<int> delta_vehicles_requiring_resource_assignment_;
2934 std::vector<bool> delta_vehicle_requires_resource_assignment_;
2935
2936 std::vector<int> bound_resource_index_of_vehicle_;
2937 util_intops::StrongVector<RCIndex, absl::flat_hash_set<int>>
2938 ignored_resources_per_class_;
2939};
2940
2941ResourceGroupAssignmentFilter::ResourceGroupAssignmentFilter(
2942 const std::vector<IntVar*>& nexts, const ResourceGroup* resource_group,
2943 LocalDimensionCumulOptimizer* lp_optimizer,
2944 LocalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost)
2945 : BasePathFilter(nexts, lp_optimizer->dimension()->cumuls().size(),
2946 lp_optimizer->dimension()->model()->GetPathsMetadata()),
2947 model_(*lp_optimizer->dimension()->model()),
2948 dimension_(*lp_optimizer->dimension()),
2949 resource_group_(*resource_group),
2950 lp_optimizer_(lp_optimizer),
2951 mp_optimizer_(mp_optimizer),
2952 filter_objective_cost_(filter_objective_cost),
2953 current_synch_failed_(false),
2954 synchronized_cost_without_transit_(-1),
2955 delta_cost_without_transit_(-1) {
2956 vehicle_to_resource_class_assignment_costs_.resize(model_.vehicles());
2957 delta_vehicle_to_resource_class_assignment_costs_.resize(model_.vehicles());
2958}
2959
2960bool ResourceGroupAssignmentFilter::InitializeAcceptPath() {
2961 delta_vehicle_to_resource_class_assignment_costs_.assign(model_.vehicles(),
2962 {});
2963 if (current_synch_failed_) {
2964 return true;
2965 }
2966 // TODO(user): Keep track of num_used_vehicles internally and compute its
2967 // new value here by only going through the touched_paths_.
2968 int num_used_vehicles = 0;
2969 const int num_resources = resource_group_.Size();
2970 for (int v : resource_group_.GetVehiclesRequiringAResource()) {
2971 if (GetNext(model_.Start(v)) != model_.End(v) ||
2972 model_.IsVehicleUsedWhenEmpty(v)) {
2973 if (++num_used_vehicles > num_resources) {
2974 return false;
2975 }
2976 }
2977 }
2978 delta_vehicle_requires_resource_assignment_ =
2979 vehicle_requires_resource_assignment_;
2980 return true;
2981}
2982
2983bool ResourceGroupAssignmentFilter::AcceptPath(int64_t path_start, int64_t,
2984 int64_t) {
2985 if (current_synch_failed_) {
2986 return true;
2987 }
2988 const int vehicle = model_.VehicleIndex(path_start);
2989 bool is_infeasible = false;
2990 delta_vehicle_requires_resource_assignment_[vehicle] =
2991 VehicleRequiresResourceAssignment(
2992 vehicle, [this](int64_t n) { return GetNext(n); }, &is_infeasible);
2993 return !is_infeasible;
2994}
2995
2996bool ResourceGroupAssignmentFilter::FinalizeAcceptPath(
2997 int64_t /*objective_min*/, int64_t objective_max) {
2998 delta_cost_without_transit_ = 0;
2999 if (current_synch_failed_) {
3000 return true;
3001 }
3002 const auto& next_accessor = [this](int64_t index) { return GetNext(index); };
3003 delta_vehicles_requiring_resource_assignment_.clear();
3004 // First sum the costs of the routes not requiring resource assignment
3005 // (cheaper computations).
3006 for (int v = 0; v < model_.vehicles(); ++v) {
3007 if (delta_vehicle_requires_resource_assignment_[v]) {
3008 delta_vehicles_requiring_resource_assignment_.push_back(v);
3009 continue;
3010 }
3011 int64_t route_cost = 0;
3012 int64_t start = model_.Start(v);
3013 if (PathStartTouched(start)) {
3014 route_cost =
3015 ComputeRouteCumulCostWithoutResourceAssignment(v, next_accessor);
3016 if (route_cost < 0) {
3017 return false;
3018 }
3019 } else if (IsVarSynced(start)) {
3020 DCHECK_EQ(vehicle_to_resource_class_assignment_costs_[v].size(), 1);
3021 route_cost = vehicle_to_resource_class_assignment_costs_[v][0];
3022 }
3023 CapAddTo(route_cost, &delta_cost_without_transit_);
3024 if (delta_cost_without_transit_ > objective_max) {
3025 return false;
3026 }
3027 }
3028 // Recompute the assignment costs to resources for touched paths requiring
3029 // resource assignment.
3030 for (int64_t start : GetTouchedPathStarts()) {
3031 const int vehicle = model_.VehicleIndex(start);
3032 if (!delta_vehicle_requires_resource_assignment_[vehicle]) {
3033 // Already handled above.
3034 continue;
3035 }
3036 if (!ComputeVehicleToResourceClassAssignmentCosts(
3037 vehicle, /*solve_duration_ratio=*/1.0, resource_group_,
3038 ignored_resources_per_class_, next_accessor,
3039 dimension_.transit_evaluator(vehicle), filter_objective_cost_,
3040 lp_optimizer_, mp_optimizer_,
3041 &delta_vehicle_to_resource_class_assignment_costs_[vehicle],
3042 nullptr, nullptr)) {
3043 return false;
3044 }
3045 }
3046 const int64_t assignment_cost = ComputeBestVehicleToResourceAssignment(
3047 delta_vehicles_requiring_resource_assignment_,
3048 resource_group_.GetResourceIndicesPerClass(),
3049 ignored_resources_per_class_,
3050 /*vehicle_to_resource_class_assignment_costs=*/
3051 [this](int v) {
3052 return PathStartTouched(model_.Start(v))
3053 ? &delta_vehicle_to_resource_class_assignment_costs_[v]
3054 : &vehicle_to_resource_class_assignment_costs_[v];
3055 },
3056 nullptr);
3057 CapAddTo(assignment_cost, &delta_cost_without_transit_);
3058 return assignment_cost >= 0 && delta_cost_without_transit_ <= objective_max;
3059}
3060
3061void ResourceGroupAssignmentFilter::OnBeforeSynchronizePaths(bool) {
3062 if (!HasAnySyncedPath()) {
3063 vehicle_to_resource_class_assignment_costs_.assign(model_.vehicles(), {});
3064 }
3065 bound_resource_index_of_vehicle_.assign(model_.vehicles(), -1);
3066 vehicles_requiring_resource_assignment_.clear();
3067 vehicles_requiring_resource_assignment_.reserve(
3068 resource_group_.GetVehiclesRequiringAResource().size());
3069 vehicle_requires_resource_assignment_.assign(model_.vehicles(), false);
3070 ignored_resources_per_class_.assign(resource_group_.GetResourceClassesCount(),
3071 absl::flat_hash_set<int>());
3072
3073 for (int v : resource_group_.GetVehiclesRequiringAResource()) {
3074 const int64_t start = model_.Start(v);
3075 if (!IsVarSynced(start)) {
3076 continue;
3077 }
3078 vehicle_requires_resource_assignment_[v] =
3079 VehicleRequiresResourceAssignment(
3080 v, [this](int64_t n) { return Value(n); }, &current_synch_failed_);
3081 if (vehicle_requires_resource_assignment_[v]) {
3082 vehicles_requiring_resource_assignment_.push_back(v);
3083 }
3084 if (current_synch_failed_) {
3085 return;
3086 }
3087 }
3088 synchronized_cost_without_transit_ = 0;
3089}
3090
3091void ResourceGroupAssignmentFilter::OnSynchronizePathFromStart(int64_t start) {
3092 if (current_synch_failed_) return;
3093 DCHECK(IsVarSynced(start));
3094 const int v = model_.VehicleIndex(start);
3095 const auto& next_accessor = [this](int64_t index) { return Value(index); };
3096 if (!vehicle_requires_resource_assignment_[v]) {
3097 const int64_t route_cost =
3098 ComputeRouteCumulCostWithoutResourceAssignment(v, next_accessor);
3099 if (route_cost < 0) {
3100 current_synch_failed_ = true;
3101 return;
3102 }
3103 CapAddTo(route_cost, &synchronized_cost_without_transit_);
3104 vehicle_to_resource_class_assignment_costs_[v] = {route_cost};
3105 return;
3106 }
3107 // NOTE(user): Even if filter_objective_cost_ is false, we
3108 // still need to call ComputeVehicleToResourceClassAssignmentCosts() for every
3109 // vehicle requiring resource assignment to keep track of whether or not a
3110 // given vehicle-to-resource-class assignment is possible by storing 0 or -1
3111 // in vehicle_to_resource_class_assignment_costs_.
3112 // TODO(user): Adjust the 'solve_duration_ratio' below.
3113 if (!ComputeVehicleToResourceClassAssignmentCosts(
3114 v, /*solve_duration_ratio=*/1.0, resource_group_,
3115 ignored_resources_per_class_, next_accessor,
3116 dimension_.transit_evaluator(v), filter_objective_cost_,
3117 lp_optimizer_, mp_optimizer_,
3118 &vehicle_to_resource_class_assignment_costs_[v], nullptr, nullptr)) {
3119 vehicle_to_resource_class_assignment_costs_[v].assign(
3120 resource_group_.GetResourceClassesCount(), -1);
3121 current_synch_failed_ = true;
3122 }
3123}
3124
3125void ResourceGroupAssignmentFilter::OnAfterSynchronizePaths() {
3126 if (current_synch_failed_) {
3127 synchronized_cost_without_transit_ = 0;
3128 return;
3129 }
3130 if (!filter_objective_cost_) {
3131 DCHECK_EQ(synchronized_cost_without_transit_, 0);
3132 return;
3133 }
3134 const int64_t assignment_cost = ComputeBestVehicleToResourceAssignment(
3135 vehicles_requiring_resource_assignment_,
3136 resource_group_.GetResourceIndicesPerClass(),
3137 ignored_resources_per_class_,
3138 [this](int v) { return &vehicle_to_resource_class_assignment_costs_[v]; },
3139 nullptr);
3140 if (assignment_cost < 0) {
3141 synchronized_cost_without_transit_ = 0;
3142 current_synch_failed_ = true;
3143 } else {
3144 DCHECK_GE(synchronized_cost_without_transit_, 0);
3145 CapAddTo(assignment_cost, &synchronized_cost_without_transit_);
3146 }
3147}
3148
3149bool ResourceGroupAssignmentFilter::VehicleRequiresResourceAssignment(
3150 int vehicle, const std::function<int64_t(int64_t)>& next_accessor,
3151 bool* is_infeasible) {
3152 *is_infeasible = false;
3153 if (!resource_group_.VehicleRequiresAResource(vehicle)) return false;
3154 const IntVar* res_var = model_.ResourceVar(vehicle, resource_group_.Index());
3155 if (!model_.IsVehicleUsedWhenEmpty(vehicle) &&
3156 next_accessor(model_.Start(vehicle)) == model_.End(vehicle)) {
3157 if (res_var->Bound() && res_var->Value() >= 0) {
3158 // Vehicle with a resource (force-)assigned to it cannot be unused.
3159 *is_infeasible = true;
3160 }
3161 return false;
3162 }
3163 // Vehicle is used.
3164 if (res_var->Bound()) {
3165 // No need to do resource assignment for this vehicle.
3166 const int res = res_var->Value();
3167 if (res < 0) {
3168 // Vehicle has a negative resource index enforced but is used.
3169 *is_infeasible = true;
3170 } else {
3171 bound_resource_index_of_vehicle_[vehicle] = res;
3172 const RCIndex rc = resource_group_.GetResourceClassIndex(res);
3173 ignored_resources_per_class_[rc].insert(res);
3174 }
3175 return false;
3176 }
3177 // Vehicle is used and res_var isn't bound.
3178 return true;
3179}
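// In short, the helper above only returns true when the vehicle is used and
// its resource variable is unbound; a used vehicle with a bound, valid
// resource has that resource recorded in bound_resource_index_of_vehicle_
// and excluded from assignment, and contradictory bindings (a resource forced
// on an unused vehicle, or a negative resource on a used one) set
// *is_infeasible.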
3180
3181int64_t
3182ResourceGroupAssignmentFilter::ComputeRouteCumulCostWithoutResourceAssignment(
3183 int vehicle, const std::function<int64_t(int64_t)>& next_accessor) const {
3184 if (next_accessor(model_.Start(vehicle)) == model_.End(vehicle) &&
3185 !model_.IsVehicleUsedWhenEmpty(vehicle)) {
3186 return 0;
3187 }
3188 using Resource = RoutingModel::ResourceGroup::Resource;
3189 const Resource* resource = nullptr;
3190 if (resource_group_.VehicleRequiresAResource(vehicle)) {
3191 DCHECK_GE(bound_resource_index_of_vehicle_[vehicle], 0);
3192 resource =
3193 &resource_group_.GetResource(bound_resource_index_of_vehicle_[vehicle]);
3194 }
3195 int64_t route_cost = 0;
3196 const DimensionSchedulingStatus status =
3197 lp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
3198 vehicle, /*solve_duration_ratio=*/1.0, next_accessor, resource,
3199 filter_objective_cost_ ? &route_cost : nullptr);
3200 switch (status) {
3201 case DimensionSchedulingStatus::INFEASIBLE:
3202 return -1;
3203 case DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY:
3204 if (mp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
3205 vehicle, /*solve_duration_ratio=*/1.0, next_accessor, resource,
3206 filter_objective_cost_ ? &route_cost : nullptr) ==
3207 DimensionSchedulingStatus::INFEASIBLE) {
3208 return -1;
3209 }
3210 break;
3211 default:
3212 DCHECK(status == DimensionSchedulingStatus::OPTIMAL ||
3213 status == DimensionSchedulingStatus::FEASIBLE);
3214 }
3215 return route_cost;
3216}
3217
3218// ResourceAssignmentFilter
3219class ResourceAssignmentFilter : public LocalSearchFilter {
3220 public:
3221 ResourceAssignmentFilter(const std::vector<IntVar*>& nexts,
3222 LocalDimensionCumulOptimizer* optimizer,
3223 LocalDimensionCumulOptimizer* mp_optimizer,
3224 bool propagate_own_objective_value,
3225 bool filter_objective_cost);
3226 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3227 int64_t objective_min, int64_t objective_max) override;
3228 void Synchronize(const Assignment* assignment,
3229 const Assignment* delta) override;
3230
3231 int64_t GetAcceptedObjectiveValue() const override {
3232 return propagate_own_objective_value_ ? delta_cost_ : 0;
3233 }
3234 int64_t GetSynchronizedObjectiveValue() const override {
3235 return propagate_own_objective_value_ ? synchronized_cost_ : 0;
3236 }
3237 std::string DebugString() const override {
3238 return "ResourceAssignmentFilter(" + dimension_name_ + ")";
3239 }
3240
3241 private:
3242 std::vector<IntVarLocalSearchFilter*> resource_group_assignment_filters_;
3243 int64_t synchronized_cost_;
3244 int64_t delta_cost_;
3245 const bool propagate_own_objective_value_;
3246 const std::string dimension_name_;
3247};
3248
3249ResourceAssignmentFilter::ResourceAssignmentFilter(
3250 const std::vector<IntVar*>& nexts,
3251 LocalDimensionCumulOptimizer* lp_optimizer,
3252 LocalDimensionCumulOptimizer* mp_optimizer,
3253 bool propagate_own_objective_value, bool filter_objective_cost)
3254 : propagate_own_objective_value_(propagate_own_objective_value),
3255 dimension_name_(lp_optimizer->dimension()->name()) {
3256 const RoutingModel& model = *lp_optimizer->dimension()->model();
3257 for (const auto& resource_group : model.GetResourceGroups()) {
3258 resource_group_assignment_filters_.push_back(
3259 model.solver()->RevAlloc(new ResourceGroupAssignmentFilter(
3260 nexts, resource_group.get(), lp_optimizer, mp_optimizer,
3261 filter_objective_cost)));
3262 }
3263}
3264
3265bool ResourceAssignmentFilter::Accept(const Assignment* delta,
3266 const Assignment* deltadelta,
3267 int64_t objective_min,
3268 int64_t objective_max) {
3269 delta_cost_ = 0;
3270 for (LocalSearchFilter* group_filter : resource_group_assignment_filters_) {
3271 if (!group_filter->Accept(delta, deltadelta, objective_min,
3272 objective_max)) {
3273 return false;
3274 }
3275 delta_cost_ =
3276 std::max(delta_cost_, group_filter->GetAcceptedObjectiveValue());
3277 DCHECK_LE(delta_cost_, objective_max)
3278 << "ResourceGroupAssignmentFilter should return false when the "
3279 "objective_max is exceeded.";
3280 }
3281 return true;
3282}
3283
3284void ResourceAssignmentFilter::Synchronize(const Assignment* assignment,
3285 const Assignment* delta) {
3286 synchronized_cost_ = 0;
3287 for (LocalSearchFilter* group_filter : resource_group_assignment_filters_) {
3288 group_filter->Synchronize(assignment, delta);
3289 synchronized_cost_ = std::max(
3290 synchronized_cost_, group_filter->GetSynchronizedObjectiveValue());
3291 }
3292}
3293
3294} // namespace
3295
3296LocalSearchFilter* MakeResourceAssignmentFilter(
3297 LocalDimensionCumulOptimizer* lp_optimizer,
3298 LocalDimensionCumulOptimizer* mp_optimizer,
3299 bool propagate_own_objective_value, bool filter_objective_cost) {
3300 DCHECK_NE(lp_optimizer, nullptr);
3301 DCHECK_NE(mp_optimizer, nullptr);
3302 const RoutingModel& model = *lp_optimizer->dimension()->model();
3303 return model.solver()->RevAlloc(new ResourceAssignmentFilter(
3304 model.Nexts(), lp_optimizer, mp_optimizer, propagate_own_objective_value,
3305 filter_objective_cost));
3306}
3307
3308namespace {
3309
3310// This filter accepts deltas for which the assignment satisfies the
3311// constraints of the Solver. This is verified by keeping an internal copy of
3312// the assignment with all Next vars and their updated values, and calling
3313// RestoreAssignment() on the assignment+delta.
3314// TODO(user): Also call the solution finalizer on variables, with the
3315 // exception of Next Vars (would fail on large instances).
3316// WARNING: In the case of mandatory nodes, when all vehicles are currently
3317// being used in the solution but uninserted nodes still remain, this filter
3318// will reject the solution, even if the node could be inserted on one of these
3319// routes, because all Next vars of vehicle starts are already instantiated.
3320// TODO(user): Avoid such false negatives.
3321
3322class CPFeasibilityFilter : public IntVarLocalSearchFilter {
3323 public:
3324 explicit CPFeasibilityFilter(RoutingModel* routing_model);
3325 ~CPFeasibilityFilter() override = default;
3326 std::string DebugString() const override { return "CPFeasibilityFilter"; }
3327 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3328 int64_t objective_min, int64_t objective_max) override;
3329 void OnSynchronize(const Assignment* delta) override;
3330
3331 private:
3332 void AddDeltaToAssignment(const Assignment* delta, Assignment* assignment);
3333
3334 static const int64_t kUnassigned;
3335 const RoutingModel* const model_;
3336 Solver* const solver_;
3337 Assignment* const assignment_;
3338 Assignment* const temp_assignment_;
3339 DecisionBuilder* const restore_;
3340 SearchLimit* const limit_;
3341};
3342
3343const int64_t CPFeasibilityFilter::kUnassigned = -1;
3344
3345CPFeasibilityFilter::CPFeasibilityFilter(RoutingModel* routing_model)
3346 : IntVarLocalSearchFilter(routing_model->Nexts()),
3347 model_(routing_model),
3348 solver_(routing_model->solver()),
3349 assignment_(solver_->MakeAssignment()),
3350 temp_assignment_(solver_->MakeAssignment()),
3351 restore_(solver_->MakeRestoreAssignment(temp_assignment_)),
3352 limit_(solver_->MakeCustomLimit(
3353 [routing_model]() { return routing_model->CheckLimit(); })) {
3354 assignment_->Add(routing_model->Nexts());
3355}
3356
3357bool CPFeasibilityFilter::Accept(const Assignment* delta,
3358 const Assignment* /*deltadelta*/,
3359 int64_t /*objective_min*/,
3360 int64_t /*objective_max*/) {
3361 temp_assignment_->Copy(assignment_);
3362 AddDeltaToAssignment(delta, temp_assignment_);
3363
3364 return solver_->Solve(restore_, limit_);
3365}
3366
3367void CPFeasibilityFilter::OnSynchronize(const Assignment* delta) {
3368 AddDeltaToAssignment(delta, assignment_);
3369}
3370
3371void CPFeasibilityFilter::AddDeltaToAssignment(const Assignment* delta,
3372 Assignment* assignment) {
3373 if (delta == nullptr) {
3374 return;
3375 }
3376 Assignment::IntContainer* const container =
3377 assignment->MutableIntVarContainer();
3378 for (const IntVarElement& delta_element :
3379 delta->IntVarContainer().elements()) {
3380 IntVar* const var = delta_element.Var();
3381 int64_t index = kUnassigned;
3382 // Ignoring variables found in the delta which are not next variables, such
3383 // as vehicle variables.
3384 if (!FindIndex(var, &index)) continue;
3385 DCHECK_EQ(var, Var(index));
3386 const int64_t value = delta_element.Value();
3387
3388 container->AddAtPosition(var, index)->SetValue(value);
3389 if (model_->IsStart(index)) {
3390 if (model_->IsEnd(value)) {
3391 // Do not restore unused routes.
3392 container->MutableElement(index)->Deactivate();
3393 } else {
3394 // Re-activate the route's start in case it was deactivated before.
3395 container->MutableElement(index)->Activate();
3396 }
3397 }
3398 }
3399}
3400
3401} // namespace
3402
3403IntVarLocalSearchFilter* MakeCPFeasibilityFilter(RoutingModel* routing_model) {
3404 return routing_model->solver()->RevAlloc(
3405 new CPFeasibilityFilter(routing_model));
3406}
3407
3408PathState::PathState(int num_nodes, std::vector<int> path_start,
3409 std::vector<int> path_end)
3410 : num_nodes_(num_nodes),
3411 num_paths_(path_start.size()),
3412 num_nodes_threshold_(std::max(16, 4 * num_nodes_)) // Arbitrary value.
3413{
3414 DCHECK_EQ(path_start.size(), num_paths_);
3415 DCHECK_EQ(path_end.size(), num_paths_);
3416 for (int p = 0; p < num_paths_; ++p) {
3417 path_start_end_.push_back({path_start[p], path_end[p]});
3418 }
3419 Reset();
3420}
3421
3422void PathState::Reset() {
3423 is_invalid_ = false;
3424 // Initial state is all unperformed: paths go from start to end directly.
3425 committed_index_.assign(num_nodes_, -1);
3426 committed_paths_.assign(num_nodes_, kUnassigned);
3427 committed_nodes_.assign(2 * num_paths_, -1);
3428 chains_.assign(num_paths_ + 1, {-1, -1}); // Reserve 1 more for sentinel.
3429 paths_.assign(num_paths_, {-1, -1});
3430 for (int path = 0; path < num_paths_; ++path) {
3431 const int index = 2 * path;
3432 const auto& [start, end] = path_start_end_[path];
3433 committed_index_[start] = index;
3434 committed_index_[end] = index + 1;
3435
3436 committed_nodes_[index] = start;
3437 committed_nodes_[index + 1] = end;
3438
3439 committed_paths_[start] = path;
3440 committed_paths_[end] = path;
3441
3442 chains_[path] = {index, index + 2};
3443 paths_[path] = {path, path + 1};
3444 }
3445 chains_[num_paths_] = {0, 0}; // Sentinel.
3446 // Nodes that are not starts or ends are not in any path, but they still need
3447 // to be represented in the committed state.
3448 for (int node = 0; node < num_nodes_; ++node) {
3449 if (committed_index_[node] != -1) continue; // node is start or end.
3450 committed_index_[node] = committed_nodes_.size();
3451 committed_nodes_.push_back(node);
3452 }
3453}
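// For example, with two paths {s0, e0} and {s1, e1} plus one extra node n,
// Reset() leaves committed_nodes_ = [s0, e0, s1, e1, n], chains_[0] = {0, 2},
// chains_[1] = {2, 4}, and committed_paths_[n] == kUnassigned.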
3454
3455PathState::ChainRange PathState::Chains(int path) const {
3456 const PathBounds bounds = paths_[path];
3457 return PathState::ChainRange(chains_.data() + bounds.begin_index,
3458 chains_.data() + bounds.end_index,
3459 committed_nodes_.data());
3460}
3461
3462PathState::NodeRange PathState::Nodes(int path) const {
3463 const PathBounds bounds = paths_[path];
3464 return PathState::NodeRange(chains_.data() + bounds.begin_index,
3465 chains_.data() + bounds.end_index,
3466 committed_nodes_.data());
3467}
3468
3469void PathState::ChangePath(int path, absl::Span<const ChainBounds> chains) {
3470 changed_paths_.push_back(path);
3471 const int path_begin_index = chains_.size();
3472 chains_.insert(chains_.end(), chains.begin(), chains.end());
3473 const int path_end_index = chains_.size();
3474 paths_[path] = {path_begin_index, path_end_index};
3475 chains_.emplace_back(0, 0); // Sentinel.
3476}
3477
3478void PathState::ChangeLoops(absl::Span<const int> new_loops) {
3479 for (const int loop : new_loops) {
3480 // If the node was already a loop, do not add it.
3481 // If it was not assigned, it becomes a loop.
3482 if (Path(loop) == kLoop) continue;
3483 changed_loops_.push_back(loop);
3484 }
3485}
3486
3487void PathState::Commit() {
3488 DCHECK(!IsInvalid());
3489 if (committed_nodes_.size() < num_nodes_threshold_) {
3490 IncrementalCommit();
3491 } else {
3492 FullCommit();
3493 }
3494}
3495
3496void PathState::Revert() {
3497 is_invalid_ = false;
3498 chains_.resize(num_paths_ + 1); // One per path + sentinel.
3499 for (const int path : changed_paths_) {
3500 paths_[path] = {path, path + 1};
3501 }
3502 changed_paths_.clear();
3503 changed_loops_.clear();
3504}
3505
3506void PathState::CopyNewPathAtEndOfNodes(int path) {
3507 // Copy path's nodes, chain by chain.
3508 const PathBounds path_bounds = paths_[path];
3509 for (int i = path_bounds.begin_index; i < path_bounds.end_index; ++i) {
3510 const ChainBounds chain_bounds = chains_[i];
3511 committed_nodes_.insert(committed_nodes_.end(),
3512 committed_nodes_.data() + chain_bounds.begin_index,
3513 committed_nodes_.data() + chain_bounds.end_index);
3514 if (committed_paths_[committed_nodes_.back()] == path) continue;
3515 for (int i = chain_bounds.begin_index; i < chain_bounds.end_index; ++i) {
3516 const int node = committed_nodes_[i];
3517 committed_paths_[node] = path;
3518 }
3519 }
3520}
3521
3522// TODO(user): Instead of copying paths at the end systematically,
3523// reuse some of the memory when possible.
3524void PathState::IncrementalCommit() {
3525 const int new_nodes_begin = committed_nodes_.size();
3526 for (const int path : ChangedPaths()) {
3527 const int chain_begin = committed_nodes_.size();
3528 CopyNewPathAtEndOfNodes(path);
3529 const int chain_end = committed_nodes_.size();
3530 chains_[path] = {chain_begin, chain_end};
3531 }
3532 // Re-index all copied nodes.
3533 const int new_nodes_end = committed_nodes_.size();
3534 for (int i = new_nodes_begin; i < new_nodes_end; ++i) {
3535 const int node = committed_nodes_[i];
3536 committed_index_[node] = i;
3537 }
3538 // New loops stay in place: only change their path to kLoop,
3539 // committed_index_ does not change.
3540 for (const int loop : ChangedLoops()) {
3541 committed_paths_[loop] = kLoop;
3542 }
3543 // Committed part of the state is set up, erase incremental changes.
3544 Revert();
3545}
3546
3547void PathState::FullCommit() {
3548 // Copy all paths at the end of committed_nodes_,
3549 // then remove all old committed_nodes_.
3550 const int old_num_nodes = committed_nodes_.size();
3551 for (int path = 0; path < num_paths_; ++path) {
3552 const int new_path_begin = committed_nodes_.size() - old_num_nodes;
3553 CopyNewPathAtEndOfNodes(path);
3554 const int new_path_end = committed_nodes_.size() - old_num_nodes;
3555 chains_[path] = {new_path_begin, new_path_end};
3556 }
3557 committed_nodes_.erase(committed_nodes_.begin(),
3558 committed_nodes_.begin() + old_num_nodes);
3559
3560 // Reindex path nodes, then loop nodes.
3561 constexpr int kUnindexed = -1;
3562 committed_index_.assign(num_nodes_, kUnindexed);
3563 int index = 0;
3564 for (const int node : committed_nodes_) {
3565 committed_index_[node] = index++;
3566 }
3567 for (int node = 0; node < num_nodes_; ++node) {
3568 if (committed_index_[node] != kUnindexed) continue;
3569 committed_index_[node] = index++;
3570 committed_nodes_.push_back(node);
3571 }
3572 for (const int loop : ChangedLoops()) {
3573 committed_paths_[loop] = kLoop;
3574 }
3575 // Committed part of the state is set up, erase incremental changes.
3576 Revert();
3577}
3578
3579namespace {
3580
3581class PathStateFilter : public LocalSearchFilter {
3582 public:
3583 std::string DebugString() const override { return "PathStateFilter"; }
3584 PathStateFilter(std::unique_ptr<PathState> path_state,
3585 const std::vector<IntVar*>& nexts);
3586 void Relax(const Assignment* delta, const Assignment* deltadelta) override;
3587 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
3588 return true;
3589 }
3590 void Synchronize(const Assignment*, const Assignment*) override {};
3591 void Commit(const Assignment* assignment, const Assignment* delta) override;
3592 void Revert() override;
3593 void Reset() override;
3594
3595 private:
3596 // Used in arc to chain translation, see below.
3597 struct TailHeadIndices {
3598 int tail_index;
3599 int head_index;
3600 };
3601 struct IndexArc {
3602 int index;
3603 int arc;
3604 bool operator<(const IndexArc& other) const { return index < other.index; }
3605 };
3606
3607 // Translate changed_arcs_ to chains, pass to underlying PathState.
3608 void CutChains();
3609 // From changed_paths_ and changed_arcs_, fill chains_ and paths_.
3610 // Selection-based algorithm in O(n^2), to use for small change sets.
3611 void MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm();
3612 // From changed_paths_ and changed_arcs_, fill chains_ and paths_.
3613 // Generic algorithm in O(sort(n)+n), to use for larger change sets.
3614 void MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm();
3615
3616 const std::unique_ptr<PathState> path_state_;
3617 // Map IntVar* index to node, offset by the min index in nexts.
3618 std::vector<int> variable_index_to_node_;
3619 int index_offset_;
3620
3621 // Used in CutChains(); kept as class members to avoid reallocations.
3622 std::vector<int> changed_paths_;
3623 std::vector<bool> path_has_changed_;
3624 std::vector<std::pair<int, int>> changed_arcs_;
3625 std::vector<int> changed_loops_;
3626 std::vector<TailHeadIndices> tail_head_indices_;
3627 std::vector<IndexArc> arcs_by_tail_index_;
3628 std::vector<IndexArc> arcs_by_head_index_;
3629 std::vector<int> next_arc_;
3630 std::vector<PathState::ChainBounds> path_chains_;
3631};
3632
3633PathStateFilter::PathStateFilter(std::unique_ptr<PathState> path_state,
3634 const std::vector<IntVar*>& nexts)
3635 : path_state_(std::move(path_state)) {
3636 {
3637 int min_index = std::numeric_limits<int>::max();
3638 int max_index = std::numeric_limits<int>::min();
3639 for (const IntVar* next : nexts) {
3640 const int index = next->index();
3641 min_index = std::min<int>(min_index, index);
3642 max_index = std::max<int>(max_index, index);
3643 }
3644 variable_index_to_node_.resize(max_index - min_index + 1, -1);
3645 index_offset_ = min_index;
3646 }
3647
3648 for (int node = 0; node < nexts.size(); ++node) {
3649 const int index = nexts[node]->index() - index_offset_;
3650 variable_index_to_node_[index] = node;
3651 }
3652 path_has_changed_.assign(path_state_->NumPaths(), false);
3653}
3654
3655void PathStateFilter::Relax(const Assignment* delta, const Assignment*) {
3656 path_state_->Revert();
3657 changed_arcs_.clear();
3658 for (const IntVarElement& var_value : delta->IntVarContainer().elements()) {
3659 if (var_value.Var() == nullptr) continue;
3660 const int index = var_value.Var()->index() - index_offset_;
3661 if (index < 0 || variable_index_to_node_.size() <= index) continue;
3662 const int node = variable_index_to_node_[index];
3663 if (node == -1) continue;
3664 if (var_value.Bound()) {
3665 changed_arcs_.emplace_back(node, var_value.Value());
3666 } else {
3667 path_state_->Revert();
3668 path_state_->SetInvalid();
3669 return;
3670 }
3671 }
3672 CutChains();
3673}
3674
3675void PathStateFilter::Reset() { path_state_->Reset(); }
3676
3677// The solver does not guarantee that a given Commit() corresponds to
3678// the previous Relax() (or that there has been a call to Relax()),
3679// so we replay the full change call sequence.
3680void PathStateFilter::Commit(const Assignment* assignment,
3681 const Assignment* delta) {
3682 path_state_->Revert();
3683 if (delta == nullptr || delta->Empty()) {
3684 Relax(assignment, nullptr);
3685 } else {
3686 Relax(delta, nullptr);
3687 }
3688 path_state_->Commit();
3689}
3690
3691void PathStateFilter::Revert() { path_state_->Revert(); }
3692
3693void PathStateFilter::CutChains() {
3694 // Filter out unchanged arcs from changed_arcs_,
3695 // translate changed arcs to changed arc indices.
3696 // Fill changed_paths_ as a side effect while node_path is at hand.
3697 for (const int path : changed_paths_) path_has_changed_[path] = false;
3698 changed_paths_.clear();
3699 tail_head_indices_.clear();
3700 changed_loops_.clear();
3701 int num_changed_arcs = 0;
3702 for (const auto [node, next] : changed_arcs_) {
3703 const int node_index = path_state_->CommittedIndex(node);
3704 const int next_index = path_state_->CommittedIndex(next);
3705 const int node_path = path_state_->Path(node);
3706 if (next != node &&
3707 (next_index != node_index + 1 || node_path < 0)) { // New arc.
3708 tail_head_indices_.push_back({node_index, next_index});
3709 changed_arcs_[num_changed_arcs++] = {node, next};
3710 if (node_path >= 0 && !path_has_changed_[node_path]) {
3711 path_has_changed_[node_path] = true;
3712 changed_paths_.push_back(node_path);
3713 }
3714 } else if (node == next && node_path != PathState::kLoop) { // New loop.
3715 changed_loops_.push_back(node);
3716 }
3717 }
3718 changed_arcs_.resize(num_changed_arcs);
3719
3720 path_state_->ChangeLoops(changed_loops_);
3721 if (tail_head_indices_.size() + changed_paths_.size() <= 8) {
3722 MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm();
3723 } else {
3724 MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm();
3725 }
3726}
3727
3728void PathStateFilter::
3729 MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm() {
3730 int num_visited_changed_arcs = 0;
3731 const int num_changed_arcs = tail_head_indices_.size();
3732 // For every path, find all its chains.
3733 for (const int path : changed_paths_) {
3734 path_chains_.clear();
3735 const auto [start_index, end_index] = path_state_->CommittedPathRange(path);
3736 int current_index = start_index;
3737 while (true) {
3738 // Look for smallest non-visited tail_index that is no smaller than
3739 // current_index.
3740 int selected_arc = -1;
3741 int selected_tail_index = std::numeric_limits<int>::max();
3742 for (int i = num_visited_changed_arcs; i < num_changed_arcs; ++i) {
3743 const int tail_index = tail_head_indices_[i].tail_index;
3744 if (current_index <= tail_index && tail_index < selected_tail_index) {
3745 selected_arc = i;
3746 selected_tail_index = tail_index;
3747 }
3748 }
3749 // If there is no such tail index, or more generally if the next chain
3750 // would be cut by end of path,
3751 // stack {current_index, end_index} in chains_, and go to the next path.
3752 // Otherwise, stack {current_index, tail_index+1} in chains_,
3753 // set current_index = head_index, set pair to visited.
3754 if (start_index <= current_index && current_index < end_index &&
3755 end_index <= selected_tail_index) {
3756 path_chains_.emplace_back(current_index, end_index);
3757 break;
3758 } else {
3759 path_chains_.emplace_back(current_index, selected_tail_index + 1);
3760 current_index = tail_head_indices_[selected_arc].head_index;
3761 std::swap(tail_head_indices_[num_visited_changed_arcs],
3762 tail_head_indices_[selected_arc]);
3763 ++num_visited_changed_arcs;
3764 }
3765 }
3766 path_state_->ChangePath(path, path_chains_);
3767 }
3768}
3769
3770void PathStateFilter::MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm() {
3771 // TRICKY: For each changed path, we want to generate a sequence of chains
3772 // that represents the path in the changed state.
3773 // First, notice that if we add a fake end->start arc for each changed path,
3774 // then all chains will be from the head of an arc to the tail of an arc.
3775 // A way to generate the changed chains and paths would be, for each path,
3776 // to start from a fake arc's head (the path start), go down the path until
3777 // the tail of an arc, and go to the next arc until we return on the fake arc,
3778 // enqueuing the [head, tail] chains as we go.
3779 // In turn, to do that, we need to know which arc to go to.
3780 // If we sort all heads and tails by index in two separate arrays,
3781 // the head_index and tail_index at the same rank are such that
3782 // [head_index, tail_index] is a chain. Moreover, the arc that must be visited
3783 // after head_index's arc is tail_index's arc.
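// Worked example: a changed path with committed indices [0, 5), a single new
// arc from index 1 to index 3, and the fake arc {4, 0}. Sorted tails are
// [1, 4] and sorted heads are [0, 3], so next_arc_ sends the fake arc to the
// new arc and the new arc back to the fake arc. Starting from the fake arc's
// head (index 0) yields the chains [0, 2) and [3, 5): the path keeps indices
// 0..1 and 3..4 and drops index 2.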
3784
3785 // Add a fake end->start arc for each path.
3786 for (const int path : changed_paths_) {
3787 const auto [start_index, end_index] = path_state_->CommittedPathRange(path);
3788 tail_head_indices_.push_back({end_index - 1, start_index});
3789 }
3790
3791 // Generate pairs (tail_index, arc) and (head_index, arc) for all arcs,
3792 // sort those pairs by index.
3793 const int num_arc_indices = tail_head_indices_.size();
3794 arcs_by_tail_index_.resize(num_arc_indices);
3795 arcs_by_head_index_.resize(num_arc_indices);
3796 for (int i = 0; i < num_arc_indices; ++i) {
3797 arcs_by_tail_index_[i] = {tail_head_indices_[i].tail_index, i};
3798 arcs_by_head_index_[i] = {tail_head_indices_[i].head_index, i};
3799 }
3800 std::sort(arcs_by_tail_index_.begin(), arcs_by_tail_index_.end());
3801 std::sort(arcs_by_head_index_.begin(), arcs_by_head_index_.end());
3802 // Generate the map from arc to next arc in path.
3803 next_arc_.resize(num_arc_indices);
3804 for (int i = 0; i < num_arc_indices; ++i) {
3805 next_arc_[arcs_by_head_index_[i].arc] = arcs_by_tail_index_[i].arc;
3806 }
3807
3808 // Generate chains: for every changed path, start from its fake arc,
3809 // jump to next_arc_ until going back to fake arc,
3810 // enqueuing chains as we go.
3811 const int first_fake_arc = num_arc_indices - changed_paths_.size();
3812 for (int fake_arc = first_fake_arc; fake_arc < num_arc_indices; ++fake_arc) {
3813 path_chains_.clear();
3814 int32_t arc = fake_arc;
3815 do {
3816 const int chain_begin = tail_head_indices_[arc].head_index;
3817 arc = next_arc_[arc];
3818 const int chain_end = tail_head_indices_[arc].tail_index + 1;
3819 path_chains_.emplace_back(chain_begin, chain_end);
3820 } while (arc != fake_arc);
3821 const int path = changed_paths_[fake_arc - first_fake_arc];
3822 path_state_->ChangePath(path, path_chains_);
3823 }
3824}
3825
3826} // namespace
3827
3828LocalSearchFilter* MakePathStateFilter(Solver* solver,
3829 std::unique_ptr<PathState> path_state,
3830 const std::vector<IntVar*>& nexts) {
3831 PathStateFilter* filter = new PathStateFilter(std::move(path_state), nexts);
3832 return solver->RevAlloc(filter);
3833}
3834
3835namespace {
3836using EInterval = DimensionChecker::ExtendedInterval;
3837
3838constexpr int64_t kint64min = std::numeric_limits<int64_t>::min();
3839constexpr int64_t kint64max = std::numeric_limits<int64_t>::max();
3840
3841EInterval operator&(const EInterval& i1, const EInterval& i2) {
3842 return {std::max(i1.num_negative_infinity == 0 ? i1.min : kint64min,
3843 i2.num_negative_infinity == 0 ? i2.min : kint64min),
3844 std::min(i1.num_positive_infinity == 0 ? i1.max : kint64max,
3845 i2.num_positive_infinity == 0 ? i2.max : kint64max),
3846 std::min(i1.num_negative_infinity, i2.num_negative_infinity),
3847 std::min(i1.num_positive_infinity, i2.num_positive_infinity)};
3848}
3849
3850EInterval operator&=(EInterval& i1, const EInterval& i2) {
3851 i1 = i1 & i2;
3852 return i1;
3853}
3854
3855bool IsEmpty(const EInterval& interval) {
3856 const int64_t minimum_value =
3857 interval.num_negative_infinity == 0 ? interval.min : kint64min;
3858 const int64_t maximum_value =
3859 interval.num_positive_infinity == 0 ? interval.max : kint64max;
3860 return minimum_value > maximum_value;
3861}
3862
3863EInterval operator+(const EInterval& i1, const EInterval& i2) {
3864 return {CapAdd(i1.min, i2.min), CapAdd(i1.max, i2.max),
3865 i1.num_negative_infinity + i2.num_negative_infinity,
3866 i1.num_positive_infinity + i2.num_positive_infinity};
3867}
3868
3869EInterval& operator+=(EInterval& i1, const EInterval& i2) {
3870 i1 = i1 + i2;
3871 return i1;
3872}
3873
3874EInterval operator-(const EInterval& i1, const EInterval& i2) {
3875 return {CapSub(i1.min, i2.max), CapSub(i1.max, i2.min),
3876 i1.num_negative_infinity + i2.num_positive_infinity,
3877 i1.num_positive_infinity + i2.num_negative_infinity};
3878}
3879
3880// Return the interval delta such that from + delta = to.
3881// Note that the result is not the same as "to + (-from)".
3882EInterval Delta(const EInterval& from, const EInterval& to) {
3883 return {CapSub(to.min, from.min), CapSub(to.max, from.max),
3884 to.num_negative_infinity - from.num_negative_infinity,
3885 to.num_positive_infinity - from.num_positive_infinity};
3886}
3887
3888EInterval ToExtendedInterval(DimensionChecker::Interval interval) {
3889 const bool is_neg_infinity = interval.min == kint64min;
3890 const bool is_pos_infinity = interval.max == kint64max;
3891 return {is_neg_infinity ? 0 : interval.min,
3892 is_pos_infinity ? 0 : interval.max, is_neg_infinity ? 1 : 0,
3893 is_pos_infinity ? 1 : 0};
3894}
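// For instance, the interval [5, kint64max] maps to {5, 0, 0, 1}: the +inf
// bound is encoded by num_positive_infinity == 1 with a finite max of 0, so
// sums of intervals count infinities instead of saturating. Adding
// {3, 7, 0, 0} yields {8, 7, 0, 1}, still read back as [8, kint64max]
// because num_positive_infinity > 0.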
3895
3896std::vector<EInterval> ToExtendedIntervals(
3897 absl::Span<const DimensionChecker::Interval> intervals) {
3898 std::vector<EInterval> extended_intervals;
3899 extended_intervals.reserve(intervals.size());
3900 for (const auto& interval : intervals) {
3901 extended_intervals.push_back(ToExtendedInterval(interval));
3902 }
3903 return extended_intervals;
3904}
3905} // namespace
3906
3907DimensionChecker::DimensionChecker(
3908 const PathState* path_state, std::vector<Interval> path_capacity,
3909 std::vector<int> path_class,
3910 std::vector<std::function<Interval(int64_t, int64_t)>>
3911 demand_per_path_class,
3912 std::vector<Interval> node_capacity, int min_range_size_for_riq)
3913 : path_state_(path_state),
3914 path_capacity_(ToExtendedIntervals(path_capacity)),
3915 path_class_(std::move(path_class)),
3916 demand_per_path_class_(std::move(demand_per_path_class)),
3917 node_capacity_(ToExtendedIntervals(node_capacity)),
3918 index_(path_state_->NumNodes(), 0),
3919 maximum_riq_layer_size_(std::max(
3920 16, 4 * path_state_->NumNodes())), // 16 and 4 are arbitrary.
3921 min_range_size_for_riq_(min_range_size_for_riq) {
3922 const int num_nodes = path_state_->NumNodes();
3923 cached_demand_.resize(num_nodes);
3924 const int num_paths = path_state_->NumPaths();
3925 DCHECK_EQ(num_paths, path_capacity_.size());
3926 DCHECK_EQ(num_paths, path_class_.size());
3927 const int maximum_riq_exponent = MostSignificantBitPosition32(num_nodes);
3928 riq_.resize(maximum_riq_exponent + 1);
3929 FullCommit();
3930}
3931
3932bool DimensionChecker::Check() const {
3933 if (path_state_->IsInvalid()) return true;
3934 for (const int path : path_state_->ChangedPaths()) {
3935 const EInterval path_capacity = path_capacity_[path];
3936 const int path_class = path_class_[path];
3937 // Loop invariant: except for the first chain, cumul represents the cumul
3938 // state of the last node of the previous chain, and it is nonempty.
3939 int prev_node = path_state_->Start(path);
3940 EInterval cumul = node_capacity_[prev_node] & path_capacity;
3941 if (IsEmpty(cumul)) return false;
3942
3943 for (const auto chain : path_state_->Chains(path)) {
3944 const int first_node = chain.First();
3945 const int last_node = chain.Last();
3946
3947 if (prev_node != first_node) {
3948 // Bring cumul state from last node of previous chain to first node of
3949 // current chain.
3950 const EInterval demand = ToExtendedInterval(
3951 demand_per_path_class_[path_class](prev_node, first_node));
3952 cumul += demand;
3953 cumul &= path_capacity;
3954 cumul &= node_capacity_[first_node];
3955 if (IsEmpty(cumul)) return false;
3956 prev_node = first_node;
3957 }
3958
3959 // Bring cumul state from first node to last node of the current chain.
3960 const int first_index = index_[first_node];
3961 const int last_index = index_[last_node];
3962 const int chain_path = path_state_->Path(first_node);
3963 const int chain_path_class =
3964 chain_path < 0 ? -1 : path_class_[chain_path];
3965 // Use a RIQ if the chain size is large enough;
3966 // the optimal size was found with the associated benchmark in tests,
3967 // in particular BM_DimensionChecker<ChangeSparsity::kSparse, *>.
3968 const bool chain_is_cached = chain_path_class == path_class;
3969 if (last_index - first_index > min_range_size_for_riq_ &&
3970 chain_is_cached) {
3971 UpdateCumulUsingChainRIQ(first_index, last_index, path_capacity, cumul);
3972 if (IsEmpty(cumul)) return false;
3973 prev_node = chain.Last();
3974 } else {
3975 for (const int node : chain.WithoutFirstNode()) {
3976 const EInterval demand =
3977 chain_is_cached
3978 ? cached_demand_[prev_node]
3979 : ToExtendedInterval(
3980 demand_per_path_class_[path_class](prev_node, node));
3981 cumul += demand;
3982 cumul &= node_capacity_[node];
3983 cumul &= path_capacity;
3984 if (IsEmpty(cumul)) return false;
3985 prev_node = node;
3986 }
3987 }
3988 }
3989 }
3990 return true;
3991}
3992
3993void DimensionChecker::Commit() {
3994 const int current_layer_size = riq_[0].size();
3995 int change_size = path_state_->ChangedPaths().size();
3996 for (const int path : path_state_->ChangedPaths()) {
3997 for (const auto chain : path_state_->Chains(path)) {
3998 change_size += chain.NumNodes();
3999 }
4000 }
4001 if (current_layer_size + change_size <= maximum_riq_layer_size_) {
4002 IncrementalCommit();
4003 } else {
4004 FullCommit();
4005 }
4006}
4007
4008void DimensionChecker::IncrementalCommit() {
4009 for (const int path : path_state_->ChangedPaths()) {
4010 const int begin_index = riq_[0].size();
4011 AppendPathDemandsToSums(path);
4012 UpdateRIQStructure(begin_index, riq_[0].size());
4013 }
4014}
4015
4016void DimensionChecker::FullCommit() {
4017 // Clear all structures.
4018 for (auto& layer : riq_) layer.clear();
4019 // Append all paths.
4020 const int num_paths = path_state_->NumPaths();
4021 for (int path = 0; path < num_paths; ++path) {
4022 const int begin_index = riq_[0].size();
4023 AppendPathDemandsToSums(path);
4024 UpdateRIQStructure(begin_index, riq_[0].size());
4025 }
4026}
4027
4028void DimensionChecker::AppendPathDemandsToSums(int path) {
4029 // The demand sum stored in riq_[0] at a node's index must be the sum of
4030 // all demands of nodes from the start of the path to that node.
4031 const int path_class = path_class_[path];
4032 EInterval demand_sum = {0, 0, 0, 0};
4033 int prev = path_state_->Start(path);
4034 int index = riq_[0].size();
4035 for (const int node : path_state_->Nodes(path)) {
4036 // Transition to current node.
4037 const EInterval demand =
4038 prev == node ? EInterval{0, 0, 0, 0}
4039 : ToExtendedInterval(
4040 demand_per_path_class_[path_class](prev, node));
4041 demand_sum += demand;
4042 cached_demand_[prev] = demand;
4043 prev = node;
4044 // Store all data of current node.
4045 index_[node] = index++;
4046 riq_[0].push_back({.cumuls_to_fst = node_capacity_[node],
4047 .tightest_tsum = demand_sum,
4048 .cumuls_to_lst = node_capacity_[node],
4049 .tsum_at_fst = demand_sum,
4050 .tsum_at_lst = demand_sum});
4051 }
4052 cached_demand_[path_state_->End(path)] = {0, 0, 0, 0};
4053}
4054
4055void DimensionChecker::UpdateRIQStructure(int begin_index, int end_index) {
4056 // The max layer is the one used by Range Intersection Query functions on
4057 // (begin_index, end_index - 1).
4058 const int max_layer =
4059 MostSignificantBitPosition32(end_index - begin_index - 1);
4060 for (int layer = 1, half_window = 1; layer <= max_layer;
4061 ++layer, half_window *= 2) {
4062 riq_[layer].resize(end_index);
4063 for (int i = begin_index + 2 * half_window - 1; i < end_index; ++i) {
4064 // The window covered by riq_[layer][i] goes from
4065 // first = i - 2 * half_window + 1 to last = i, inclusive.
4066 // Values are computed from two half-windows of the layer below,
4067 // the F-window = (i - 2 * half_window, i - half_window], and
4068 // the L-window = (i - half_window, i].
4069 const RIQNode& fw = riq_[layer - 1][i - half_window];
4070 const RIQNode& lw = riq_[layer - 1][i];
4071 const EInterval lst_to_lst = Delta(fw.tsum_at_lst, lw.tsum_at_lst);
4072 const EInterval fst_to_fst = Delta(fw.tsum_at_fst, lw.tsum_at_fst);
4073
4074 riq_[layer][i] = {
4075 .cumuls_to_fst = fw.cumuls_to_fst & lw.cumuls_to_fst - fst_to_fst,
4076 .tightest_tsum = fw.tightest_tsum & lw.tightest_tsum,
4077 .cumuls_to_lst = fw.cumuls_to_lst + lst_to_lst & lw.cumuls_to_lst,
4078 .tsum_at_fst = fw.tsum_at_fst,
4079 .tsum_at_lst = lw.tsum_at_lst};
4080 }
4081 }
4082}
4083
4084// The RIQ schema decomposes the request into two windows:
4085// - the F window covers indices [first_index, first_index + window)
4086// - the L window covers indices (last_index - window, last_index]
4087// The decomposition uses the first and last nodes of these windows.
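// For instance, with first_index = 3 and last_index = 10, the layer below is
// MostSignificantBitPosition32(7) = 2 and the window size is 4: fw is
// riq_[2][6], covering indices [3, 6], and lw is riq_[2][10], covering
// [7, 10]; together the two windows span the whole range [3, 10].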
4088void DimensionChecker::UpdateCumulUsingChainRIQ(
4089 int first_index, int last_index, const ExtendedInterval& path_capacity,
4090 ExtendedInterval& cumul) const {
4091 DCHECK_LE(0, first_index);
4092 DCHECK_LT(first_index, last_index);
4093 DCHECK_LT(last_index, riq_[0].size());
4094 const int layer = MostSignificantBitPosition32(last_index - first_index);
4095 const int window = 1 << layer;
4096 const RIQNode& fw = riq_[layer][first_index + window - 1];
4097 const RIQNode& lw = riq_[layer][last_index];
4098
4099 // Compute the set of cumul values that can reach the last node.
4100 cumul &= fw.cumuls_to_fst;
4101 cumul &= lw.cumuls_to_fst - Delta(fw.tsum_at_fst, lw.tsum_at_fst);
4102 cumul &= path_capacity -
4103 Delta(fw.tsum_at_fst, fw.tightest_tsum & lw.tightest_tsum);
4104
4105 // We need to check for emptiness before widening the interval with transit.
4106 if (IsEmpty(cumul)) return;
4107
4108 // Transit to last node.
4109 cumul += Delta(fw.tsum_at_fst, lw.tsum_at_lst);
4110
4111 // Compute the set of cumul values that are reached from first node.
4112 cumul &= fw.cumuls_to_lst + Delta(fw.tsum_at_lst, lw.tsum_at_lst);
4113 cumul &= lw.cumuls_to_lst;
4114}
4115
4116namespace {
4117
4118class DimensionFilter : public LocalSearchFilter {
4119 public:
4120 std::string DebugString() const override { return name_; }
4121 DimensionFilter(std::unique_ptr<DimensionChecker> checker,
4122 absl::string_view dimension_name)
4123 : checker_(std::move(checker)),
4124 name_(absl::StrCat("DimensionFilter(", dimension_name, ")")) {}
4125
4126 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
4127 return checker_->Check();
4128 }
4129
4130 void Synchronize(const Assignment*, const Assignment*) override {
4131 checker_->Commit();
4132 }
4133
4134 private:
4135 std::unique_ptr<DimensionChecker> checker_;
4136 const std::string name_;
4137};
4138
4139} // namespace
4140
4141LocalSearchFilter* MakeDimensionFilter(
4142 Solver* solver, std::unique_ptr<DimensionChecker> checker,
4143 absl::string_view dimension_name) {
4144 DimensionFilter* filter =
4145 new DimensionFilter(std::move(checker), dimension_name);
4146 return solver->RevAlloc(filter);
4147}
4148
4149LightVehicleBreaksChecker::LightVehicleBreaksChecker(
4150 PathState* path_state, std::vector<PathData> path_data)
4151 : path_state_(path_state), path_data_(std::move(path_data)) {}
4152
4153void LightVehicleBreaksChecker::Relax() const {
4154 for (const int path : path_state_->ChangedPaths()) {
4155 path_data_[path].start_cumul.Relax();
4156 path_data_[path].end_cumul.Relax();
4157 path_data_[path].span.Relax();
4158 }
4159}
4160
4161bool LightVehicleBreaksChecker::Check() const {
4162 for (const int path : path_state_->ChangedPaths()) {
4163 if (!path_data_[path].span.Exists()) continue;
4164 const PathData& data = path_data_[path];
4165 const int64_t total_transit = data.total_transit.Min();
4166 int64_t lb_span = data.span.Min();
4167 // Improve bounds on span/start max/end min using time windows: breaks that
4168 // must occur inside the path have their duration accumulated into
4169 // lb_span_tw; they also widen [start_max, end_min).
4170 int64_t lb_span_tw = total_transit;
4171 int64_t start_max = data.start_cumul.Max();
4172 int64_t end_min = data.end_cumul.Min();
4173 for (const auto& br : data.vehicle_breaks) {
4174 if (!br.is_performed_min) continue;
4175 if (br.start_max < end_min && start_max < br.end_min) {
4176 CapAddTo(br.duration_min, &lb_span_tw);
4177 start_max = std::min(start_max, br.start_max);
4178 end_min = std::max(end_min, br.end_min);
4179 }
4180 }
4181 lb_span = std::max({lb_span, lb_span_tw, CapSub(end_min, start_max)});
4182 // Compute num_feasible_breaks = number of breaks that may fit into route,
4183 // and [break_start_min, break_end_max) = maximal coverage of breaks.
4184 int64_t break_start_min = kint64max;
4185 int64_t break_end_max = kint64min;
4186 int64_t start_min = data.start_cumul.Min();
4187 start_min = std::max(start_min, CapSub(end_min, data.span.Max()));
4188 int64_t end_max = data.end_cumul.Max();
4189 end_max = std::min(end_max, CapAdd(start_max, data.span.Max()));
4190 int num_feasible_breaks = 0;
4191 for (const auto& br : data.vehicle_breaks) {
4192 if (start_min <= br.start_max && br.end_min <= end_max) {
4193 break_start_min = std::min(break_start_min, br.start_min);
4194 break_end_max = std::max(break_end_max, br.end_max);
4195 ++num_feasible_breaks;
4196 }
4197 }
4198 // Improve span/start min/end max using interbreak limits: there must be
4199 // enough breaks inside the path, so that for each limit, the union of
4200 // [br.start - max_interbreak, br.end + max_interbreak) covers [start, end),
4201 // or [start, end) is shorter than max_interbreak.
4202 for (const auto& [max_interbreak, min_break_duration] :
4203 data.interbreak_limits) {
4204 // Minimal number of breaks depends on total transit:
4205 // 0 breaks for 0 <= total transit <= limit,
4206 // 1 break for limit + 1 <= total transit <= 2 * limit,
4207 // i breaks for i * limit + 1 <= total transit <= (i+1) * limit, ...
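// For instance, max_interbreak = 10 and total_transit = 25 give
// min_num_breaks = (25 - 1) / 10 = 2, consistent with the table above.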
4208 if (max_interbreak == 0) {
4209 if (total_transit > 0) return false;
4210 continue;
4211 }
4212 int64_t min_num_breaks =
4213 std::max<int64_t>(0, (total_transit - 1) / max_interbreak);
4214 if (lb_span > max_interbreak) {
4215 min_num_breaks = std::max<int64_t>(min_num_breaks, 1);
4216 }
4217 if (min_num_breaks > num_feasible_breaks) return false;
4218 lb_span = std::max(
4219 lb_span,
4220 CapAdd(total_transit, CapProd(min_num_breaks, min_break_duration)));
4221 if (min_num_breaks > 0) {
4222 if (!data.start_cumul.SetMin(CapSub(break_start_min, max_interbreak))) {
4223 return false;
4224 }
4225 if (!data.end_cumul.SetMax(CapAdd(break_end_max, max_interbreak))) {
4226 return false;
4227 }
4228 }
4229 }
4230 if (!data.span.SetMin(lb_span)) return false;
4231 // Merge span lb information directly in start/end variables.
4232 start_max = std::min(start_max, CapSub(end_max, lb_span));
4233 if (!data.start_cumul.SetMax(start_max)) return false;
4234 end_min = std::max(end_min, CapAdd(start_min, lb_span));
4235 if (!data.end_cumul.SetMin(end_min)) return false;
4236 }
4237 return true;
4238}
4239
4240namespace {
4241
4242class LightVehicleBreaksFilter : public LocalSearchFilter {
4243 public:
4244 LightVehicleBreaksFilter(std::unique_ptr<LightVehicleBreaksChecker> checker,
4245 absl::string_view dimension_name)
4246 : checker_(std::move(checker)),
4247 name_(absl::StrCat("LightVehicleBreaksFilter(", dimension_name, ")")) {}
4248
4249 std::string DebugString() const override { return name_; }
4250
4251 void Relax(const Assignment*, const Assignment*) override {
4252 checker_->Relax();
4253 }
4254
4255 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
4256 return checker_->Check();
4257 }
4258
4259 void Synchronize(const Assignment*, const Assignment*) override {
4260 checker_->Check();
4261 }
4262
4263 private:
4264 std::unique_ptr<LightVehicleBreaksChecker> checker_;
4265 const std::string name_;
4266};
4267
4268} // namespace
4269
4270LocalSearchFilter* MakeLightVehicleBreaksFilter(
4271 Solver* solver, std::unique_ptr<LightVehicleBreaksChecker> checker,
4272 absl::string_view dimension_name) {
4273 LightVehicleBreaksFilter* filter =
4274 new LightVehicleBreaksFilter(std::move(checker), dimension_name);
4275 return solver->RevAlloc(filter);
4276}
4277
4278void WeightedWaveletTree::Clear() {
4279 elements_.clear();
4280 tree_location_.clear();
4281 nodes_.clear();
4282 for (auto& layer : tree_layers_) layer.clear();
4283}
4284
4285void WeightedWaveletTree::MakeTreeFromNewElements() {
4286 // New elements are elements_[i] for i in [begin_index, end_index).
4287 const int begin_index = tree_location_.size();
4288 const int end_index = elements_.size();
4289 DCHECK_LE(begin_index, end_index);
4290 if (begin_index >= end_index) return;
4291 // Gather all heights, then sort and deduplicate them: this makes up the
4292 // list of pivot heights of the underlying tree, in inorder traversal.
4293 // TODO(user): investigate whether balancing the tree using the
4294 // number of occurrences of each height would be beneficial.
4295 // TODO(user): use a heap-like encoding for the binary search tree:
4296 // children of i at 2*i and 2*i+1. Better cache line utilization.
4297 const int old_node_size = nodes_.size();
4298 for (int i = begin_index; i < end_index; ++i) {
4299 nodes_.push_back({.pivot_height = elements_[i].height, .pivot_index = -1});
4300 }
4301 std::sort(nodes_.begin() + old_node_size, nodes_.end());
4302 nodes_.erase(std::unique(nodes_.begin() + old_node_size, nodes_.end()),
4303 nodes_.end());
4304
4305 // Remember location of the tree representation for this range of elements.
4306   // tree_location_ may be smaller than elements_; extend it if needed.
4307 const int new_node_size = nodes_.size();
4308 tree_location_.resize(end_index, {.node_begin = old_node_size,
4309 .node_end = new_node_size,
4310 .sequence_first = begin_index});
4311
4312 // Add and extend layers if needed.
4313   // The number of layers needed is 1 + ceil(log2(number of distinct heights)); see the example after this function.
4314 const int num_layers =
4315 2 + MostSignificantBitPosition32(new_node_size - old_node_size - 1);
4316 if (tree_layers_.size() <= num_layers) tree_layers_.resize(num_layers);
4317 for (int l = 0; l < num_layers; ++l) {
4318 tree_layers_[l].resize(end_index,
4319 {.prefix_sum = 0, .left_index = -1, .is_left = 0});
4320 }
4321
4322 // Fill all relevant locations of the tree, and record tree navigation
4323 // information. This recursive function has at most num_layers call depth.
4324 const auto fill_subtree = [this](auto& fill_subtree, int layer,
4325 int node_begin, int node_end,
4326 int range_begin, int range_end) {
4327 DCHECK_LT(node_begin, node_end);
4328 DCHECK_LT(range_begin, range_end);
4329 // Precompute prefix sums of range [range_begin, range_end).
4330 int64_t sum = 0;
4331 for (int i = range_begin; i < range_end; ++i) {
4332 sum += elements_[i].weight;
4333 tree_layers_[layer][i].prefix_sum = sum;
4334 }
4335 if (node_begin + 1 == node_end) return;
4336 // Range has more than one height, partition it.
4337 // Record layer l -> l+1 sequence index mapping:
4338 // - if height < pivot, record where this element will be in layer l+1.
4339     // - if height >= pivot, record where the next element with height < pivot will be in layer l+1.
4340 const int node_mid = node_begin + (node_end - node_begin) / 2;
4341 const int64_t pivot_height = nodes_[node_mid].pivot_height;
4342 int pivot_index = range_begin;
4343 for (int i = range_begin; i < range_end; ++i) {
4344 tree_layers_[layer][i].left_index = pivot_index;
4345 tree_layers_[layer][i].is_left = elements_[i].height < pivot_height;
4346 if (elements_[i].height < pivot_height) ++pivot_index;
4347 }
4348 nodes_[node_mid].pivot_index = pivot_index;
4349 // TODO(user): stable_partition allocates memory,
4350 // find a way to fill layers without this.
4351 std::stable_partition(
4352 elements_.begin() + range_begin, elements_.begin() + range_end,
4353 [pivot_height](const auto& el) { return el.height < pivot_height; });
4354
4355 fill_subtree(fill_subtree, layer + 1, node_begin, node_mid, range_begin,
4356 pivot_index);
4357 fill_subtree(fill_subtree, layer + 1, node_mid, node_end, pivot_index,
4358 range_end);
4359 };
4360 fill_subtree(fill_subtree, 0, old_node_size, new_node_size, begin_index,
4361 end_index);
4362}
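// Worked example of the layer-count bound above, with illustrative sizes: a
// new sequence with 5 distinct heights needs 1 + ceil(log2(5)) = 4 layers,
// and the formula in the code gives the same value,
// 2 + MostSignificantBitPosition32(5 - 1) = 2 + 2 = 4. A sequence with a
// single distinct height only fills the prefix-sum layer, since the recursion
// stops as soon as node_begin + 1 == node_end.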
4363
4364int64_t WeightedWaveletTree::RangeSumWithThreshold(int64_t threshold_height,
4365 int begin_index,
4366 int end_index) const {
4367 DCHECK_LE(begin_index, end_index); // Range can be empty, but not reversed.
4368 DCHECK_LE(end_index, tree_location_.size());
4369 DCHECK_EQ(tree_location_.size(), elements_.size()); // No pending elements.
4370 if (begin_index >= end_index) return 0;
4371 auto [node_begin, node_end, sequence_first_index] =
4372 tree_location_[begin_index];
4373 DCHECK_EQ(tree_location_[end_index - 1].sequence_first,
4374 sequence_first_index); // Range is included in a single sequence.
4375 ElementRange range{
4376 .range_first_index = begin_index,
4377 .range_last_index = end_index - 1,
4378 .range_first_is_node_first = begin_index == sequence_first_index};
4379 // Answer in O(1) for the common case where max(heights) < threshold.
4380 if (nodes_[node_end - 1].pivot_height < threshold_height) return 0;
4381
4382 int64_t sum = 0;
4383 int64_t min_height_of_current_node = nodes_[node_begin].pivot_height;
4384 for (int l = 0; !range.Empty(); ++l) {
4385 const ElementInfo* elements = tree_layers_[l].data();
4386 if (threshold_height <= min_height_of_current_node) {
4387 // Query or subquery threshold covers all elements of this node.
4388       // This makes the query O(1) when its threshold is <= min(heights).
4389 sum += range.Sum(elements);
4390 return sum;
4391 } else if (node_begin + 1 == node_end) {
4392 // This node is a leaf, its height is < threshold, stop descent here.
4393 return sum;
4394 }
4395
4396 const int node_mid = node_begin + (node_end - node_begin) / 2;
4397 const auto [pivot_height, pivot_index] = nodes_[node_mid];
4398 const ElementRange right = range.RightSubRange(elements, pivot_index);
4399 if (threshold_height < pivot_height) {
4400 // All elements of the right child have their height above the threshold,
4401       // so we can project the range to the right child and add the whole subrange.
4402 if (!right.Empty()) sum += right.Sum(tree_layers_[l + 1].data());
4403 // Go to the left child.
4404 range = range.LeftSubRange(elements);
4405 node_end = node_mid;
4406 } else {
4407 // Go to the right child.
4408 range = right;
4409 node_begin = node_mid;
4410 min_height_of_current_node = pivot_height;
4411 }
4412 }
4413 return sum;
4414}
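// Usage sketch of the query above (illustrative values, starting from an
// empty tree; query indices refer to PushBack order):
//   WeightedWaveletTree tree;
//   tree.PushBack(/*height=*/3, /*weight=*/10);
//   tree.PushBack(/*height=*/1, /*weight=*/5);
//   tree.PushBack(/*height=*/4, /*weight=*/7);
//   tree.MakeTreeFromNewElements();
//   // Sum of weights of elements in [0, 3) with height >= 2: 10 + 7 = 17.
//   const int64_t sum = tree.RangeSumWithThreshold(/*threshold_height=*/2,
//                                                  /*begin_index=*/0,
//                                                  /*end_index=*/3);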
4415
4416 PathEnergyCostChecker::PathEnergyCostChecker(
4417     const PathState* path_state, std::vector<int64_t> force_start_min,
4418 std::vector<int64_t> force_end_min, std::vector<int> force_class,
4419 std::vector<const std::function<int64_t(int64_t)>*> force_per_class,
4420 std::vector<int> distance_class,
4421 std::vector<const std::function<int64_t(int64_t, int64_t)>*>
4422 distance_per_class,
4423 std::vector<EnergyCost> path_energy_cost,
4424 std::vector<bool> path_has_cost_when_empty)
4425 : path_state_(path_state),
4426 force_start_min_(std::move(force_start_min)),
4427 force_end_min_(std::move(force_end_min)),
4428 force_class_(std::move(force_class)),
4429 distance_class_(std::move(distance_class)),
4430 force_per_class_(std::move(force_per_class)),
4431 distance_per_class_(std::move(distance_per_class)),
4432 path_energy_cost_(std::move(path_energy_cost)),
4433 path_has_cost_when_empty_(std::move(path_has_cost_when_empty)),
4434 maximum_range_query_size_(4 * path_state_->NumNodes()),
4435 force_rmq_index_of_node_(path_state_->NumNodes()),
4436 threshold_query_index_of_node_(path_state_->NumNodes()) {
4437 const int num_nodes = path_state_->NumNodes();
4438 cached_force_.resize(num_nodes);
4439 cached_distance_.resize(num_nodes);
4440 FullCacheAndPrecompute();
4441 committed_total_cost_ = 0;
4442 committed_path_cost_.assign(path_state_->NumPaths(), 0);
4443 const int num_paths = path_state_->NumPaths();
4444 for (int path = 0; path < num_paths; ++path) {
4445 committed_path_cost_[path] = ComputePathCost(path);
4446 CapAddTo(committed_path_cost_[path], &committed_total_cost_);
4447 }
4448 accepted_total_cost_ = committed_total_cost_;
4449}
4450
4451 bool PathEnergyCostChecker::Check() {
4452   if (path_state_->IsInvalid()) return true;
4453 accepted_total_cost_ = committed_total_cost_;
4454 for (const int path : path_state_->ChangedPaths()) {
4455 accepted_total_cost_ =
4456 CapSub(accepted_total_cost_, committed_path_cost_[path]);
4457 CapAddTo(ComputePathCost(path), &accepted_total_cost_);
4458 if (accepted_total_cost_ == kint64max) return false;
4459 }
4460 return true;
4461}
4462
4463void PathEnergyCostChecker::CacheAndPrecomputeRangeQueriesOfPath(int path) {
4464 // Cache force and distance evaluations,
4465 // precompute force RMQ, energy/distance threshold queries.
4466 const auto& force_evaluator = *force_per_class_[force_class_[path]];
4467 const auto& distance_evaluator = *distance_per_class_[distance_class_[path]];
4468 int force_index = force_rmq_.TableSize();
4469 int threshold_index = energy_query_.TreeSize();
4470 int64_t total_force = 0;
4471
4472 const int start_node = path_state_->Start(path);
4473 int prev_node = start_node;
4474
4475 for (const int node : path_state_->Nodes(path)) {
4476 if (prev_node != node) {
4477 const int64_t distance = distance_evaluator(prev_node, node);
4478 cached_distance_[prev_node] = distance;
4479 energy_query_.PushBack(total_force, total_force * distance);
4480 distance_query_.PushBack(total_force, distance);
4481 prev_node = node;
4482 }
4483 threshold_query_index_of_node_[node] = threshold_index++;
4484 force_rmq_.PushBack(total_force);
4485 force_rmq_index_of_node_[node] = force_index++;
4486 const int64_t force = force_evaluator(node);
4487 cached_force_[node] = force;
4488 total_force += force;
4489 }
4490 force_rmq_.MakeTableFromNewElements();
4491 energy_query_.MakeTreeFromNewElements();
4492 distance_query_.MakeTreeFromNewElements();
4493}
4494
4495void PathEnergyCostChecker::IncrementalCacheAndPrecompute() {
4496 for (const int path : path_state_->ChangedPaths()) {
4497 CacheAndPrecomputeRangeQueriesOfPath(path);
4498 }
4499}
4500
4501void PathEnergyCostChecker::FullCacheAndPrecompute() {
4502 force_rmq_.Clear();
4503 // Rebuild all paths.
4504 const int num_paths = path_state_->NumPaths();
4505 for (int path = 0; path < num_paths; ++path) {
4506 CacheAndPrecomputeRangeQueriesOfPath(path);
4507 }
4508}
4509
4510 void PathEnergyCostChecker::Commit() {
4511   int change_size = path_state_->ChangedPaths().size();
4512 for (const int path : path_state_->ChangedPaths()) {
4513 for (const auto chain : path_state_->Chains(path)) {
4514 change_size += chain.NumNodes();
4515 }
4516 committed_total_cost_ =
4517 CapSub(committed_total_cost_, committed_path_cost_[path]);
4518 committed_path_cost_[path] = ComputePathCost(path);
4519 CapAddTo(committed_path_cost_[path], &committed_total_cost_);
4520 }
4521
4522 const int current_layer_size = force_rmq_.TableSize();
4523 if (current_layer_size + change_size <= maximum_range_query_size_) {
4524 IncrementalCacheAndPrecompute();
4525 } else {
4526 FullCacheAndPrecompute();
4527 }
4528}
4529
4530int64_t PathEnergyCostChecker::ComputePathCost(int64_t path) const {
4531 const int path_force_class = force_class_[path];
4532 const auto& force_evaluator = *force_per_class_[path_force_class];
4533
4534 // Find minimal force at which to start.
4535 int64_t total_force = force_start_min_[path];
4536 int64_t min_force = total_force;
4537 int num_path_nodes = 0;
4538 int prev_node = path_state_->Start(path);
4539 for (const auto chain : path_state_->Chains(path)) {
4540 num_path_nodes += chain.NumNodes();
4541 // Add force needed to go from prev_node to chain.First() if needed.
4542 if (chain.First() != prev_node) {
4543 const int64_t force_to_node = force_evaluator(prev_node);
4544 CapAddTo(force_to_node, &total_force);
4545 min_force = std::min(min_force, total_force);
4546 prev_node = chain.First();
4547 }
4548
4549 // Add force needed to go from chain.First() to chain.Last().
4550 const int chain_path = path_state_->Path(chain.First());
4551 const int chain_force_class =
4552 chain_path < 0 ? -1 : force_class_[chain_path];
4553 const bool force_is_cached = chain_force_class == path_force_class;
4554 if (force_is_cached && chain.NumNodes() >= 2) {
4555 const int first_index = force_rmq_index_of_node_[chain.First()];
4556 const int last_index = force_rmq_index_of_node_[chain.Last()];
4557 // Get total force at first, last and lowest point of the chain.
4558 const int64_t first_total_force = force_rmq_.array()[first_index];
4559 const int64_t last_total_force = force_rmq_.array()[last_index];
4560 const int64_t min_total_force =
4561 force_rmq_.RangeMinimum(first_index, last_index);
4562 // Compute running minimum total force and total force at chain.Last().
4563 min_force = std::min(min_force,
4564 total_force - first_total_force + min_total_force);
4565 CapAddTo(last_total_force - first_total_force, &total_force);
4566 prev_node = chain.Last();
4567 } else {
4568 for (const int node : chain.WithoutFirstNode()) {
4569 const int64_t force = force_is_cached ? cached_force_[prev_node]
4570 : force_evaluator(prev_node);
4571 CapAddTo(force, &total_force);
4572 min_force = std::min(min_force, total_force);
4573 prev_node = node;
4574 }
4575 }
4576 }
4577 if (num_path_nodes == 2 && !path_has_cost_when_empty_[path]) return 0;
4578 // Force must be offset in order to be all of:
4579 // - >= force_start_min_[path] at start
4580 // - >= force_end_min_[path] at end
4581 // - >= 0 at all intermediate nodes
4582   // We restart the accumulator at force_start_min_[path] plus the minimal offset that allows this.
4583 total_force = std::max<int64_t>(
4584 {0, CapOpp(min_force), CapSub(force_end_min_[path], total_force)});
4585 CapAddTo(force_start_min_[path], &total_force);
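  // Worked example of the offset above, with illustrative numbers: if
  // force_start_min_[path] = 2, the first pass starts its accumulator at 2;
  // suppose it dips to min_force = -3 and ends at total_force = 4, with
  // force_end_min_[path] = 9. The offset is max(0, 3, 9 - 4) = 5, so the
  // energy pass below restarts at 2 + 5 = 7, which keeps the force >= 2 at
  // the start, >= 0 everywhere, and >= 9 at the end.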
4586
4587 // Compute energy, below and above threshold.
4588 const int path_distance_class = distance_class_[path];
4589 const auto& distance_evaluator = *distance_per_class_[path_distance_class];
4590 const EnergyCost& cost = path_energy_cost_[path];
4591 int64_t energy_below = 0;
4592 int64_t energy_above = 0;
4593 prev_node = path_state_->Start(path);
4594 for (const auto chain : path_state_->Chains(path)) {
4595 // Bring cost computation to first node of the chain.
4596 if (chain.First() != prev_node) {
4597 const int64_t distance = distance_evaluator(prev_node, chain.First());
4598 CapAddTo(force_evaluator(prev_node), &total_force);
4599 CapAddTo(CapProd(std::min(cost.threshold, total_force), distance),
4600 &energy_below);
4601 const int64_t force_above =
4602 std::max<int64_t>(0, CapSub(total_force, cost.threshold));
4603 CapAddTo(CapProd(force_above, distance), &energy_above);
4604 prev_node = chain.First();
4605 }
4606
4607 // Inside chain, try to reuse cached forces and distances instead of more
4608 // costly calls to evaluators.
4609 const int chain_path = path_state_->Path(chain.First());
4610 const int chain_force_class =
4611 chain_path < 0 ? -1 : force_class_[chain_path];
4612 const int chain_distance_class =
4613 chain_path < 0 ? -1 : distance_class_[chain_path];
4614 const bool force_is_cached = chain_force_class == path_force_class;
4615 const bool distance_is_cached = chain_distance_class == path_distance_class;
4616
4617 if (force_is_cached && distance_is_cached && chain.NumNodes() >= 2) {
4618 const int first_index = threshold_query_index_of_node_[chain.First()];
4619 const int last_index = threshold_query_index_of_node_[chain.Last()];
4620
4621 const int64_t zero_total_energy = energy_query_.RangeSumWithThreshold(
4622 kint64min, first_index, last_index);
4623 const int64_t total_distance = distance_query_.RangeSumWithThreshold(
4624 kint64min, first_index, last_index);
4625
4626 // In the following, zero_ values are those computed with the hypothesis
4627 // that the force at the start node is zero.
4628 // The total_force at chain.First() is in general not the same in the
4629 // candidate path and in the zero_ case. We can still query the energy and
4630 // distance totals incurred by transitions above the actual threshold
4631 // during the chain, by offsetting the queries to zero_threshold.
4632 const int64_t zero_total_force_first =
4633 force_rmq_.array()[force_rmq_index_of_node_[chain.First()]];
4634 const int64_t zero_threshold =
4635 CapSub(cost.threshold, CapSub(total_force, zero_total_force_first));
4636 // "High" transitions are those that occur with a force at or above the
4637 // threshold. "High" energy is the sum of energy values during high
4638 // transitions, same for "high" distance.
4639 const int64_t zero_high_energy = energy_query_.RangeSumWithThreshold(
4640 zero_threshold, first_index, last_index);
4641 const int64_t zero_high_distance = distance_query_.RangeSumWithThreshold(
4642 zero_threshold, first_index, last_index);
4643 // "Above" energy is the energy caused by total_force above the threshold.
4644 // Since "above" energy is only incurred during "high" transitions, it can
4645 // be computed from "high" energy knowing distance and threshold.
4646 const int64_t zero_energy_above =
4647 CapSub(zero_high_energy, CapProd(zero_high_distance, zero_threshold));
4648 // To compute the energy values of the candidate, the force dimension
4649 // must be offset back to the candidate's total force.
4650       // Only the "below" energy is changed by the offset; the zero_ energy
4651       // above the zero_ threshold is by construction equal to the candidate
4652       // energy above the actual threshold (see the worked example after this function).
4653 CapAddTo(zero_energy_above, &energy_above);
4654 CapAddTo(CapAdd(CapSub(zero_total_energy, zero_energy_above),
4655 CapProd(total_distance,
4656 CapSub(cost.threshold, zero_threshold))),
4657 &energy_below);
4658 // We reuse the partial sum of the force query to compute the sum of
4659       // forces incurred by the chain.
4660 const int64_t zero_total_force_last =
4661 force_rmq_.array()[force_rmq_index_of_node_[chain.Last()]];
4662 CapAddTo(CapSub(zero_total_force_last, zero_total_force_first),
4663 &total_force);
4664 prev_node = chain.Last();
4665 } else {
4666 for (const int node : chain.WithoutFirstNode()) {
4667 const int64_t force = force_is_cached ? cached_force_[prev_node]
4668 : force_evaluator(prev_node);
4669 const int64_t distance = distance_is_cached
4670 ? cached_distance_[prev_node]
4671 : distance_evaluator(prev_node, node);
4672 CapAddTo(force, &total_force);
4673 CapAddTo(CapProd(std::min(cost.threshold, total_force), distance),
4674 &energy_below);
4675 const int64_t force_above =
4676 std::max<int64_t>(0, CapSub(total_force, cost.threshold));
4677 CapAddTo(CapProd(force_above, distance), &energy_above);
4678 prev_node = node;
4679 }
4680 }
4681 }
4682
4683 return CapAdd(CapProd(energy_below, cost.cost_per_unit_below_threshold),
4684 CapProd(energy_above, cost.cost_per_unit_above_threshold));
4685}
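// Worked example of the cached-chain energy computation above (the branch
// using RangeSumWithThreshold), with illustrative numbers: take
// cost.threshold = 10, a candidate total_force of 7 at chain.First() and a
// committed zero_total_force_first of 3, so zero_threshold = 10 - (7 - 3) = 6.
// With two cached transitions of (zero force, distance) = (5, 2) and (8, 1):
// zero_total_energy = 5*2 + 8*1 = 18 and total_distance = 3; only the second
// transition has force >= 6, so zero_high_energy = 8, zero_high_distance = 1,
// and zero_energy_above = 8 - 1*6 = 2. The chain thus contributes 2 to
// energy_above and (18 - 2) + 3*(10 - 6) = 28 to energy_below, which matches
// the direct computation min(10, 9)*2 + min(10, 12)*1 = 28.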
4686
4687namespace {
4688
4689class PathEnergyCostFilter : public LocalSearchFilter {
4690 public:
4691 std::string DebugString() const override { return name_; }
4692 PathEnergyCostFilter(std::unique_ptr<PathEnergyCostChecker> checker,
4693 absl::string_view energy_name)
4694 : checker_(std::move(checker)),
4695 name_(absl::StrCat("PathEnergyCostFilter(", energy_name, ")")) {}
4696
4697 bool Accept(const Assignment*, const Assignment*, int64_t objective_min,
4698 int64_t objective_max) override {
4699 if (objective_max > kint64max / 2) return true;
4700 if (!checker_->Check()) return false;
4701 const int64_t cost = checker_->AcceptedCost();
4702 return objective_min <= cost && cost <= objective_max;
4703 }
4704
4705 void Synchronize(const Assignment*, const Assignment*) override {
4706 checker_->Commit();
4707 }
4708
4709 int64_t GetSynchronizedObjectiveValue() const override {
4710 return checker_->CommittedCost();
4711 }
4712 int64_t GetAcceptedObjectiveValue() const override {
4713 return checker_->AcceptedCost();
4714 }
4715
4716 private:
4717 std::unique_ptr<PathEnergyCostChecker> checker_;
4718 const std::string name_;
4719};
4720
4721} // namespace
4722
4723 LocalSearchFilter* MakePathEnergyCostFilter(
4724     Solver* solver, std::unique_ptr<PathEnergyCostChecker> checker,
4725 absl::string_view dimension_name) {
4726 PathEnergyCostFilter* filter =
4727 new PathEnergyCostFilter(std::move(checker), dimension_name);
4728 return solver->RevAlloc(filter);
4729}
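// Usage sketch (illustrative; `solver`, `path_state` and the per-path vectors
// are assumed to come from the surrounding routing model setup):
//   auto checker = std::make_unique<PathEnergyCostChecker>(
//       path_state, force_start_min, force_end_min, force_class,
//       force_per_class, distance_class, distance_per_class, path_energy_cost,
//       path_has_cost_when_empty);
//   LocalSearchFilter* filter =
//       MakePathEnergyCostFilter(solver, std::move(checker), "Energy");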
4730
4731// TODO(user): Implement same-vehicle filter. Could be merged with node
4732// precedence filter.
4733
4734} // namespace operations_research