Google OR-Tools v9.14
a fast and portable software suite for combinatorial optimization
routing_filters.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// Implementation of local search filters for routing models.
15
17
18#include <stddef.h>
19
20#include <algorithm>
21#include <cstdint>
22#include <deque>
23#include <functional>
24#include <limits>
25#include <memory>
26#include <optional>
27#include <string>
28#include <tuple>
29#include <utility>
30#include <vector>
31
32#include "absl/algorithm/container.h"
33#include "absl/container/btree_set.h"
34#include "absl/container/flat_hash_map.h"
35#include "absl/container/flat_hash_set.h"
36#include "absl/flags/flag.h"
37#include "absl/log/check.h"
38#include "absl/strings/str_cat.h"
39#include "absl/strings/string_view.h"
40#include "absl/types/span.h"
44#include "ortools/base/types.h"
49#include "ortools/constraint_solver/routing_parameters.pb.h"
51#include "ortools/util/bitset.h"
55
56ABSL_FLAG(bool, routing_strong_debug_checks, false,
57 "Run stronger checks in debug; these stronger tests might change "
58 "the complexity of the code in particular.");
59
60namespace operations_research {
61
62namespace {
63// Route constraint filter
64
65class RouteConstraintFilter : public BasePathFilter {
66 public:
67 explicit RouteConstraintFilter(const RoutingModel& routing_model)
68 : BasePathFilter(routing_model.Nexts(),
69 routing_model.Size() + routing_model.vehicles(),
70 routing_model.GetPathsMetadata()),
71 routing_model_(routing_model),
72 current_vehicle_cost_(0),
73 delta_vehicle_cost_(0),
74 current_vehicle_costs_(routing_model.vehicles(), 0) {
75 start_to_vehicle_.resize(Size(), -1);
76 vehicle_to_start_.resize(routing_model.vehicles());
77 for (int v = 0; v < routing_model.vehicles(); v++) {
78 const int64_t start = routing_model.Start(v);
79 start_to_vehicle_[start] = v;
80 vehicle_to_start_[v] = start;
81 }
82 }
83 ~RouteConstraintFilter() override = default;
84 std::string DebugString() const override { return "RouteConstraintFilter"; }
85 int64_t GetSynchronizedObjectiveValue() const override {
86 return current_vehicle_cost_;
87 }
88 int64_t GetAcceptedObjectiveValue() const override {
89 return lns_detected() ? 0 : delta_vehicle_cost_;
90 }
91
92 private:
93 void OnSynchronizePathFromStart(int64_t start) override {
94 route_.clear();
95 int64_t node = start;
96 while (node < Size()) {
97 route_.push_back(node);
98 node = Value(node);
99 }
100 route_.push_back(node);
101 std::optional<int64_t> route_cost = routing_model_.GetRouteCost(route_);
102 DCHECK(route_cost.has_value());
103 current_vehicle_costs_[start_to_vehicle_[start]] = route_cost.value();
104 }
105 void OnAfterSynchronizePaths() override {
106 current_vehicle_cost_ = 0;
107 for (int vehicle = 0; vehicle < vehicle_to_start_.size(); vehicle++) {
108 const int64_t start = vehicle_to_start_[vehicle];
109 DCHECK_EQ(vehicle, start_to_vehicle_[start]);
110 if (!IsVarSynced(start)) {
111 return;
112 }
113 CapAddTo(current_vehicle_costs_[vehicle], &current_vehicle_cost_);
114 }
115 }
116 bool InitializeAcceptPath() override {
117 delta_vehicle_cost_ = current_vehicle_cost_;
118 return true;
119 }
120 bool AcceptPath(int64_t path_start, int64_t /*chain_start*/,
121 int64_t /*chain_end*/) override {
122 delta_vehicle_cost_ =
123 CapSub(delta_vehicle_cost_,
124 current_vehicle_costs_[start_to_vehicle_[path_start]]);
125 route_.clear();
126 int64_t node = path_start;
127 while (node < Size()) {
128 route_.push_back(node);
129 node = GetNext(node);
130 }
131 route_.push_back(node);
132 std::optional<int64_t> route_cost = routing_model_.GetRouteCost(route_);
133 if (!route_cost.has_value()) {
134 return false;
135 }
136 CapAddTo(route_cost.value(), &delta_vehicle_cost_);
137 return true;
138 }
139 bool FinalizeAcceptPath(int64_t /*objective_min*/,
140 int64_t objective_max) override {
141 return delta_vehicle_cost_ <= objective_max;
142 }
143
144 const RoutingModel& routing_model_;
145 int64_t current_vehicle_cost_;
146 int64_t delta_vehicle_cost_;
147 std::vector<int64_t> current_vehicle_costs_;
148 std::vector<int64_t> vehicle_to_start_;
149 std::vector<int> start_to_vehicle_;
150 std::vector<int64_t> route_;
151};
152
153} // namespace
154
155IntVarLocalSearchFilter* MakeRouteConstraintFilter(
156 const RoutingModel& routing_model) {
157 return routing_model.solver()->RevAlloc(
158 new RouteConstraintFilter(routing_model));
159}
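// Worked example of the cost bookkeeping above (hypothetical values): with two
// vehicles whose synchronized route costs are 10 and 5, current_vehicle_cost_
// is 15. A move that only changes the second route, giving it a new route cost
// of 7, yields delta_vehicle_cost_ = 15 - 5 + 7 = 17 in AcceptPath(), and
// FinalizeAcceptPath() accepts the move iff 17 <= objective_max. If
// GetRouteCost() returns nullopt for the new route, the move is rejected
// outright.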
160
161namespace {
162
163// Max active vehicles filter.
164
165class MaxActiveVehiclesFilter : public IntVarLocalSearchFilter {
166 public:
167 explicit MaxActiveVehiclesFilter(const RoutingModel& routing_model)
168 : IntVarLocalSearchFilter(routing_model.Nexts()),
169 routing_model_(routing_model),
170 is_active_(routing_model.vehicles(), false),
171 active_vehicles_(0) {}
172 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
173 int64_t /*objective_min*/, int64_t /*objective_max*/) override {
174 const int64_t kUnassigned = -1;
175 const Assignment::IntContainer& container = delta->IntVarContainer();
176 int current_active_vehicles = active_vehicles_;
177 for (const IntVarElement& new_element : container.elements()) {
178 IntVar* const var = new_element.Var();
179 int64_t index = kUnassigned;
180 if (FindIndex(var, &index) && routing_model_.IsStart(index)) {
181 if (new_element.Min() != new_element.Max()) {
182 // LNS detected.
183 return true;
184 }
185 const int vehicle = routing_model_.VehicleIndex(index);
186 const bool is_active =
187 (new_element.Min() != routing_model_.End(vehicle));
188 if (is_active && !is_active_[vehicle]) {
189 ++current_active_vehicles;
190 } else if (!is_active && is_active_[vehicle]) {
191 --current_active_vehicles;
192 }
193 }
194 }
195 return current_active_vehicles <=
196 routing_model_.GetMaximumNumberOfActiveVehicles();
197 }
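 // Worked example (hypothetical): with 3 vehicles, vehicles 0 and 1 active at
 // the last Synchronize() (active_vehicles_ == 2) and a maximum of 2 active
 // vehicles, a delta that binds vehicle 2's start to a client node raises
 // current_active_vehicles to 3 and the move is rejected; if the same delta
 // also rebinds vehicle 0's start to its end, the count stays at 2 and the
 // move is accepted. An unbound start variable (Min != Max) is treated as an
 // LNS fragment and accepted without counting.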
198
199 private:
200 void OnSynchronize(const Assignment* /*delta*/) override {
201 active_vehicles_ = 0;
202 for (int i = 0; i < routing_model_.vehicles(); ++i) {
203 const int index = routing_model_.Start(i);
204 if (IsVarSynced(index) && Value(index) != routing_model_.End(i)) {
205 is_active_[i] = true;
206 ++active_vehicles_;
207 } else {
208 is_active_[i] = false;
209 }
210 }
211 }
212
213 const RoutingModel& routing_model_;
214 std::vector<bool> is_active_;
215 int active_vehicles_;
216};
217} // namespace
218
219IntVarLocalSearchFilter* MakeMaxActiveVehiclesFilter(
220 const RoutingModel& routing_model) {
221 return routing_model.solver()->RevAlloc(
222 new MaxActiveVehiclesFilter(routing_model));
223}
224
225namespace {
226
227class ActiveNodeGroupFilter : public IntVarLocalSearchFilter {
228 public:
229 explicit ActiveNodeGroupFilter(const RoutingModel& routing_model)
230 : IntVarLocalSearchFilter(routing_model.Nexts()),
231 routing_model_(routing_model),
232 active_count_per_group_(routing_model.GetSameActivityGroupsCount(),
233 {.active = 0, .unknown = 0}),
234 node_is_active_(routing_model.Nexts().size(), false),
235 node_is_unknown_(routing_model.Nexts().size(), false) {}
236
237 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
238 int64_t /*objective_min*/, int64_t /*objective_max*/) override {
239 active_count_per_group_.Revert();
240 const Assignment::IntContainer& container = delta->IntVarContainer();
241 for (const IntVarElement& new_element : container.elements()) {
242 IntVar* const var = new_element.Var();
243 int64_t index = -1;
244 if (!FindIndex(var, &index)) continue;
245 const int group = routing_model_.GetSameActivityGroupOfIndex(index);
246 ActivityCounts counts = active_count_per_group_.Get(group);
247 // Change contribution to counts: remove old state, add new state.
248 if (node_is_unknown_[index]) --counts.unknown;
249 if (node_is_active_[index]) --counts.active;
250 if (new_element.Min() != new_element.Max()) {
251 ++counts.unknown;
252 } else if (new_element.Min() != index) {
253 ++counts.active;
254 }
255 active_count_per_group_.Set(group, counts);
256 }
257 for (const int group : active_count_per_group_.ChangedIndices()) {
258 const ActivityCounts counts = active_count_per_group_.Get(group);
259 const int group_size =
260 routing_model_.GetSameActivityIndicesOfGroup(group).size();
261 // The group constraint is respected iff either 0 or the group size lies
262 // inside the interval [num_active, num_active + num_unknown].
263 if (counts.active == 0) continue;
264 if (counts.active <= group_size &&
265 group_size <= counts.active + counts.unknown) {
266 continue;
267 }
268 return false;
269 }
270 return true;
271 }
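// Worked example (hypothetical): a same-activity group of size 3 with
// counts {active = 1, unknown = 1} fails the test above, since active > 0 and
// group_size = 3 is outside [active, active + unknown] = [1, 2]: neither
// "all 3 active" nor "all 3 inactive" is still reachable, so the delta is
// rejected. With counts {active = 1, unknown = 2}, 3 lies in [1, 3] and the
// group stays feasible.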
272 std::string DebugString() const override { return "ActiveNodeGroupFilter"; }
273
274 private:
275 void OnSynchronize(const Assignment* /*delta*/) override {
276 const int num_groups = routing_model_.GetSameActivityGroupsCount();
277 for (int group = 0; group < num_groups; ++group) {
278 ActivityCounts counts = {.active = 0, .unknown = 0};
279 for (int node : routing_model_.GetSameActivityIndicesOfGroup(group)) {
280 if (IsVarSynced(node)) {
281 const bool is_active = (Value(node) != node);
282 node_is_active_[node] = is_active;
283 node_is_unknown_[node] = false;
284 counts.active += is_active ? 1 : 0;
285 } else {
286 ++counts.unknown;
287 node_is_unknown_[node] = true;
288 node_is_active_[node] = false;
289 }
290 }
291 active_count_per_group_.Set(group, counts);
292 }
293 active_count_per_group_.Commit();
294 }
295
296 const RoutingModel& routing_model_;
297 struct ActivityCounts {
298 int active;
299 int unknown;
300 };
301 CommittableArray<ActivityCounts> active_count_per_group_;
302 // node_is_active_[node] is true iff node was synced and active at last
303 // Synchronize().
304 std::vector<bool> node_is_active_;
305 // node_is_unknown_[node] is true iff node was not synced at last
306 // Synchronize().
307 std::vector<bool> node_is_unknown_;
308};
309
310} // namespace
311
312IntVarLocalSearchFilter* MakeActiveNodeGroupFilter(
313 const RoutingModel& routing_model) {
314 return routing_model.solver()->RevAlloc(
315 new ActiveNodeGroupFilter(routing_model));
316}
317
318namespace {
319
320// Node disjunction filter class.
321class NodeDisjunctionFilter : public IntVarLocalSearchFilter {
322 public:
323 explicit NodeDisjunctionFilter(const RoutingModel& routing_model,
324 bool filter_cost)
325 : IntVarLocalSearchFilter(routing_model.Nexts()),
326 routing_model_(routing_model),
327 count_per_disjunction_(routing_model.GetNumberOfDisjunctions(),
328 {.active = 0, .inactive = 0}),
329 synchronized_objective_value_(std::numeric_limits<int64_t>::min()),
330 accepted_objective_value_(std::numeric_limits<int64_t>::min()),
331 filter_cost_(filter_cost),
332 has_mandatory_disjunctions_(routing_model.HasMandatoryDisjunctions()) {}
333
334 using Disjunction = RoutingModel::DisjunctionIndex;
335
336 bool Accept(const Assignment* delta, const Assignment* /*deltadelta*/,
337 int64_t /*objective_min*/, int64_t objective_max) override {
338 count_per_disjunction_.Revert();
339 bool lns_detected = false;
340 // Update the active/inactive counts of each modified disjunction.
341 for (const IntVarElement& element : delta->IntVarContainer().elements()) {
342 int64_t node = -1;
343 if (!FindIndex(element.Var(), &node)) continue;
344 lns_detected |= element.Min() != element.Max();
345 // Compute difference in how this node contributes to activity counts.
346 const bool is_var_synced = IsVarSynced(node);
347 const bool was_active = is_var_synced && Value(node) != node;
348 const bool is_active = node < element.Min() || element.Max() < node;
349 ActivityCount contribution_delta = {.active = 0, .inactive = 0};
350 if (is_var_synced) {
351 contribution_delta.active -= was_active;
352 contribution_delta.inactive -= !was_active;
353 }
354 contribution_delta.active += is_active;
355 contribution_delta.inactive += !is_active;
356 // Common shortcut: if the change is neutral, counts stay the same.
357 if (contribution_delta.active == 0 && contribution_delta.inactive == 0) {
358 continue;
359 }
360 // Change counts of all disjunctions affected by this node.
361 for (const Disjunction disjunction :
362 routing_model_.GetDisjunctionIndices(node)) {
363 ActivityCount new_count =
364 count_per_disjunction_.Get(disjunction.value());
365 new_count.active += contribution_delta.active;
366 new_count.inactive += contribution_delta.inactive;
367 count_per_disjunction_.Set(disjunction.value(), new_count);
368 }
369 }
370 // Check if any disjunction has too many active nodes.
371 for (const int index : count_per_disjunction_.ChangedIndices()) {
372 if (count_per_disjunction_.Get(index).active >
373 routing_model_.GetDisjunctionMaxCardinality(Disjunction(index))) {
374 return false;
375 }
376 }
377 if (lns_detected || (!filter_cost_ && !has_mandatory_disjunctions_)) {
378 accepted_objective_value_ = 0;
379 return true;
380 }
381 // Update penalty costs for disjunctions.
382 accepted_objective_value_ = synchronized_objective_value_;
383 for (const int index : count_per_disjunction_.ChangedIndices()) {
384 // If num inactives did not change, skip. Common shortcut.
385 const int old_inactives =
386 count_per_disjunction_.GetCommitted(index).inactive;
387 const int new_inactives = count_per_disjunction_.Get(index).inactive;
388 if (old_inactives == new_inactives) continue;
389 // If this disjunction has no penalty for inactive nodes, skip.
390 const Disjunction disjunction(index);
391 const int64_t penalty = routing_model_.GetDisjunctionPenalty(disjunction);
392 if (penalty == 0) continue;
393
394 // Compute the new cost of activity bound violations.
395 const int max_inactives =
396 routing_model_.GetDisjunctionNodeIndices(disjunction).size() -
397 routing_model_.GetDisjunctionMaxCardinality(disjunction);
398 int new_violation = std::max(0, new_inactives - max_inactives);
399 int old_violation = std::max(0, old_inactives - max_inactives);
400 // If nodes are mandatory, there can be no violation.
401 if (penalty < 0 && new_violation > 0) return false;
402 if (routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction) ==
403 RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE) {
404 new_violation = std::min(1, new_violation);
405 old_violation = std::min(1, old_violation);
406 }
407 CapAddTo(CapProd(penalty, (new_violation - old_violation)),
408 &accepted_objective_value_);
409 }
410 // Only compare to max as a cost lower bound is computed.
411 return accepted_objective_value_ <= objective_max;
412 }
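// Worked example of the penalty update above (hypothetical values): an
// optional disjunction with 4 nodes, max cardinality 1 and penalty 100
// tolerates max_inactives = 3. A delta that deactivates the last active node
// moves the inactive count from 3 to 4, so the violation goes from 0 to 1 and
// 100 is added to accepted_objective_value_. With PENALIZE_ONCE, both
// violations are capped at 1, so further violations beyond the first add no
// cost. A negative penalty marks the disjunction as mandatory: any new
// violation rejects the delta.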
413 std::string DebugString() const override { return "NodeDisjunctionFilter"; }
414 int64_t GetSynchronizedObjectiveValue() const override {
415 return synchronized_objective_value_;
416 }
417 int64_t GetAcceptedObjectiveValue() const override {
418 return accepted_objective_value_;
419 }
420
421 private:
422 void OnSynchronize(const Assignment* /*delta*/) override {
423 synchronized_objective_value_ = 0;
424 count_per_disjunction_.Revert();
425 const int num_disjunctions = routing_model_.GetNumberOfDisjunctions();
426 for (Disjunction disjunction(0); disjunction < num_disjunctions;
427 ++disjunction) {
428 // Count number of active/inactive nodes of this disjunction.
429 ActivityCount count = {.active = 0, .inactive = 0};
430 const auto& nodes = routing_model_.GetDisjunctionNodeIndices(disjunction);
431 for (const int64_t node : nodes) {
432 if (!IsVarSynced(node)) continue;
433 const int is_active = Value(node) != node;
434 count.active += is_active;
435 count.inactive += !is_active;
436 }
437 count_per_disjunction_.Set(disjunction.value(), count);
438 // Add penalty of this disjunction to total cost.
439 if (!filter_cost_) continue;
440 const int64_t penalty = routing_model_.GetDisjunctionPenalty(disjunction);
441 const int max_actives =
442 routing_model_.GetDisjunctionMaxCardinality(disjunction);
443 int violation = count.inactive - (nodes.size() - max_actives);
444 if (violation > 0 && penalty > 0) {
445 if (routing_model_.GetDisjunctionPenaltyCostBehavior(disjunction) ==
446 RoutingModel::PenaltyCostBehavior::PENALIZE_ONCE) {
447 violation = std::min(1, violation);
448 }
449 CapAddTo(CapProd(penalty, violation), &synchronized_objective_value_);
450 }
451 }
452 count_per_disjunction_.Commit();
453 accepted_objective_value_ = synchronized_objective_value_;
454 }
455
456 const RoutingModel& routing_model_;
457 struct ActivityCount {
458 int active = 0;
459 int inactive = 0;
460 };
461 CommittableArray<ActivityCount> count_per_disjunction_;
462 int64_t synchronized_objective_value_;
463 int64_t accepted_objective_value_;
464 const bool filter_cost_;
465 const bool has_mandatory_disjunctions_;
466};
467} // namespace
468
469IntVarLocalSearchFilter* MakeNodeDisjunctionFilter(
470 const RoutingModel& routing_model, bool filter_cost) {
471 return routing_model.solver()->RevAlloc(
472 new NodeDisjunctionFilter(routing_model, filter_cost));
473}
474
475const int64_t BasePathFilter::kUnassigned = -1;
476
477BasePathFilter::BasePathFilter(const std::vector<IntVar*>& nexts,
478 int next_domain_size,
479 const PathsMetadata& paths_metadata)
480 : IntVarLocalSearchFilter(nexts),
481 paths_metadata_(paths_metadata),
482 node_path_starts_(next_domain_size, kUnassigned),
483 new_synchronized_unperformed_nodes_(nexts.size()),
484 new_nexts_(nexts.size(), kUnassigned),
485 touched_paths_(nexts.size()),
486 touched_path_chain_start_ends_(nexts.size(), {kUnassigned, kUnassigned}),
487 ranks_(next_domain_size, kUnassigned),
488 lns_detected_(false) {}
489
490bool BasePathFilter::Accept(const Assignment* delta,
491 const Assignment* /*deltadelta*/,
492 int64_t objective_min, int64_t objective_max) {
493 lns_detected_ = false;
494 for (const int touched : delta_touched_) {
495 new_nexts_[touched] = kUnassigned;
496 }
497 delta_touched_.clear();
498 const Assignment::IntContainer& container = delta->IntVarContainer();
499 delta_touched_.reserve(container.Size());
500 // Determining touched paths and their touched chain starts and ends (a node
501 // is touched if it corresponds to an element of delta or if an element of
502 // delta points to it).
503 // The start and end of a touched path subchain will have remained on the same
504 // path and will correspond to the min and max ranks of touched nodes in the
505 // current assignment.
506 for (int64_t touched_path : touched_paths_.PositionsSetAtLeastOnce()) {
507 touched_path_chain_start_ends_[touched_path] = {kUnassigned, kUnassigned};
508 }
509 touched_paths_.ResetAllToFalse();
510
511 const auto update_touched_path_chain_start_end = [this](int64_t index) {
512 const int64_t start = node_path_starts_[index];
513 if (start == kUnassigned) return;
514 touched_paths_.Set(start);
515
516 int64_t& chain_start = touched_path_chain_start_ends_[start].first;
517 if (chain_start == kUnassigned || paths_metadata_.IsStart(index) ||
518 ranks_[index] < ranks_[chain_start]) {
519 chain_start = index;
520 }
521
522 int64_t& chain_end = touched_path_chain_start_ends_[start].second;
523 if (chain_end == kUnassigned || paths_metadata_.IsEnd(index) ||
524 ranks_[index] > ranks_[chain_end]) {
525 chain_end = index;
526 }
527 };
528
529 for (const IntVarElement& new_element : container.elements()) {
530 IntVar* const var = new_element.Var();
531 int64_t index = kUnassigned;
532 if (FindIndex(var, &index)) {
533 if (!new_element.Bound()) {
534 // LNS detected
535 lns_detected_ = true;
536 return true;
537 }
538 new_nexts_[index] = new_element.Value();
539 delta_touched_.push_back(index);
540 update_touched_path_chain_start_end(index);
541 update_touched_path_chain_start_end(new_nexts_[index]);
542 }
543 }
544 // Checking feasibility of touched paths.
545 if (!InitializeAcceptPath()) return false;
546 for (const int64_t touched_start : touched_paths_.PositionsSetAtLeastOnce()) {
547 const std::pair<int64_t, int64_t> start_end =
548 touched_path_chain_start_ends_[touched_start];
549 if (!AcceptPath(touched_start, start_end.first, start_end.second)) {
550 return false;
551 }
552 }
553 // NOTE: FinalizeAcceptPath() is only called if InitializeAcceptPath() is true
554 // and all paths are accepted.
555 return FinalizeAcceptPath(objective_min, objective_max);
556}
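// Example of the chain computation above (hypothetical ranks): if the
// committed path starting at node 2 visits 2 -> 8 -> 5 -> end and the delta
// only rebinds Next(8), then both 8 and its new value are marked as touched;
// the touched chain is delimited by the touched nodes of minimal and maximal
// committed rank on that path, and AcceptPath(2, chain_start, chain_end) is
// called once for the path before FinalizeAcceptPath() checks the objective
// bounds.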
557
558void BasePathFilter::ComputePathStarts(std::vector<int64_t>* path_starts,
559 std::vector<int>* index_to_path) {
560 path_starts->clear();
561 const int nexts_size = Size();
562 index_to_path->assign(nexts_size, kUnassigned);
563 Bitset64<> has_prevs(nexts_size);
564 for (int i = 0; i < nexts_size; ++i) {
565 if (!IsVarSynced(i)) {
566 has_prevs.Set(i);
567 } else {
568 const int next = Value(i);
569 if (next < nexts_size) {
570 has_prevs.Set(next);
571 }
572 }
573 }
574 for (int i = 0; i < nexts_size; ++i) {
575 if (!has_prevs[i]) {
576 (*index_to_path)[i] = path_starts->size();
577 path_starts->push_back(i);
578 }
579 }
580}
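// Example (hypothetical): with Size() == 5 and synced values
// Value(0) = 1, Value(1) = 6, Value(2) = 3, Value(3) = 7, Value(4) = 4,
// nodes 1, 3 and 4 get has_prevs set (4 because it is its own next), values 6
// and 7 point past Size() and are path ends, so path_starts becomes {0, 2} and
// index_to_path maps 0 -> 0 and 2 -> 1.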
581
582void BasePathFilter::SynchronizeFullAssignment() {
583 for (int64_t index = 0; index < Size(); index++) {
584 if (IsVarSynced(index) && Value(index) == index &&
585 node_path_starts_[index] != kUnassigned) {
586 // index was performed before and is now unperformed.
587 new_synchronized_unperformed_nodes_.Set(index);
588 }
589 }
590 // Marking inactive nodes (which are not on a path).
591 node_path_starts_.assign(node_path_starts_.size(), kUnassigned);
592 // Marking nodes on a path and storing next values.
593 const int nexts_size = Size();
594 for (int path = 0; path < NumPaths(); ++path) {
595 const int64_t start = Start(path);
596 node_path_starts_[start] = start;
597 if (IsVarSynced(start)) {
598 int64_t next = Value(start);
599 while (next < nexts_size) {
600 int64_t node = next;
601 node_path_starts_[node] = start;
602 DCHECK(IsVarSynced(node));
603 next = Value(node);
604 }
605 node_path_starts_[next] = start;
606 }
607 node_path_starts_[End(path)] = start;
608 }
609 for (const int touched : delta_touched_) {
610 new_nexts_[touched] = kUnassigned;
611 }
612 delta_touched_.clear();
613 OnBeforeSynchronizePaths(/*synchronizing_all_paths=*/true);
614 UpdateAllRanks();
615 OnAfterSynchronizePaths();
616}
617
618void BasePathFilter::OnSynchronize(const Assignment* delta) {
619 new_synchronized_unperformed_nodes_.ResetAllToFalse();
620 if (delta == nullptr || delta->Empty() ||
621 absl::c_all_of(ranks_, [](int rank) { return rank == kUnassigned; })) {
622 SynchronizeFullAssignment();
623 return;
624 }
625 const Assignment::IntContainer& container = delta->IntVarContainer();
626 touched_paths_.ResetAllToFalse();
627 for (const IntVarElement& new_element : container.elements()) {
628 int64_t index = kUnassigned;
629 if (FindIndex(new_element.Var(), &index)) {
630 const int64_t start = node_path_starts_[index];
631 if (start != kUnassigned) {
632 touched_paths_.Set(start);
633 if (Value(index) == index) {
634 // New unperformed node (its previous start isn't unassigned).
635 DCHECK_LT(index, new_nexts_.size());
636 new_synchronized_unperformed_nodes_.Set(index);
637 node_path_starts_[index] = kUnassigned;
638 }
639 }
640 }
641 }
642 for (const int touched : delta_touched_) {
643 new_nexts_[touched] = kUnassigned;
644 }
645 delta_touched_.clear();
646 OnBeforeSynchronizePaths(/*synchronizing_all_paths=*/false);
647 for (const int64_t touched_start : touched_paths_.PositionsSetAtLeastOnce()) {
648 int64_t node = touched_start;
649 while (node < Size()) {
650 node_path_starts_[node] = touched_start;
651 node = Value(node);
652 }
653 node_path_starts_[node] = touched_start;
654 UpdatePathRanksFromStart(touched_start);
655 OnSynchronizePathFromStart(touched_start);
656 }
657 OnAfterSynchronizePaths();
658}
659
660void BasePathFilter::UpdateAllRanks() {
661 ranks_.assign(ranks_.size(), kUnassigned);
662 for (int r = 0; r < NumPaths(); ++r) {
663 const int64_t start = Start(r);
664 if (!IsVarSynced(start)) continue;
665 UpdatePathRanksFromStart(start);
666 OnSynchronizePathFromStart(start);
667 }
668}
669
670void BasePathFilter::UpdatePathRanksFromStart(int start) {
671 int rank = 0;
672 int64_t node = start;
673 while (node < Size()) {
674 ranks_[node] = rank;
675 rank++;
676 node = Value(node);
677 }
678 ranks_[node] = rank;
679}
680
681namespace {
682
683class VehicleAmortizedCostFilter : public BasePathFilter {
684 public:
685 explicit VehicleAmortizedCostFilter(const RoutingModel& routing_model);
686 ~VehicleAmortizedCostFilter() override = default;
687 std::string DebugString() const override {
688 return "VehicleAmortizedCostFilter";
689 }
690 int64_t GetSynchronizedObjectiveValue() const override {
691 return current_vehicle_cost_;
692 }
693 int64_t GetAcceptedObjectiveValue() const override {
694 return lns_detected() ? 0 : delta_vehicle_cost_;
695 }
696
697 private:
698 void OnSynchronizePathFromStart(int64_t start) override;
699 void OnAfterSynchronizePaths() override;
700 bool InitializeAcceptPath() override;
701 bool AcceptPath(int64_t path_start, int64_t chain_start,
702 int64_t chain_end) override;
703 bool FinalizeAcceptPath(int64_t objective_min,
704 int64_t objective_max) override;
705
706 int64_t current_vehicle_cost_;
707 int64_t delta_vehicle_cost_;
708 std::vector<int> current_route_lengths_;
709 std::vector<int64_t> start_to_end_;
710 std::vector<int> start_to_vehicle_;
711 std::vector<int64_t> vehicle_to_start_;
712 const std::vector<int64_t>& linear_cost_factor_of_vehicle_;
713 const std::vector<int64_t>& quadratic_cost_factor_of_vehicle_;
714};
715
716VehicleAmortizedCostFilter::VehicleAmortizedCostFilter(
717 const RoutingModel& routing_model)
718 : BasePathFilter(routing_model.Nexts(),
719 routing_model.Size() + routing_model.vehicles(),
720 routing_model.GetPathsMetadata()),
721 current_vehicle_cost_(0),
722 delta_vehicle_cost_(0),
723 current_route_lengths_(Size(), -1),
724 linear_cost_factor_of_vehicle_(
725 routing_model.GetAmortizedLinearCostFactorOfVehicles()),
726 quadratic_cost_factor_of_vehicle_(
727 routing_model.GetAmortizedQuadraticCostFactorOfVehicles()) {
728 start_to_end_.resize(Size(), -1);
729 start_to_vehicle_.resize(Size(), -1);
730 vehicle_to_start_.resize(routing_model.vehicles());
731 for (int v = 0; v < routing_model.vehicles(); v++) {
732 const int64_t start = routing_model.Start(v);
733 start_to_vehicle_[start] = v;
734 start_to_end_[start] = routing_model.End(v);
735 vehicle_to_start_[v] = start;
736 }
737}
738
739void VehicleAmortizedCostFilter::OnSynchronizePathFromStart(int64_t start) {
740 const int64_t end = start_to_end_[start];
741 CHECK_GE(end, 0);
742 const int route_length = Rank(end) - 1;
743 CHECK_GE(route_length, 0);
744 current_route_lengths_[start] = route_length;
745}
746
747void VehicleAmortizedCostFilter::OnAfterSynchronizePaths() {
748 current_vehicle_cost_ = 0;
749 for (int vehicle = 0; vehicle < vehicle_to_start_.size(); vehicle++) {
750 const int64_t start = vehicle_to_start_[vehicle];
751 DCHECK_EQ(vehicle, start_to_vehicle_[start]);
752 if (!IsVarSynced(start)) {
753 return;
754 }
755 const int route_length = current_route_lengths_[start];
756 DCHECK_GE(route_length, 0);
757
758 if (route_length == 0) {
759 // The path is empty.
760 continue;
761 }
762
763 const int64_t linear_cost_factor = linear_cost_factor_of_vehicle_[vehicle];
764 const int64_t route_length_cost =
765 CapProd(quadratic_cost_factor_of_vehicle_[vehicle],
766 route_length * route_length);
767
768 CapAddTo(CapSub(linear_cost_factor, route_length_cost),
769 &current_vehicle_cost_);
770 }
771}
772
773bool VehicleAmortizedCostFilter::InitializeAcceptPath() {
774 delta_vehicle_cost_ = current_vehicle_cost_;
775 return true;
776}
777
778bool VehicleAmortizedCostFilter::AcceptPath(int64_t path_start,
779 int64_t chain_start,
780 int64_t chain_end) {
781 // Number of nodes previously between chain_start and chain_end
782 const int previous_chain_nodes = Rank(chain_end) - 1 - Rank(chain_start);
783 CHECK_GE(previous_chain_nodes, 0);
784 int new_chain_nodes = 0;
785 int64_t node = GetNext(chain_start);
786 while (node != chain_end) {
787 new_chain_nodes++;
788 node = GetNext(node);
789 }
790
791 const int previous_route_length = current_route_lengths_[path_start];
792 CHECK_GE(previous_route_length, 0);
793 const int new_route_length =
794 previous_route_length - previous_chain_nodes + new_chain_nodes;
795
796 const int vehicle = start_to_vehicle_[path_start];
797 CHECK_GE(vehicle, 0);
798 DCHECK_EQ(path_start, vehicle_to_start_[vehicle]);
799
800 // Update the cost related to used vehicles.
801 // TODO(user): Handle possible overflows.
802 if (previous_route_length == 0) {
803 // The route was empty before, it is no longer the case (changed path).
804 CHECK_GT(new_route_length, 0);
805 CapAddTo(linear_cost_factor_of_vehicle_[vehicle], &delta_vehicle_cost_);
806 } else if (new_route_length == 0) {
807 // The route is now empty.
808 delta_vehicle_cost_ =
809 CapSub(delta_vehicle_cost_, linear_cost_factor_of_vehicle_[vehicle]);
810 }
811
812 // Update the cost related to the sum of the squares of the route lengths.
813 const int64_t quadratic_cost_factor =
814 quadratic_cost_factor_of_vehicle_[vehicle];
815 CapAddTo(CapProd(quadratic_cost_factor,
816 previous_route_length * previous_route_length),
817 &delta_vehicle_cost_);
818 delta_vehicle_cost_ = CapSub(
819 delta_vehicle_cost_,
820 CapProd(quadratic_cost_factor, new_route_length * new_route_length));
821
822 return true;
823}
824
825bool VehicleAmortizedCostFilter::FinalizeAcceptPath(int64_t /*objective_min*/,
826 int64_t objective_max) {
827 return delta_vehicle_cost_ <= objective_max;
828}
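// Worked example of the amortized cost above (hypothetical factors): with a
// linear factor of 1000 and a quadratic factor of 10, a synchronized route of
// length 5 contributes 1000 - 10 * 25 = 750 to current_vehicle_cost_. A move
// that extends that route to length 7 adds back 10 * 25 and subtracts 10 * 49
// in AcceptPath(), so the contribution drops to 1000 - 490 = 510 and the new
// delta_vehicle_cost_ is compared to objective_max.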
829
830} // namespace
831
832IntVarLocalSearchFilter* MakeVehicleAmortizedCostFilter(
833 const RoutingModel& routing_model) {
834 return routing_model.solver()->RevAlloc(
835 new VehicleAmortizedCostFilter(routing_model));
836}
837
838namespace {
839
840class TypeRegulationsFilter : public BasePathFilter {
841 public:
842 explicit TypeRegulationsFilter(const RoutingModel& model);
843 ~TypeRegulationsFilter() override = default;
844 std::string DebugString() const override { return "TypeRegulationsFilter"; }
845
846 private:
847 void OnSynchronizePathFromStart(int64_t start) override;
848 bool AcceptPath(int64_t path_start, int64_t chain_start,
849 int64_t chain_end) override;
850
851 bool HardIncompatibilitiesRespected(int vehicle, int64_t chain_start,
852 int64_t chain_end);
853
854 const RoutingModel& routing_model_;
855 std::vector<int> start_to_vehicle_;
856 // The following vector is used to keep track of the type counts for hard
857 // incompatibilities.
858 std::vector<std::vector<int>> hard_incompatibility_type_counts_per_vehicle_;
859 // Used to verify the temporal incompatibilities and requirements.
860 TypeIncompatibilityChecker temporal_incompatibility_checker_;
861 TypeRequirementChecker requirement_checker_;
862};
863
864TypeRegulationsFilter::TypeRegulationsFilter(const RoutingModel& model)
865 : BasePathFilter(model.Nexts(), model.Size() + model.vehicles(),
866 model.GetPathsMetadata()),
867 routing_model_(model),
868 start_to_vehicle_(model.Size(), -1),
869 temporal_incompatibility_checker_(model,
870 /*check_hard_incompatibilities*/ false),
871 requirement_checker_(model) {
872 const int num_vehicles = model.vehicles();
873 const bool has_hard_type_incompatibilities =
874 model.HasHardTypeIncompatibilities();
875 if (has_hard_type_incompatibilities) {
876 hard_incompatibility_type_counts_per_vehicle_.resize(num_vehicles);
877 }
878 const int num_visit_types = model.GetNumberOfVisitTypes();
879 for (int vehicle = 0; vehicle < num_vehicles; vehicle++) {
880 const int64_t start = model.Start(vehicle);
881 start_to_vehicle_[start] = vehicle;
882 if (has_hard_type_incompatibilities) {
883 hard_incompatibility_type_counts_per_vehicle_[vehicle].resize(
884 num_visit_types, 0);
885 }
886 }
887}
888
889void TypeRegulationsFilter::OnSynchronizePathFromStart(int64_t start) {
890 if (!routing_model_.HasHardTypeIncompatibilities()) return;
891
892 const int vehicle = start_to_vehicle_[start];
893 CHECK_GE(vehicle, 0);
894 std::vector<int>& type_counts =
895 hard_incompatibility_type_counts_per_vehicle_[vehicle];
896 std::fill(type_counts.begin(), type_counts.end(), 0);
897 const int num_types = type_counts.size();
898
899 int64_t node = start;
900 while (node < Size()) {
901 DCHECK(IsVarSynced(node));
902 const int type = routing_model_.GetVisitType(node);
903 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
904 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
905 CHECK_LT(type, num_types);
906 type_counts[type]++;
907 }
908 node = Value(node);
909 }
910}
911
912bool TypeRegulationsFilter::HardIncompatibilitiesRespected(int vehicle,
913 int64_t chain_start,
914 int64_t chain_end) {
915 if (!routing_model_.HasHardTypeIncompatibilities()) return true;
916
917 const std::vector<int>& previous_type_counts =
918 hard_incompatibility_type_counts_per_vehicle_[vehicle];
919
920 absl::flat_hash_map</*type*/ int, /*new_count*/ int> new_type_counts;
921 absl::flat_hash_set<int> types_to_check;
922
923 // Go through the new nodes on the path and increment their type counts.
924 int64_t node = GetNext(chain_start);
925 while (node != chain_end) {
926 const int type = routing_model_.GetVisitType(node);
927 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
928 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
929 DCHECK_LT(type, previous_type_counts.size());
930 int& type_count = gtl::LookupOrInsert(&new_type_counts, type,
931 previous_type_counts[type]);
932 if (type_count++ == 0) {
933 // New type on the route, mark to check its incompatibilities.
934 types_to_check.insert(type);
935 }
936 }
937 node = GetNext(node);
938 }
939
940 // Update new_type_counts by decrementing the occurrence of the types of the
941 // nodes no longer on the route.
942 if (IsVarSynced(chain_start)) {
943 node = Value(chain_start);
944 while (node != chain_end) {
945 const int type = routing_model_.GetVisitType(node);
946 if (type >= 0 && routing_model_.GetVisitTypePolicy(node) !=
947 RoutingModel::ADDED_TYPE_REMOVED_FROM_VEHICLE) {
948 DCHECK_LT(type, previous_type_counts.size());
949 int& type_count = gtl::LookupOrInsert(&new_type_counts, type,
950 previous_type_counts[type]);
951 CHECK_GE(type_count, 1);
952 type_count--;
953 }
954 node = Value(node);
955 }
956 }
957
958 // Check the incompatibilities for types in types_to_check.
959 for (int type : types_to_check) {
960 for (int incompatible_type :
961 routing_model_.GetHardTypeIncompatibilitiesOfType(type)) {
962 if (gtl::FindWithDefault(new_type_counts, incompatible_type,
963 previous_type_counts[incompatible_type]) > 0) {
964 return false;
965 }
966 }
967 }
968 return true;
969}
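// Example (hypothetical types): if the new chain inserts a node of visit type
// 2 on a route whose committed state had no type-2 visits, type 2 is added to
// types_to_check; if type 5 is declared hard-incompatible with type 2 and the
// route still carries a positive count of type 5 after the types of the nodes
// on the replaced chain are discounted, the path is rejected.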
970
971bool TypeRegulationsFilter::AcceptPath(int64_t path_start, int64_t chain_start,
972 int64_t chain_end) {
973 const int vehicle = start_to_vehicle_[path_start];
974 CHECK_GE(vehicle, 0);
975 const auto next_accessor = [this](int64_t node) { return GetNext(node); };
976 return HardIncompatibilitiesRespected(vehicle, chain_start, chain_end) &&
977 temporal_incompatibility_checker_.CheckVehicle(vehicle,
978 next_accessor) &&
979 requirement_checker_.CheckVehicle(vehicle, next_accessor);
980}
981
982} // namespace
983
984IntVarLocalSearchFilter* MakeTypeRegulationsFilter(
985 const RoutingModel& routing_model) {
986 return routing_model.solver()->RevAlloc(
987 new TypeRegulationsFilter(routing_model));
988}
989
990namespace {
991
992// ChainCumul filter. Version of dimension path filter which is O(delta) rather
993// than O(length of touched paths). Currently only supports dimensions without
994// costs (global and local span cost, soft bounds) and with unconstrained
995// cumul variables except overall capacity and cumul variables of path ends.
996
997class ChainCumulFilter : public BasePathFilter {
998 public:
999 ChainCumulFilter(const RoutingModel& routing_model,
1000 const RoutingDimension& dimension);
1001 ~ChainCumulFilter() override = default;
1002 std::string DebugString() const override {
1003 return "ChainCumulFilter(" + name_ + ")";
1004 }
1005
1006 private:
1007 void OnSynchronizePathFromStart(int64_t start) override;
1008 bool AcceptPath(int64_t path_start, int64_t chain_start,
1009 int64_t chain_end) override;
1010
1011 const std::vector<IntVar*> cumuls_;
1012 std::vector<int64_t> start_to_vehicle_;
1013 std::vector<int64_t> start_to_end_;
1014 std::vector<const RoutingModel::TransitCallback2*> evaluators_;
1015 const std::vector<int64_t> vehicle_capacities_;
1016 std::vector<int64_t> current_path_cumul_mins_;
1017 std::vector<int64_t> current_max_of_path_end_cumul_mins_;
1018 std::vector<int64_t> old_nexts_;
1019 std::vector<int> old_vehicles_;
1020 std::vector<int64_t> current_transits_;
1021 const std::string name_;
1022};
1023
1024ChainCumulFilter::ChainCumulFilter(const RoutingModel& routing_model,
1025 const RoutingDimension& dimension)
1026 : BasePathFilter(routing_model.Nexts(), dimension.cumuls().size(),
1027 routing_model.GetPathsMetadata()),
1028 cumuls_(dimension.cumuls()),
1029 evaluators_(routing_model.vehicles(), nullptr),
1030 vehicle_capacities_(dimension.vehicle_capacities()),
1031 current_path_cumul_mins_(dimension.cumuls().size(), 0),
1032 current_max_of_path_end_cumul_mins_(dimension.cumuls().size(), 0),
1033 old_nexts_(routing_model.Size(), kUnassigned),
1034 old_vehicles_(routing_model.Size(), kUnassigned),
1035 current_transits_(routing_model.Size(), 0),
1036 name_(dimension.name()) {
1037 start_to_vehicle_.resize(Size(), -1);
1038 start_to_end_.resize(Size(), -1);
1039 for (int i = 0; i < routing_model.vehicles(); ++i) {
1040 start_to_vehicle_[routing_model.Start(i)] = i;
1041 start_to_end_[routing_model.Start(i)] = routing_model.End(i);
1042 evaluators_[i] = &dimension.transit_evaluator(i);
1043 }
1044}
1045
1046// On synchronization, maintain "propagated" cumul mins and max level of cumul
1047// from each node to the end of the path; to be used by AcceptPath to
1048// incrementally check feasibility.
1049void ChainCumulFilter::OnSynchronizePathFromStart(int64_t start) {
1050 const int vehicle = start_to_vehicle_[start];
1051 std::vector<int64_t> path_nodes;
1052 int64_t node = start;
1053 int64_t cumul = cumuls_[node]->Min();
1054 while (node < Size()) {
1055 path_nodes.push_back(node);
1056 current_path_cumul_mins_[node] = cumul;
1057 const int64_t next = Value(node);
1058 if (next != old_nexts_[node] || vehicle != old_vehicles_[node]) {
1059 old_nexts_[node] = next;
1060 old_vehicles_[node] = vehicle;
1061 current_transits_[node] = (*evaluators_[vehicle])(node, next);
1062 }
1063 CapAddTo(current_transits_[node], &cumul);
1064 cumul = std::max(cumuls_[next]->Min(), cumul);
1065 node = next;
1066 }
1067 path_nodes.push_back(node);
1068 current_path_cumul_mins_[node] = cumul;
1069 int64_t max_cumuls = cumul;
1070 for (int i = path_nodes.size() - 1; i >= 0; --i) {
1071 const int64_t node = path_nodes[i];
1072 max_cumuls = std::max(max_cumuls, current_path_cumul_mins_[node]);
1073 current_max_of_path_end_cumul_mins_[node] = max_cumuls;
1074 }
1075}
1076
1077// The complexity of the method is O(size of chain (chain_start...chain_end)).
1078bool ChainCumulFilter::AcceptPath(int64_t path_start, int64_t chain_start,
1079 int64_t chain_end) {
1080 const int vehicle = start_to_vehicle_[path_start];
1081 const int64_t capacity = vehicle_capacities_[vehicle];
1082 int64_t node = chain_start;
1083 int64_t cumul = current_path_cumul_mins_[node];
1084 while (node != chain_end) {
1085 const int64_t next = GetNext(node);
1086 if (IsVarSynced(node) && next == Value(node) &&
1087 vehicle == old_vehicles_[node]) {
1088 CapAddTo(current_transits_[node], &cumul);
1089 } else {
1090 CapAddTo((*evaluators_[vehicle])(node, next), &cumul);
1091 }
1092 cumul = std::max(cumuls_[next]->Min(), cumul);
1093 if (cumul > capacity) return false;
1094 node = next;
1095 }
1096 const int64_t end = start_to_end_[path_start];
1097 const int64_t end_cumul_delta =
1098 CapSub(current_path_cumul_mins_[end], current_path_cumul_mins_[node]);
1099 const int64_t after_chain_cumul_delta =
1100 CapSub(current_max_of_path_end_cumul_mins_[node],
1101 current_path_cumul_mins_[node]);
1102 return CapAdd(cumul, after_chain_cumul_delta) <= capacity &&
1103 CapAdd(cumul, end_cumul_delta) <= cumuls_[end]->Max();
1104}
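// Example of the incremental check above (hypothetical values): if at the last
// synchronization the chain end had cumul min 30, the maximum of downstream
// cumul mins was 45 and the path end had cumul min 50, then
// after_chain_cumul_delta = 15 and end_cumul_delta = 20; a new chain whose
// recomputed cumul min at chain_end is 40 is accepted only if 40 + 15 <=
// capacity and 40 + 20 <= cumuls_[end]->Max(), each intermediate cumul having
// already been checked against the capacity inside the loop.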
1105
1106} // namespace
1107
1109 int path, int64_t capacity, int64_t span_upper_bound,
1110 absl::Span<const DimensionValues::Interval> cumul_of_node,
1111 absl::Span<const DimensionValues::Interval> slack_of_node,
1112 absl::AnyInvocable<int64_t(int64_t, int64_t) const> evaluator,
1113 DimensionValues& dimension_values) {
1114 using Interval = DimensionValues::Interval;
1115 // Copy cumul min/max data from cumul variables.
1116 const int num_nodes = dimension_values.NumNodes(path);
1117 absl::Span<const int> nodes = dimension_values.Nodes(path);
1118 absl::Span<Interval> cumuls = dimension_values.MutableCumuls(path);
1119 for (int r = 0; r < num_nodes; ++r) {
1120 const int node = nodes[r];
1121 Interval cumul = cumul_of_node[node];
1122 if (!cumul.DecreaseMax(capacity)) return false;
1123 cumuls[r] = cumul;
1124 }
1125 // Extract travel data.
1126 // TODO(user): refine this logic to avoid more calls, using PathState
1127 // chains to find chains in the middle of the path, and vehicle travel class
1128 // to reuse chains from different vehicles.
1129 absl::Span<int64_t> travels = dimension_values.MutableTravels(path);
1130 {
1131 absl::Span<const int> cnodes = dimension_values.CommittedNodes(path);
1132 absl::Span<const int64_t> ctravels =
1133 dimension_values.CommittedTravels(path);
1134 // Reuse committed travels to avoid calling evaluator.
1135 // Split [0, num_nodes) into [0, i), [i, j), [j, num_nodes),
1136 // so that:
1137 // - nodes[r] == cnodes[r] for r in [0, i)
1138 // - nodes[r] == cnodes[r + delta] for r in [j, num_nodes)
1139 // with delta = num_cnodes - num_nodes.
1140 const int i_limit = std::min(cnodes.size(), nodes.size());
1141 DCHECK(cnodes.empty() || cnodes[0] == nodes[0]);
1142 int i = 1;
1143 while (i < i_limit && cnodes[i] == nodes[i]) {
1144 travels[i - 1] = ctravels[i - 1];
1145 ++i;
1146 }
1147 DCHECK(cnodes.empty() || cnodes.back() == nodes.back());
1148 int j = num_nodes - 2;
1149 const int delta = cnodes.size() - num_nodes;
1150 const int j_limit = i + std::max(0, -delta);
1151 while (j_limit <= j && nodes[j] == cnodes[j + delta]) {
1152 travels[j] = ctravels[j + delta];
1153 --j;
1154 }
1155 ++j;
1156 for (int r = i; r <= j; ++r) {
1157 const int64_t travel = evaluator(nodes[r - 1], nodes[r]);
1158 travels[r - 1] = travel;
1159 }
1160 }
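 // Example of the split above (hypothetical nodes): a committed path
 // [A, B, C, D, E] changed to [A, B, X, D, E] has delta = 0; the forward scan
 // copies travels[0] (A->B), the backward scan copies travels[3] (D->E), and
 // only evaluator(B, X) and evaluator(X, D) are recomputed.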
1161 // Extract transit data, fill partial travel sums.
1162 absl::Span<Interval> transits = dimension_values.MutableTransits(path);
1163 absl::Span<int64_t> travel_sums = dimension_values.MutableTravelSums(path);
1164 int64_t total_travel = 0;
1165 travel_sums[0] = 0;
1166 for (int r = 1; r < num_nodes; ++r) {
1167 const int64_t travel = travels[r - 1];
1168 CapAddTo(travel, &total_travel);
1169 travel_sums[r] = total_travel;
1170 Interval transit{.min = travel, .max = travel};
1171 transit.Add(slack_of_node[nodes[r - 1]]);
1172 transits[r - 1] = transit;
1173 }
1174 if (travel_sums.back() > span_upper_bound) return false;
1175 dimension_values.MutableSpan(path) = {.min = travel_sums.back(),
1176 .max = span_upper_bound};
1177 return true;
1178}
1179
1180// Fills pre- and post-visits of a path, using pre/post-travel evaluators.
1181// NOTE: The visit at node A uses time interval
1182// [cumul(A) - previsit(A), cumul(A) + postvisit(A)).
1183// The pre-travel of travel A->B is the post-visit of A,
1184// and the post-travel of travel A->B is the pre-visit of B.
1185// TODO(user): when PathCumulFilter uses a PathState, replace
1186// dimension_values by a PathState.
1187// TODO(user): use committed values as a cache to avoid calling evaluators.
1189 int path, const DimensionValues& dimension_values,
1190 absl::AnyInvocable<int64_t(int64_t, int64_t) const> pre_travel_evaluator,
1191 absl::AnyInvocable<int64_t(int64_t, int64_t) const> post_travel_evaluator,
1192 PrePostVisitValues& visit_values) {
1193 const int num_nodes = dimension_values.NumNodes(path);
1194 visit_values.ChangePathSize(path, num_nodes);
1195 absl::Span<int64_t> pre_visits = visit_values.MutablePreVisits(path);
1196 absl::Span<int64_t> post_visits = visit_values.MutablePostVisits(path);
1197 absl::Span<const int> nodes = dimension_values.Nodes(path);
1198 if (pre_travel_evaluator == nullptr) {
1199 absl::c_fill(post_visits, 0);
1200 } else {
1201 for (int i = 0; i < num_nodes - 1; ++i) {
1202 post_visits[i] = pre_travel_evaluator(nodes[i], nodes[i + 1]);
1203 }
1204 post_visits.back() = 0;
1205 }
1206 if (post_travel_evaluator == nullptr) {
1207 absl::c_fill(pre_visits, 0);
1208 } else {
1209 pre_visits[0] = 0;
1210 for (int i = 1; i < num_nodes; ++i) {
1211 pre_visits[i] = post_travel_evaluator(nodes[i - 1], nodes[i]);
1212 }
1213 }
1214}
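// Example (hypothetical evaluators): for a path [A, B, C], a pre-travel value
// of 2 for the travel A->B is stored as post_visits[0] = 2 (the visit at A
// extends 2 units after cumul(A)), and a post-travel value of 3 for A->B is
// stored as pre_visits[1] = 3 (the visit at B starts 3 units before cumul(B));
// the first pre-visit and the last post-visit are always 0.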
1215
1217 int path, DimensionValues& dimension_values,
1218 absl::Span<const std::pair<int64_t, int64_t>> interbreaks) {
1219 using Interval = DimensionValues::Interval;
1220 using VehicleBreak = DimensionValues::VehicleBreak;
1221 const int64_t total_travel = dimension_values.TravelSums(path).back();
1222 // Improve bounds on span/start max/end min using time windows: breaks that
1223 // must occur inside the path have their duration accumulated into
1224// lb_span_tw; they also widen [start_max, end_min).
1225 int64_t lb_span_tw = total_travel;
1226 const absl::Span<Interval> cumuls = dimension_values.MutableCumuls(path);
1227 Interval& start = cumuls.front();
1228 Interval& end = cumuls.back();
1229 // TODO(user): consider adding deductions from the path to the break.
1230 for (const VehicleBreak& br : dimension_values.MutableVehicleBreaks(path)) {
1231 // If the break may be performed before the path, after the path, or if it
1232 // is not performed at all, ignore the break.
1233 if (br.is_performed.min == 0) continue;
1234 if (br.end.min <= start.max || end.min <= br.start.max) continue;
1235 // This break must be performed inside the path: widen the path.
1236 CapAddTo(br.duration.min, &lb_span_tw);
1237 if (!start.DecreaseMax(br.start.max)) return false;
1238 if (!end.IncreaseMin(br.end.min)) return false;
1239 }
1240 Interval& span = dimension_values.MutableSpan(path);
1241 if (!span.IncreaseMin(std::max(lb_span_tw, CapSub(end.min, start.max)))) {
1242 return false;
1243 }
1244// Compute num_feasible_breaks = number of breaks that may fit into the route,
1245// and [break_start_min, break_end_max) = max coverage of breaks.
1246 int64_t break_start_min = kint64max;
1247 int64_t break_end_max = kint64min;
1248
1249 if (!start.IncreaseMin(CapSub(end.min, span.max))) return false;
1250 if (!end.DecreaseMax(CapAdd(start.max, span.max))) return false;
1251
1252 int num_feasible_breaks = 0;
1253 for (const VehicleBreak& br : dimension_values.MutableVehicleBreaks(path)) {
1254 if (start.min <= br.start.max && br.end.min <= end.max) {
1255 break_start_min = std::min(break_start_min, br.start.min);
1256 break_end_max = std::max(break_end_max, br.end.max);
1257 ++num_feasible_breaks;
1258 }
1259 }
1260 // Improve span/start min/end max using interbreak limits: there must be
1261 // enough breaks inside the path, so that for each limit, the union of
1262 // [br.start - max_interbreak, br.end + max_interbreak) covers [start, end),
1263 // or [start, end) is shorter than max_interbreak.
1264 for (const auto& [max_interbreak, min_break_duration] : interbreaks) {
1265 // Minimal number of breaks depends on total travel:
1266 // 0 breaks for 0 <= total travel <= limit,
1267 // 1 break for limit + 1 <= total travel <= 2 * limit,
1268 // i breaks for i * limit + 1 <= total travel <= (i+1) * limit, ...
1269 if (max_interbreak == 0) {
1270 if (total_travel > 0) return false;
1271 continue;
1272 }
1273 int64_t min_num_breaks =
1274 std::max<int64_t>(0, (total_travel - 1) / max_interbreak);
1275 if (span.min > max_interbreak) {
1276 min_num_breaks = std::max<int64_t>(min_num_breaks, 1);
1277 }
1278 if (min_num_breaks > num_feasible_breaks) return false;
1279 if (!span.IncreaseMin(CapAdd(
1280 total_travel, CapProd(min_num_breaks, min_break_duration)))) {
1281 return false;
1282 }
1283 if (min_num_breaks > 0) {
1284 if (!start.IncreaseMin(CapSub(break_start_min, max_interbreak))) {
1285 return false;
1286 }
1287 if (!end.DecreaseMax(CapAdd(break_end_max, max_interbreak))) {
1288 return false;
1289 }
1290 }
1291 }
1292 return start.DecreaseMax(CapSub(end.max, span.min)) &&
1293 end.IncreaseMin(CapAdd(start.min, span.min));
1294}
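// Worked example of the interbreak reasoning above (hypothetical values): with
// total_travel = 100 and a limit of max_interbreak = 40, at least
// (100 - 1) / 40 = 2 breaks must be taken inside the route; if fewer than 2
// breaks can still fit between the route's start and end the path is rejected,
// and otherwise the span min is raised to at least
// 100 + 2 * min_break_duration.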
1295
1296namespace {
1297
1298class PathCumulFilter : public BasePathFilter {
1299 public:
1300 PathCumulFilter(const RoutingModel& routing_model,
1301 const RoutingDimension& dimension,
1302 bool propagate_own_objective_value,
1303 bool filter_objective_cost, bool may_use_optimizers);
1304 ~PathCumulFilter() override = default;
1305 std::string DebugString() const override {
1306 return "PathCumulFilter(" + name_ + ")";
1307 }
1308 int64_t GetSynchronizedObjectiveValue() const override {
1309 return propagate_own_objective_value_ ? synchronized_objective_value_ : 0;
1310 }
1311 int64_t GetAcceptedObjectiveValue() const override {
1312 return lns_detected() || !propagate_own_objective_value_
1313 ? 0
1314 : accepted_objective_value_;
1315 }
1316 bool UsesDimensionOptimizers() {
1317 if (!may_use_optimizers_) return false;
1318 for (int vehicle = 0; vehicle < routing_model_.vehicles(); ++vehicle) {
1319 if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) return true;
1320 }
1321 return false;
1322 }
1323
1324 private:
1325 using Interval = DimensionValues::Interval;
1326 struct SoftBound {
1327 int64_t bound = -1;
1328 int64_t coefficient = 0;
1329 };
1330
1331 // Data extractors used in constructor.
1332 std::vector<Interval> ExtractInitialCumulIntervals();
1333 std::vector<Interval> ExtractInitialSlackIntervals();
1334 std::vector<std::vector<RoutingDimension::NodePrecedence>>
1335 ExtractNodeIndexToPrecedences() const;
1336 std::vector<SoftBound> ExtractCumulSoftUpperBounds() const;
1337 std::vector<SoftBound> ExtractCumulSoftLowerBounds() const;
1338 std::vector<const PiecewiseLinearFunction*> ExtractCumulPiecewiseLinearCosts()
1339 const;
1340 std::vector<const RoutingModel::TransitCallback2*> ExtractEvaluators() const;
1341 using VehicleBreak = DimensionValues::VehicleBreak;
1342 std::vector<std::vector<VehicleBreak>> ExtractInitialVehicleBreaks() const;
1343
1344 bool InitializeAcceptPath() override {
1345 accepted_objective_value_ = synchronized_objective_value_;
1346 dimension_values_.Revert();
1347 cost_of_path_.Revert();
1348 global_span_cost_.Revert();
1349 location_of_node_.Revert();
1350 return true;
1351 }
1352 bool AcceptPath(int64_t path_start, int64_t chain_start,
1353 int64_t chain_end) override;
1354 bool FinalizeAcceptPath(int64_t objective_min,
1355 int64_t objective_max) override;
1356
1357 void OnBeforeSynchronizePaths(bool synchronizing_all_paths) override;
1358 void OnSynchronizePathFromStart(int64_t start) override;
1359 void OnAfterSynchronizePaths() override;
1360
1361 // TODO(user): consider making those methods external functions.
1362 // Fills the data of path in dimension_values_: nodes, cumuls, travels,
1363 // transits, travel_sums, span limits.
1364 bool FillDimensionValues(int path);
1365 // Propagates the transit constraint, cumul[r] + transit[r] == cumul[r+1],
1366 // in dimension_values_'s current path data.
1367 bool PropagateTransits(int path);
1368 // Propagates the transit constraint supposing that there are no forbidden
1369 // intervals for cumuls. This is faster than considering the intervals.
1370 bool PropagateTransitsWithoutForbiddenIntervals(int path);
1371 // Propagates both the transit constraint and cumul forbidden intervals.
1372 bool PropagateTransitsWithForbiddenIntervals(int path);
1373 // Propagates the span constraint, cumul[start] + span == cumul[end].
1374 bool PropagateSpan(int path);
1375 // Propagates cumul[delivery] <= cumul[pickup] + pickup_to_delivery_limit.
1376 bool PropagatePickupToDeliveryLimits(int path);
1377 // Propagates the vehicle breaks constraint.
1378 bool PropagateVehicleBreaks(int path);
1379
1380 // Returns true iff there are any precedence constraints to enforce.
1381 bool FilterPrecedences() const { return !node_index_to_precedences_.empty(); }
1382 // Returns true iff there is a global span cost.
1383 bool FilterGlobalSpanCost() const {
1384 return global_span_cost_coefficient_ != 0;
1385 }
1386 // Returns true iff the given path has vehicle breaks.
1387 bool FilterVehicleBreaks(int path) const {
1388 return dimension_.HasBreakConstraints() &&
1389 (!dimension_.GetBreakIntervalsOfVehicle(path).empty() ||
1390 !dimension_.GetBreakDistanceDurationOfVehicle(path).empty());
1391 }
1392 // Returns true iff the given path has a linear soft span cost.
1393 bool FilterSoftSpanCost(int path) const {
1394 return dimension_.HasSoftSpanUpperBounds() &&
1395 dimension_.GetSoftSpanUpperBoundForVehicle(path).cost > 0;
1396 }
1397 // Returns true iff the given path has a quadratic soft span cost.
1398 bool FilterSoftSpanQuadraticCost(int path) const {
1399 if (!dimension_.HasQuadraticCostSoftSpanUpperBounds()) return false;
1400 const auto [bound, cost] =
1401 dimension_.GetQuadraticCostSoftSpanUpperBoundForVehicle(path);
1402 return bound < kint64max && cost != 0;
1403 }
1404 // Returns true iff an LP/MP optimizer should be used for the given vehicle.
1405 bool FilterWithDimensionCumulOptimizerForVehicle(int vehicle) const {
1406 if (!may_use_optimizers_) return false;
1407 if (!cumul_piecewise_linear_costs_.empty()) return false;
1408
1409 int num_linear_constraints = 0;
1410 if (dimension_.GetSpanCostCoefficientForVehicle(vehicle) > 0 ||
1411 dimension_.GetSlackCostCoefficientForVehicle(vehicle) > 0) {
1412 ++num_linear_constraints;
1413 }
1414 if (FilterSoftSpanCost(vehicle)) ++num_linear_constraints;
1415 if (!cumul_soft_upper_bounds_.empty()) ++num_linear_constraints;
1416 if (!cumul_soft_lower_bounds_.empty()) ++num_linear_constraints;
1417 if (path_span_upper_bounds_[vehicle] < kint64max) {
1418 ++num_linear_constraints;
1419 }
1420 const bool has_breaks = FilterVehicleBreaks(vehicle);
1421 if (has_breaks) ++num_linear_constraints;
1422 // The DimensionCumulOptimizer is used to compute a more precise value of
1423 // the cost related to the cumul values (soft bounds and span/slack costs).
1424 // It is also used to guarantee feasibility with complex mixes of
1425 // constraints and in particular in the presence of break requests along
1426 // other constraints. Therefore, without breaks, we only use the optimizer
1427 // when the costs are actually used to filter the solutions, i.e. when
1428 // filter_objective_cost_ is true.
1429 return num_linear_constraints >= 2 &&
1430 (has_breaks || filter_objective_cost_);
1431 }
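 // Example (hypothetical configuration): a vehicle with a positive span cost
 // coefficient and a soft span upper bound counts two linear constraints, so
 // the LP/MP optimizer is used for it as soon as filter_objective_cost_ is
 // true (or it has break constraints); a vehicle with only a hard span upper
 // bound counts one and is handled by the cheaper propagation code instead.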
1432
1433 // Data about the routing problem, used as read-only input.
1434 const RoutingModel& routing_model_;
1435 const RoutingDimension& dimension_;
1436 const std::vector<Interval> initial_cumul_;
1437 const std::vector<Interval> initial_slack_;
1438 const std::vector<std::vector<VehicleBreak>> initial_vehicle_breaks_;
1439 // Maps vehicle/path to their values, values are always present.
1440 const std::vector<const RoutingModel::TransitCallback2*> evaluators_;
1441 const std::vector<int64_t> path_capacities_;
1442 const std::vector<int64_t> path_span_upper_bounds_;
1443 const std::vector<int64_t> path_total_slack_cost_coefficients_;
1444
1445 // Global span cost data.
1446 // Global span cost coefficient: the global span interval is
1447 // [min_{vehicle} cumul(start), max_{vehicle} cumul(end)),
1448// over vehicles that are nonempty or used when empty.
1449 // The global span cost is the length of the interval times this coefficient.
1450 //
1451 // This class keeps data about active paths at Synchronize(): paths sorted
1452 // by decreasing span min, decreasing end min, and increasing start max.
1453 // At Accept(), it first propagates all pathwise constraints in changed
1454 // paths. Then it computes:
1455 // - active_start_max = min_{active vehicle} cumul_[vehicle].front().max
1456 // - active_end_min = max_{active vehicle} cumul_[vehicle].back().min
1457 // - active_span_min = max_{active vehicle} span_[vehicle].min,
1458 // which allow to compute a lower bound on the global span, as
1459 // max(active_end_min - active_start_max, active_span_min, 0).
1460 //
1461 // To find the active_ values, this class merges bounds from the active paths,
1462 // among changed paths and unchanged paths:
1463 // - changed paths have their new bounds in dimension_values_.
1464 // - since committed paths have their bounds sorted in the vectors below,
1465 // scanning for the first path that is unchanged yields the min or max
1466 // value for all unchanged paths.
1467 // Both types of scan are done in O(#changed paths) time (see the sketch after this class).
1468 const int64_t global_span_cost_coefficient_;
1469 struct ValuedPath {
1470 int64_t value;
1471 int path;
1472 bool operator<(const ValuedPath& other) const {
1473 return std::tie(value, path) < std::tie(other.value, other.path);
1474 }
1475 bool operator>(const ValuedPath& other) const {
1476 return std::tie(value, path) > std::tie(other.value, other.path);
1477 }
1478 };
1479 // Committed active paths, sorted by decreasing span min.
1480 std::vector<ValuedPath> paths_by_decreasing_span_min_;
1481 // Committed active paths, sorted by decreasing end min.
1482 std::vector<ValuedPath> paths_by_decreasing_end_min_;
1483 // Committed active paths, sorted by increasing start max.
1484 std::vector<ValuedPath> paths_by_increasing_start_max_;
1485 // Holds committed/current global span cost.
1486 CommittableValue<int64_t> global_span_cost_;
1487
1488 // Empty if there are no soft bounds, otherwise maps each node to its bound.
1489 const std::vector<SoftBound> cumul_soft_upper_bounds_;
1490 // Empty if there are no soft bounds, otherwise maps each node to its bound.
1491 const std::vector<SoftBound> cumul_soft_lower_bounds_;
1492 // Empty if there are no linear costs, otherwise maps each node to its cost.
1493 const std::vector<const PiecewiseLinearFunction*>
1494 cumul_piecewise_linear_costs_;
1495 // True iff some cumul has forbidden intervals.
1496 const bool has_forbidden_intervals_;
1497
1498 // Data reflecting information on path variables for the committed and the
1499 // current solution.
1500 DimensionValues dimension_values_;
1501 // Maps each path to the sum of its path-only costs: span/slack cost,
1502 // soft cumul costs, soft span limits.
1503 CommittableArray<int64_t> cost_of_path_;
1504 int64_t synchronized_objective_value_;
1505 int64_t accepted_objective_value_;
1506
1507 struct RankAndIndex {
1508 int rank = -1;
1509 int index = -1;
1510 };
1511 // When examining a path for pickup-to-delivery limits, allows finding whether
1512 // some pickup alternative is on the path, and at what rank (position).
1513 // Only the Revert() method of the CommittableArray is used, to reset all
1514 // locations to the default dummy value before examining a path.
1515 CommittableArray<RankAndIndex> pickup_rank_and_alternative_index_of_pair_;
1516
1517 // node_index_to_precedences_[node_index] contains all NodePrecedence elements
1518 // with node_index as either "first_node" or "second_node".
1519 // This vector is empty if there are no precedences on the dimension_.
1520 const std::vector<std::vector<RoutingDimension::NodePrecedence>>
1521 node_index_to_precedences_;
1522 struct PathAndRank {
1523 int path = -1;
1524 int rank = -1;
1525 };
1526 // Maps the location of each node in the committed and current solutions.
1527 // This is used when enforcing precedence constraints.
1528 CommittableArray<PathAndRank> location_of_node_;
1529
1530 // Name of the dimension, for debugging/profiling purposes.
1531 const std::string name_;
1532
1533 LocalDimensionCumulOptimizer* lp_optimizer_;
1534 LocalDimensionCumulOptimizer* mp_optimizer_;
1535 const std::function<int64_t(int64_t)> path_accessor_;
1536 const bool filter_objective_cost_;
1537 // True iff this object may use an LP or MP optimizer to solve sub-problems.
1538 const bool may_use_optimizers_;
1539 const bool propagate_own_objective_value_;
1540};
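
// The class comment above derives a lower bound on the global span from three
// aggregates (active_start_max, active_end_min, active_span_min), merging the
// changed paths with the first unchanged entry of each committed, sorted
// vector. The standalone sketch below restates that merge; it is illustrative
// only, not used by the filter, and the names are hypothetical.
struct SketchValuedPath {
  int64_t value;
  int path;
};
inline int64_t GlobalSpanLowerBoundSketch(
    const std::vector<bool>& path_changed,
    const std::vector<SketchValuedPath>& by_decreasing_end_min,
    const std::vector<SketchValuedPath>& by_increasing_start_max,
    const std::vector<SketchValuedPath>& by_decreasing_span_min,
    int64_t changed_end_min, int64_t changed_start_max,
    int64_t changed_span_min) {
  int64_t end_min = changed_end_min;
  int64_t start_max = changed_start_max;
  int64_t span_min = changed_span_min;
  // In each committed vector, the first unchanged entry bounds all unchanged
  // paths, so each scan stops after at most #changed paths + 1 steps.
  for (const auto& [value, path] : by_decreasing_end_min) {
    if (path_changed[path]) continue;
    end_min = std::max(end_min, value);
    break;
  }
  for (const auto& [value, path] : by_increasing_start_max) {
    if (path_changed[path]) continue;
    start_max = std::min(start_max, value);
    break;
  }
  for (const auto& [value, path] : by_decreasing_span_min) {
    if (path_changed[path]) continue;
    span_min = std::max(span_min, value);
    break;
  }
  // Lower bound on the global span, as in the class comment.
  return std::max<int64_t>({CapSub(end_min, start_max), span_min, 0});
}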
1541
1542namespace {
1543template <typename T>
1544std::vector<T> SumOfVectors(const std::vector<T>& v1,
1545 const std::vector<T>& v2) {
1546 DCHECK_EQ(v1.size(), v2.size());
1547 std::vector<T> sum(v1.size());
1548 absl::c_transform(v1, v2, sum.begin(), [](T a, T b) { return CapAdd(a, b); });
1549 return sum;
1550}
1551} // namespace
1552
1553std::vector<PathCumulFilter::Interval>
1554PathCumulFilter::ExtractInitialCumulIntervals() {
1555 std::vector<Interval> intervals;
1556 intervals.reserve(dimension_.cumuls().size());
1557 for (const IntVar* cumul : dimension_.cumuls()) {
1558 intervals.push_back({cumul->Min(), cumul->Max()});
1559 }
1560 return intervals;
1561}
1562
1563std::vector<PathCumulFilter::Interval>
1564PathCumulFilter::ExtractInitialSlackIntervals() {
1565 std::vector<Interval> intervals;
1566 intervals.reserve(dimension_.slacks().size());
1567 for (const IntVar* slack : dimension_.slacks()) {
1568 intervals.push_back({slack->Min(), slack->Max()});
1569 }
1570 return intervals;
1571}
1572
1573std::vector<PathCumulFilter::SoftBound>
1574PathCumulFilter::ExtractCumulSoftUpperBounds() const {
1575 const int num_cumuls = dimension_.cumuls().size();
1576 std::vector<SoftBound> bounds(num_cumuls,
1577 {.bound = kint64max, .coefficient = 0});
1578 bool has_some_bound = false;
1579 for (int i = 0; i < num_cumuls; ++i) {
1580 if (!dimension_.HasCumulVarSoftUpperBound(i)) continue;
1581 const int64_t bound = dimension_.GetCumulVarSoftUpperBound(i);
1582 const int64_t coeff = dimension_.GetCumulVarSoftUpperBoundCoefficient(i);
1583 bounds[i] = {.bound = bound, .coefficient = coeff};
1584 has_some_bound |= bound < kint64max && coeff != 0;
1585 }
1586 if (!has_some_bound) bounds.clear();
1587 return bounds;
1588}
1589
1590std::vector<PathCumulFilter::SoftBound>
1591PathCumulFilter::ExtractCumulSoftLowerBounds() const {
1592 const int num_cumuls = dimension_.cumuls().size();
1593 std::vector<SoftBound> bounds(num_cumuls, {.bound = 0, .coefficient = 0});
1594 bool has_some_bound = false;
1595 for (int i = 0; i < num_cumuls; ++i) {
1596 if (!dimension_.HasCumulVarSoftLowerBound(i)) continue;
1597 const int64_t bound = dimension_.GetCumulVarSoftLowerBound(i);
1598 const int64_t coeff = dimension_.GetCumulVarSoftLowerBoundCoefficient(i);
1599 bounds[i] = {.bound = bound, .coefficient = coeff};
1600 has_some_bound |= bound > 0 && coeff != 0;
1601 }
1602 if (!has_some_bound) bounds.clear();
1603 return bounds;
1604}
1605
1606std::vector<const PiecewiseLinearFunction*>
1607PathCumulFilter::ExtractCumulPiecewiseLinearCosts() const {
1608 const int num_cumuls = dimension_.cumuls().size();
1609 std::vector<const PiecewiseLinearFunction*> costs(num_cumuls, nullptr);
1610 bool has_some_cost = false;
1611 for (int i = 0; i < dimension_.cumuls().size(); ++i) {
1612 if (!dimension_.HasCumulVarPiecewiseLinearCost(i)) continue;
1613 const PiecewiseLinearFunction* const cost =
1614 dimension_.GetCumulVarPiecewiseLinearCost(i);
1615 if (cost == nullptr) continue;
1616 has_some_cost = true;
1617 costs[i] = cost;
1618 }
1619 if (!has_some_cost) costs.clear();
1620 return costs;
1621}
1622
1623std::vector<const RoutingModel::TransitCallback2*>
1624PathCumulFilter::ExtractEvaluators() const {
1625 const int num_paths = NumPaths();
1626 std::vector<const RoutingModel::TransitCallback2*> evaluators(num_paths);
1627 for (int i = 0; i < num_paths; ++i) {
1628 evaluators[i] = &dimension_.transit_evaluator(i);
1629 }
1630 return evaluators;
1631}
1632
1633std::vector<std::vector<RoutingDimension::NodePrecedence>>
1634PathCumulFilter::ExtractNodeIndexToPrecedences() const {
1635 std::vector<std::vector<RoutingDimension::NodePrecedence>>
1636 node_index_to_precedences;
1637 const std::vector<RoutingDimension::NodePrecedence>& node_precedences =
1638 dimension_.GetNodePrecedences();
1639 if (!node_precedences.empty()) {
1640 node_index_to_precedences.resize(initial_cumul_.size());
1641 for (const auto& node_precedence : node_precedences) {
1642 node_index_to_precedences[node_precedence.first_node].push_back(
1643 node_precedence);
1644 node_index_to_precedences[node_precedence.second_node].push_back(
1645 node_precedence);
1646 }
1647 }
1648 return node_index_to_precedences;
1649}
1650
1651std::vector<std::vector<DimensionValues::VehicleBreak>>
1652PathCumulFilter::ExtractInitialVehicleBreaks() const {
1653 const int num_vehicles = routing_model_.vehicles();
1654 std::vector<std::vector<VehicleBreak>> vehicle_breaks(num_vehicles);
1655 if (!dimension_.HasBreakConstraints()) return vehicle_breaks;
1656 for (int v = 0; v < num_vehicles; ++v) {
1657 for (const IntervalVar* br : dimension_.GetBreakIntervalsOfVehicle(v)) {
1658 vehicle_breaks[v].push_back(
1659 {.start = {.min = br->StartMin(), .max = br->StartMax()},
1660 .end = {.min = br->EndMin(), .max = br->EndMax()},
1661 .duration = {.min = br->DurationMin(), .max = br->DurationMax()},
1662 .is_performed = {.min = br->MustBePerformed(),
1663 .max = br->MayBePerformed()}});
1664 }
1665 }
1666 return vehicle_breaks;
1667}
1668
1669PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model,
1670 const RoutingDimension& dimension,
1671 bool propagate_own_objective_value,
1672 bool filter_objective_cost,
1673 bool may_use_optimizers)
1674 : BasePathFilter(routing_model.Nexts(), dimension.cumuls().size(),
1675 routing_model.GetPathsMetadata()),
1676 routing_model_(routing_model),
1677 dimension_(dimension),
1678 initial_cumul_(ExtractInitialCumulIntervals()),
1679 initial_slack_(ExtractInitialSlackIntervals()),
1680 initial_vehicle_breaks_(ExtractInitialVehicleBreaks()),
1681 evaluators_(ExtractEvaluators()),
1682
1683 path_capacities_(dimension.vehicle_capacities()),
1684 path_span_upper_bounds_(dimension.vehicle_span_upper_bounds()),
1685 path_total_slack_cost_coefficients_(
1686 SumOfVectors(dimension.vehicle_span_cost_coefficients(),
1687 dimension.vehicle_slack_cost_coefficients())),
1688 global_span_cost_coefficient_(dimension.global_span_cost_coefficient()),
1689 global_span_cost_(0),
1690 cumul_soft_upper_bounds_(ExtractCumulSoftUpperBounds()),
1691 cumul_soft_lower_bounds_(ExtractCumulSoftLowerBounds()),
1692 cumul_piecewise_linear_costs_(ExtractCumulPiecewiseLinearCosts()),
1693 has_forbidden_intervals_(
1694 absl::c_any_of(dimension.forbidden_intervals(),
1695 [](const SortedDisjointIntervalList& intervals) {
1696 return intervals.NumIntervals() > 0;
1697 })),
1698
1699 dimension_values_(routing_model.vehicles(), dimension.cumuls().size()),
1700 cost_of_path_(NumPaths(), 0),
1701 synchronized_objective_value_(0),
1702 accepted_objective_value_(0),
1703 pickup_rank_and_alternative_index_of_pair_(
1704 routing_model_.GetPickupAndDeliveryPairs().size(), {-1, -1}),
1705 node_index_to_precedences_(ExtractNodeIndexToPrecedences()),
1706 location_of_node_(dimension.cumuls().size(), {-1, -1}),
1707 name_(dimension.name()),
1708 lp_optimizer_(routing_model.GetMutableLocalCumulLPOptimizer(dimension)),
1709 mp_optimizer_(routing_model.GetMutableLocalCumulMPOptimizer(dimension)),
1710 path_accessor_([this](int64_t node) { return GetNext(node); }),
1711 filter_objective_cost_(filter_objective_cost),
1712 may_use_optimizers_(may_use_optimizers),
1713 propagate_own_objective_value_(propagate_own_objective_value) {
1714#ifndef NDEBUG
1715 for (int vehicle = 0; vehicle < routing_model.vehicles(); vehicle++) {
1716 if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) {
1717 DCHECK_NE(lp_optimizer_, nullptr);
1718 DCHECK_NE(mp_optimizer_, nullptr);
1719 }
1720 }
1721#endif // NDEBUG
1722}
1723
1724bool PathCumulFilter::PropagateTransits(int path) {
1725 if (has_forbidden_intervals_) {
1726 return PropagateTransitsWithForbiddenIntervals(path);
1727 } else {
1728 return PropagateTransitsWithoutForbiddenIntervals(path);
1729 }
1730}
1731
1732bool PathCumulFilter::PropagateTransitsWithForbiddenIntervals(int path) {
1733 DCHECK_LT(0, dimension_values_.NumNodes(path));
1734 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1735 absl::Span<const Interval> transits = dimension_values_.Transits(path);
1736 absl::Span<const int> nodes = dimension_values_.Nodes(path);
1737 const int num_nodes = dimension_values_.NumNodes(path);
1738 auto reduce_to_allowed_interval = [this](Interval& interval,
1739 int node) -> bool {
1740 DCHECK(!interval.IsEmpty());
1741 interval.min = dimension_.GetFirstPossibleGreaterOrEqualValueForNode(
1742 node, interval.min);
1743 if (interval.IsEmpty()) return false;
1744 interval.max =
1745 dimension_.GetLastPossibleLessOrEqualValueForNode(node, interval.max);
1746 return !interval.IsEmpty();
1747 };
1748 // Propagate from start to end.
1749 if (!reduce_to_allowed_interval(cumuls[0], nodes[0])) return false;
1750 Interval cumul = cumuls[0];
1751 for (int r = 1; r < num_nodes; ++r) {
1752 cumul.Add(transits[r - 1]);
1753 if (!cumul.IntersectWith(cumuls[r])) return false;
1754 if (!reduce_to_allowed_interval(cumul, nodes[r])) return false;
1755 cumuls[r] = cumul;
1756 }
1757 // Propagate from end to start.
1758 for (int r = num_nodes - 2; r >= 0; --r) {
1759 cumul.Subtract(transits[r]);
1760 if (!cumul.IntersectWith(cumuls[r])) return false;
1761 if (!reduce_to_allowed_interval(cumul, nodes[r])) return false;
1762 cumuls[r] = cumul;
1763 }
1764 return true;
1765}
1766
1767bool PathCumulFilter::PropagateTransitsWithoutForbiddenIntervals(int path) {
1768 DCHECK_LT(0, dimension_values_.NumNodes(path));
1769 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1770 absl::Span<const Interval> transits = dimension_values_.Transits(path);
1771 const int num_nodes = dimension_values_.NumNodes(path);
1772 // Propagate from start to end.
1773 Interval cumul = cumuls.front();
1774 for (int r = 1; r < num_nodes; ++r) {
1775 cumul.Add(transits[r - 1]);
1776 if (!cumul.IntersectWith(cumuls[r])) return false;
1777 cumuls[r] = cumul;
1778 }
1779 // Propagate from end to start.
1780 for (int r = num_nodes - 2; r >= 0; --r) {
1781 cumul.Subtract(transits[r]);
1782 if (!cumul.IntersectWith(cumuls[r])) return false;
1783 cumuls[r] = cumul;
1784 }
1785 return true;
1786}
1787
1788bool PathCumulFilter::PropagateSpan(int path) {
1789 absl::Span<const int64_t> travel_sums = dimension_values_.TravelSums(path);
1790 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1791 // Copy values to make it clear to the compiler that we are working on
1792 // separate values; we'll commit them back at the end of the modifications.
1793 Interval start = cumuls.front();
1794 Interval end = cumuls.back();
1795 Interval span = dimension_values_.Span(path);
1796 if (!span.IncreaseMin(travel_sums.back())) return false;
1797 // We propagate equation: end - start - span = 0.
1798 // In the <= 0 part of this equation, consider two of the terms at their min
1799 // and filter the max of the last term.
1800 if (!end.DecreaseMax(CapAdd(start.max, span.max))) return false;
1801 if (!start.IncreaseMin(CapSub(end.min, span.max))) return false;
1802 if (!span.IncreaseMin(CapSub(end.min, start.max))) return false;
1803 // In the >= 0 part of this equation, consider two of the terms at their max
1804 // and filter the min of the last term.
1805 if (!span.DecreaseMax(CapSub(end.max, start.min))) return false;
1806 if (!end.IncreaseMin(CapAdd(start.min, span.min))) return false;
1807 if (!start.DecreaseMax(CapSub(end.max, span.min))) return false;
1808 // Commit back to input data structures.
1809 cumuls.front() = start;
1810 cumuls.back() = end;
1811 dimension_values_.MutableSpan(path) = span;
1812 return true;
1813}
1814
1815bool PathCumulFilter::FillDimensionValues(int path) {
1816 // Fill nodes.
1817 int node = Start(path);
1818 dimension_values_.PushNode(node);
1819 while (node < Size()) {
1820 const int next = GetNext(node);
1821 dimension_values_.PushNode(next);
1822 node = next;
1823 }
1824 dimension_values_.MakePathFromNewNodes(path);
1826 path, path_capacities_[path], path_span_upper_bounds_[path],
1827 initial_cumul_, initial_slack_, *evaluators_[path],
1828 dimension_values_)) {
1829 return false;
1830 }
1831 dimension_values_.MutableVehicleBreaks(path) = initial_vehicle_breaks_[path];
1832 return true;
1833}
1834
1835bool PathCumulFilter::PropagatePickupToDeliveryLimits(int path) {
1836 const int num_nodes = dimension_values_.NumNodes(path);
1837 absl::Span<const int> nodes = dimension_values_.Nodes(path);
1838 absl::Span<const int64_t> travel_sums = dimension_values_.TravelSums(path);
1839 absl::Span<Interval> cumuls = dimension_values_.MutableCumuls(path);
1840
1841 // For each paired node, find whether it has a sibling.
1842 // This does not check whether the pickup-delivery constraint is satisfied,
1843 // only that if some pickup alternative and delivery alternative of a pair
1844 // appear in the same path, they do respect the pickup-delivery limit.
1845 // First, reset the pair locations to their default (unset) value.
1846 pickup_rank_and_alternative_index_of_pair_.Revert();
1847 for (int rank = 1; rank < num_nodes - 1; ++rank) {
1848 const int node = nodes[rank];
1849 const std::optional<RoutingModel::PickupDeliveryPosition> pickup_pos =
1850 routing_model_.GetPickupPosition(node);
1851 // If node is a pickup, record its position and continue.
1852 if (pickup_pos.has_value()) {
1853 const auto [pair_index, pickup_alternative_index] = *pickup_pos;
1854 pickup_rank_and_alternative_index_of_pair_.Set(
1855 pair_index, {.rank = rank, .index = pickup_alternative_index});
1856 continue;
1857 }
1858 // Propagate the limit if:
1859 // - node is a delivery,
1860 // - a corresponding pickup is in the path,
1861 // - and there is a nontrivial limit.
1862 const std::optional<RoutingModel::PickupDeliveryPosition> delivery_pos =
1863 routing_model_.GetDeliveryPosition(node);
1864 if (!delivery_pos.has_value()) continue;
1865 const auto [pair_index, delivery_alt_index] = *delivery_pos;
1866 const auto [pickup_rank, pickup_alt_index] =
1867 pickup_rank_and_alternative_index_of_pair_.Get(pair_index);
1868 if (pickup_rank == -1) continue;
1869
1870 const int64_t limit = dimension_.GetPickupToDeliveryLimitForPair(
1871 pair_index, pickup_alt_index, delivery_alt_index);
1872 if (limit == kint64max) continue;
1873
1874 // If the total travel between pickup and delivery is larger than the
1875 // limit, we can reject immediately.
1876 const int64_t total_travel =
1877 CapSub(travel_sums[rank], travel_sums[pickup_rank]);
1878 if (total_travel > limit) return false;
1879
1880 // Otherwise, propagate cumul[rank] <= cumul[pickup_rank] + limit.
1881 if (!cumuls[rank].DecreaseMax(CapAdd(cumuls[pickup_rank].max, limit))) {
1882 return false;
1883 }
1884 if (!cumuls[pickup_rank].IncreaseMin(CapSub(cumuls[rank].min, limit))) {
1885 return false;
1886 }
1887 }
1888 return true;
1889}
1890
1891bool PathCumulFilter::PropagateVehicleBreaks(int path) {
1892 return PropagateLightweightVehicleBreaks(
1893 path, dimension_values_,
1894 dimension_.GetBreakDistanceDurationOfVehicle(path));
1895}
1896
1897bool PathCumulFilter::AcceptPath(int64_t path_start, int64_t /*chain_start*/,
1898 int64_t /*chain_end*/) {
1899 const int path = GetPath(path_start);
1900 if (!FillDimensionValues(path)) return false;
1901
1902 // Filter feasibility: cumul windows, transit, span, breaks.
1903 // The first PropagateSpan() is mostly used to check that the total travel
1904 // fits within the span max; the second can tighten all start/end/span bounds.
1905 if (!PropagateSpan(path) || !PropagateTransits(path) ||
1906 !PropagateSpan(path)) {
1907 return false;
1908 }
1909 if (dimension_.HasPickupToDeliveryLimits()) {
1910 if (!PropagatePickupToDeliveryLimits(path)) return false;
1911 // Re-propagate span and transits.
1912 if (!PropagateSpan(path) || !PropagateTransits(path) ||
1913 !PropagateSpan(path)) {
1914 return false;
1915 }
1916 }
1917 if (FilterVehicleBreaks(path) && !PropagateVehicleBreaks(path)) return false;
1918
1919 // Filter costs: span (linear/quadratic/piecewise),
1920 // soft cumul windows (linear/piecewise).
1921 if (!filter_objective_cost_) return true;
1922
1923 CapSubFrom(cost_of_path_.Get(path), &accepted_objective_value_);
1924 if (routing_model_.IsEnd(GetNext(path_start)) &&
1925 !routing_model_.IsVehicleUsedWhenEmpty(path)) {
1926 cost_of_path_.Set(path, 0);
1927 return true;
1928 }
1929
1930 const absl::Span<const Interval> cumuls = dimension_values_.Cumuls(path);
1931 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
1932 const Interval span = dimension_values_.Span(path);
1933 const int64_t total_travel = dimension_values_.TravelSums(path).back();
1934 const int64_t min_total_slack = CapSub(span.min, total_travel);
1935
1936 int64_t new_path_cost = 0;
1937 // Add span and slack costs.
1938 CapAddTo(CapProd(path_total_slack_cost_coefficients_[path], min_total_slack),
1939 &new_path_cost);
1940 // Add soft span costs.
1941 if (dimension_.HasSoftSpanUpperBounds()) {
1942 const auto [bound, cost] = dimension_.GetSoftSpanUpperBoundForVehicle(path);
1943 CapAddTo(CapProd(cost, std::max<int64_t>(0, CapSub(span.min, bound))),
1944 &new_path_cost);
1945 }
1946 if (dimension_.HasQuadraticCostSoftSpanUpperBounds()) {
1947 const auto [bound, cost] =
1948 dimension_.GetQuadraticCostSoftSpanUpperBoundForVehicle(path);
1949 const int64_t violation = std::max<int64_t>(0, CapSub(span.min, bound));
1950 CapAddTo(CapProd(cost, CapProd(violation, violation)), &new_path_cost);
1951 }
1952 // Add soft cumul lower bound costs.
1953 const int num_path_nodes = dimension_values_.NumNodes(path);
1954 if (!cumul_soft_lower_bounds_.empty()) {
1955 for (int r = 0; r < num_path_nodes; ++r) {
1956 const auto [bound, coef] = cumul_soft_lower_bounds_[nodes[r]];
1957 CapAddTo(
1958 CapProd(coef, std::max<int64_t>(0, CapSub(bound, cumuls[r].max))),
1959 &new_path_cost);
1960 }
1961 }
1962 // Add soft cumul upper bound costs.
1963 if (!cumul_soft_upper_bounds_.empty()) {
1964 for (int r = 0; r < num_path_nodes; ++r) {
1965 const auto [bound, coef] = cumul_soft_upper_bounds_[nodes[r]];
1966 CapAddTo(
1967 CapProd(coef, std::max<int64_t>(0, CapSub(cumuls[r].min, bound))),
1968 &new_path_cost);
1969 }
1970 }
1971 // Add piecewise linear costs.
1972 // TODO(user): this assumes the function is increasing; make it correct in
1973 // other cases, at least for decreasing functions.
1974 if (!cumul_piecewise_linear_costs_.empty()) {
1975 for (int r = 0; r < num_path_nodes; ++r) {
1976 const PiecewiseLinearFunction* cost =
1977 cumul_piecewise_linear_costs_[nodes[r]];
1978 if (cost == nullptr) continue;
1979 CapAddTo(cost->Value(cumuls[r].min), &new_path_cost);
1980 }
1981 }
1982 // Replace the committed cost of this path with the new cost.
1983 CapAddTo(new_path_cost, &accepted_objective_value_);
1984 cost_of_path_.Set(path, new_path_cost);
1985 return true;
1986}
1987
1988bool PathCumulFilter::FinalizeAcceptPath(int64_t /*objective_min*/,
1989 int64_t objective_max) {
1990 if (lns_detected()) return true;
1991 if (FilterPrecedences()) {
1992 // Find location of all nodes: remove committed nodes of changed paths,
1993 // then add nodes of changed paths. This removes nodes that became loops.
1994 for (const int path : dimension_values_.ChangedPaths()) {
1995 for (const int node : dimension_values_.CommittedNodes(path)) {
1996 location_of_node_.Set(node, {.path = -1, .rank = -1});
1997 }
1998 }
1999 for (const int path : dimension_values_.ChangedPaths()) {
2000 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
2001 const int num_nodes = nodes.size();
2002 for (int rank = 0; rank < num_nodes; ++rank) {
2003 location_of_node_.Set(nodes[rank], {.path = path, .rank = rank});
2004 }
2005 }
2006 // For every node in changed path, check precedences.
2007 for (const int path : dimension_values_.ChangedPaths()) {
2008 const absl::Span<const int> nodes = dimension_values_.Nodes(path);
2009 const int num_nodes = nodes.size();
2010 for (int rank = 0; rank < num_nodes; ++rank) {
2011 const int node = nodes[rank];
2012 for (const auto [first_node, second_node, offset,
2013 performed_constraint] :
2014 node_index_to_precedences_[node]) {
2015 const auto [path1, rank1] = location_of_node_.Get(first_node);
2016 const auto [path2, rank2] = location_of_node_.Get(second_node);
2017 if (path1 == -1 && !IsVarSynced(first_node)) continue;
2018 if (path2 == -1 && !IsVarSynced(second_node)) continue;
2019 switch (RoutingDimension::GetPrecedenceStatus(
2020 path1 == -1, path2 == -1, performed_constraint)) {
2021 case RoutingDimension::PrecedenceStatus::kActive:
2022 break;
2023 case RoutingDimension::PrecedenceStatus::kInactive:
2024 continue;
2025 case RoutingDimension::PrecedenceStatus::kInvalid:
2026 return false;
2027 }
2028 DCHECK(node == first_node || node == second_node);
2029 DCHECK_EQ(first_node, dimension_values_.Nodes(path1)[rank1]);
2030 DCHECK_EQ(second_node, dimension_values_.Nodes(path2)[rank2]);
2031 // Check that cumul1 + offset <= cumul2 is feasible.
2032 if (CapAdd(dimension_values_.Cumuls(path1)[rank1].min, offset) >
2033 dimension_values_.Cumuls(path2)[rank2].max)
2034 return false;
2035 }
2036 }
2037 }
2038 }
2039 if (filter_objective_cost_ && FilterGlobalSpanCost()) {
2040 int64_t global_end_min = kint64min;
2041 int64_t global_start_max = kint64max;
2042 int64_t global_span_min = 0;
2043 // Find global values of changed paths.
2044 for (const int path : dimension_values_.ChangedPaths()) {
2045 if (dimension_values_.NumNodes(path) == 0) continue;
2046 if (dimension_values_.NumNodes(path) == 2 &&
2047 !routing_model_.IsVehicleUsedWhenEmpty(path)) {
2048 continue;
2049 }
2050 global_span_min =
2051 std::max(global_span_min, dimension_values_.Span(path).min);
2052 const int64_t end_min = dimension_values_.Cumuls(path).back().min;
2053 global_end_min = std::max(global_end_min, end_min);
2054 const int64_t start_max = dimension_values_.Cumuls(path).front().max;
2055 global_start_max = std::min(global_start_max, start_max);
2056 }
2057 // Find global values of unchanged paths, merge with those of changed paths.
2058 for (const auto& [span_min, path] : paths_by_decreasing_span_min_) {
2059 if (dimension_values_.PathHasChanged(path)) continue;
2060 global_span_min = std::max(global_span_min, span_min);
2061 break;
2062 }
2063 for (const auto& [end_min, path] : paths_by_decreasing_end_min_) {
2064 if (dimension_values_.PathHasChanged(path)) continue;
2065 global_end_min = std::max(global_end_min, end_min);
2066 break;
2067 }
2068 for (const auto& [start_max, path] : paths_by_increasing_start_max_) {
2069 if (dimension_values_.PathHasChanged(path)) continue;
2070 global_start_max = std::min(global_start_max, start_max);
2071 break;
2072 }
2073 // Compute global span min.
2074 global_span_min =
2075 std::max(global_span_min, CapSub(global_end_min, global_start_max));
2076 const int64_t global_span_cost =
2077 CapProd(global_span_min, global_span_cost_coefficient_);
2078 CapSubFrom(global_span_cost_.Get(), &accepted_objective_value_);
2079 global_span_cost_.Set(global_span_cost);
2080 CapAddTo(global_span_cost, &accepted_objective_value_);
2081 }
2082
2083 // Filtering on objective value, calling LPs and MIPs if needed.
2084 if (may_use_optimizers_ && lp_optimizer_ != nullptr &&
2085 accepted_objective_value_ <= objective_max) {
2086 std::vector<int> paths_requiring_mp_optimizer;
2087 // TODO(user): Further optimize the LPs when we find feasible-only
2088 // solutions with the original time shares, if there's time left in the end.
2089 int solve_duration_shares = dimension_values_.ChangedPaths().size();
2090 for (const int vehicle : dimension_values_.ChangedPaths()) {
2091 if (!FilterWithDimensionCumulOptimizerForVehicle(vehicle)) {
2092 continue;
2093 }
2094 int64_t path_cost_with_lp = 0;
2095 const DimensionSchedulingStatus status =
2096 lp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
2097 vehicle, /*solve_duration_ratio=*/1.0 / solve_duration_shares,
2098 path_accessor_, /*resource=*/nullptr,
2099 filter_objective_cost_ ? &path_cost_with_lp : nullptr);
2100 solve_duration_shares--;
2101 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2102 return false;
2103 }
2104 // Replace previous path cost with the LP optimizer cost.
2105 if (filter_objective_cost_ &&
2106 (status == DimensionSchedulingStatus::OPTIMAL ||
2107 status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) &&
2108 path_cost_with_lp > cost_of_path_.Get(vehicle)) {
2109 CapSubFrom(cost_of_path_.Get(vehicle), &accepted_objective_value_);
2110 CapAddTo(path_cost_with_lp, &accepted_objective_value_);
2111 if (accepted_objective_value_ > objective_max) return false;
2112 cost_of_path_.Set(vehicle, path_cost_with_lp);
2113 }
2114 // Mark whether the cost could be improved by using mp_optimizer.
2115 DCHECK_NE(mp_optimizer_, nullptr);
2116 if (FilterVehicleBreaks(vehicle) ||
2117 (filter_objective_cost_ &&
2118 (FilterSoftSpanQuadraticCost(vehicle) ||
2119 status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY))) {
2120 paths_requiring_mp_optimizer.push_back(vehicle);
2121 }
2122 }
2123
2124 DCHECK_LE(accepted_objective_value_, objective_max);
2125
2126 solve_duration_shares = paths_requiring_mp_optimizer.size();
2127 for (const int vehicle : paths_requiring_mp_optimizer) {
2128 int64_t path_cost_with_mp = 0;
2129 const DimensionSchedulingStatus status =
2130 mp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
2131 vehicle, /*solve_duration_ratio=*/1.0 / solve_duration_shares,
2132 path_accessor_, /*resource=*/nullptr,
2133 filter_objective_cost_ ? &path_cost_with_mp : nullptr);
2134 solve_duration_shares--;
2135 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2136 return false;
2137 }
2138 if (filter_objective_cost_ &&
2139 status == DimensionSchedulingStatus::OPTIMAL &&
2140 path_cost_with_mp > cost_of_path_.Get(vehicle)) {
2141 CapSubFrom(cost_of_path_.Get(vehicle), &accepted_objective_value_);
2142 CapAddTo(path_cost_with_mp, &accepted_objective_value_);
2143 if (accepted_objective_value_ > objective_max) return false;
2144 cost_of_path_.Set(vehicle, path_cost_with_mp);
2145 }
2146 }
2147 }
2148 return accepted_objective_value_ <= objective_max;
2149}
2150
2151void PathCumulFilter::OnBeforeSynchronizePaths(bool synchronizing_all_paths) {
2152 if (synchronizing_all_paths) {
2153 // All paths are being synchronized, so we can reset all the data and let
2154 // OnSynchronizePathFromStart() calls recompute everything. Otherwise we let
2155 // the InitializeAcceptPath() call below revert the data structures.
2156 dimension_values_.Reset();
2157 cost_of_path_.SetAllAndCommit(0);
2158 location_of_node_.SetAllAndCommit({-1, -1});
2159 global_span_cost_.SetAndCommit(0);
2160 synchronized_objective_value_ = 0; // Accept() relies on this value.
2161 }
2162
2163 accepted_objective_value_ = synchronized_objective_value_;
2164 if (HasAnySyncedPath()) {
2165 // Revert the data structures that are used to compute the bounds and
2166 // pathwise costs of the new solution incrementally.
2167 InitializeAcceptPath();
2168 }
2169}
2170
2171void PathCumulFilter::OnSynchronizePathFromStart(int64_t start) {
2172 DCHECK(IsVarSynced(start));
2173 // Using AcceptPath() to compute the bounds and pathwise costs of the new
2174 // solution incrementally.
2175 const int path = GetPath(start);
2176 const bool is_accepted = AcceptPath(start, start, End(path));
2177 DCHECK(is_accepted);
2178}
2179
2180void PathCumulFilter::OnAfterSynchronizePaths() {
2181 if (filter_objective_cost_ && FilterGlobalSpanCost()) {
2182 // TODO(user): check whether this could go into FinalizeAcceptPath(),
2183 // with a Commit() on some data structure.
2184 paths_by_decreasing_span_min_.clear();
2185 paths_by_decreasing_end_min_.clear();
2186 paths_by_increasing_start_max_.clear();
2187 for (int path = 0; path < NumPaths(); ++path) {
2188 const int num_path_nodes = dimension_values_.Nodes(path).size();
2189 if (num_path_nodes == 0 ||
2190 (num_path_nodes == 2 &&
2191 !routing_model_.IsVehicleUsedWhenEmpty(path))) {
2192 continue;
2193 }
2194 paths_by_decreasing_span_min_.push_back(
2195 {.value = dimension_values_.Span(path).min, .path = path});
2196 paths_by_decreasing_end_min_.push_back(
2197 {.value = dimension_values_.Cumuls(path).back().min, .path = path});
2198 paths_by_increasing_start_max_.push_back(
2199 {.value = dimension_values_.Cumuls(path)[0].max, .path = path});
2200 }
2201 absl::c_sort(paths_by_decreasing_span_min_, std::greater<ValuedPath>());
2202 absl::c_sort(paths_by_decreasing_end_min_, std::greater<ValuedPath>());
2203 absl::c_sort(paths_by_increasing_start_max_);
2204 }
2205 // Using FinalizeAcceptPath() to commit all data structures to this state.
2206 FinalizeAcceptPath(kint64min, kint64max);
2207 dimension_values_.Commit();
2208 cost_of_path_.Commit();
2209 global_span_cost_.Commit();
2210 location_of_node_.Commit();
2211 synchronized_objective_value_ = accepted_objective_value_;
2212}
2213
2214} // namespace
2215
2216IntVarLocalSearchFilter* MakePathCumulFilter(const RoutingDimension& dimension,
2217 bool propagate_own_objective_value,
2218 bool filter_objective_cost,
2219 bool may_use_optimizers) {
2220 RoutingModel& model = *dimension.model();
2221 return model.solver()->RevAlloc(
2222 new PathCumulFilter(model, dimension, propagate_own_objective_value,
2223 filter_objective_cost, may_use_optimizers));
2224}
2225
2226namespace {
2227
2228bool DimensionHasCumulCost(const RoutingDimension& dimension) {
2229 if (dimension.global_span_cost_coefficient() != 0) return true;
2230 if (dimension.HasSoftSpanUpperBounds()) return true;
2231 if (dimension.HasQuadraticCostSoftSpanUpperBounds()) return true;
2232 if (absl::c_any_of(dimension.vehicle_span_cost_coefficients(),
2233 [](int64_t coefficient) { return coefficient != 0; })) {
2234 return true;
2235 }
2236 if (absl::c_any_of(dimension.vehicle_slack_cost_coefficients(),
2237 [](int64_t coefficient) { return coefficient != 0; })) {
2238 return true;
2239 }
2240 for (int i = 0; i < dimension.cumuls().size(); ++i) {
2241 if (dimension.HasCumulVarSoftUpperBound(i)) return true;
2242 if (dimension.HasCumulVarSoftLowerBound(i)) return true;
2243 if (dimension.HasCumulVarPiecewiseLinearCost(i)) return true;
2244 }
2245 return false;
2246}
2247
2248bool DimensionHasPathCumulConstraint(const RoutingDimension& dimension) {
2249 if (dimension.HasBreakConstraints()) return true;
2250 if (dimension.HasPickupToDeliveryLimits()) return true;
2251 if (absl::c_any_of(
2252 dimension.vehicle_span_upper_bounds(), [](int64_t upper_bound) {
2253 return upper_bound != std::numeric_limits<int64_t>::max();
2254 })) {
2255 return true;
2256 }
2257 if (absl::c_any_of(dimension.slacks(),
2258 [](IntVar* slack) { return slack->Min() > 0; })) {
2259 return true;
2260 }
2261 const std::vector<IntVar*>& cumuls = dimension.cumuls();
2262 for (int i = 0; i < cumuls.size(); ++i) {
2263 IntVar* const cumul_var = cumuls[i];
2264 if (cumul_var->Min() > 0 &&
2265 cumul_var->Max() < std::numeric_limits<int64_t>::max() &&
2266 !dimension.model()->IsEnd(i)) {
2267 return true;
2268 }
2269 if (dimension.forbidden_intervals()[i].NumIntervals() > 0) return true;
2270 }
2271 return false;
2272}
2273
2274} // namespace
2275
2276 void AppendLightWeightDimensionFilters(
2277 const PathState* path_state,
2278 const std::vector<RoutingDimension*>& dimensions,
2279 std::vector<LocalSearchFilterManager::FilterEvent>* filters) {
2280 using Interval = DimensionChecker::Interval;
2281 // Add a DimensionChecker for every dimension.
2283 for (const RoutingDimension* dimension : dimensions) {
2284 // Fill path capacities and classes.
2285 const int num_vehicles = dimension->model()->vehicles();
2286 std::vector<Interval> path_capacity(num_vehicles);
2287 std::vector<int> path_class(num_vehicles);
2288 for (int v = 0; v < num_vehicles; ++v) {
2289 const auto& vehicle_capacities = dimension->vehicle_capacities();
2290 path_capacity[v] = {0, vehicle_capacities[v]};
2291 path_class[v] = dimension->vehicle_to_class(v);
2292 }
2293 // For each vehicle class, build the transit evaluator of its nodes.
2294 // Dimensions store evaluators with a double indirection for compactness:
2295 // vehicle -> vehicle_class -> evaluator_index.
2296 // We replicate this in DimensionChecker, except the per-class evaluator is
2297 // exposed as a callable on (node, next) (see the sketch after this function).
2298 const int num_vehicle_classes =
2299 1 + *std::max_element(path_class.begin(), path_class.end());
2300 const int num_cumuls = dimension->cumuls().size();
2301 const int num_slacks = dimension->slacks().size();
2302 std::vector<std::function<Interval(int64_t, int64_t)>> transits(
2303 num_vehicle_classes, nullptr);
2304 for (int vehicle = 0; vehicle < num_vehicles; ++vehicle) {
2305 const int vehicle_class = path_class[vehicle];
2306 if (transits[vehicle_class] != nullptr) continue;
2307 const auto& unary_evaluator =
2308 dimension->GetUnaryTransitEvaluator(vehicle);
2309 if (unary_evaluator != nullptr) {
2310 transits[vehicle_class] = [&unary_evaluator, dimension, num_slacks](
2311 int64_t node, int64_t) -> Interval {
2312 if (node >= num_slacks) return {0, 0};
2313 const int64_t min_transit = unary_evaluator(node);
2314 const int64_t max_transit =
2315 CapAdd(min_transit, dimension->SlackVar(node)->Max());
2316 return {min_transit, max_transit};
2317 };
2318 } else {
2319 const auto& binary_evaluator =
2320 dimension->GetBinaryTransitEvaluator(vehicle);
2321
2322 transits[vehicle_class] = [&binary_evaluator, dimension, num_slacks](
2323 int64_t node, int64_t next) -> Interval {
2324 if (node >= num_slacks) return {0, 0};
2325 const int64_t min_transit = binary_evaluator(node, next);
2326 const int64_t max_transit =
2327 CapAdd(min_transit, dimension->SlackVar(node)->Max());
2328 return {min_transit, max_transit};
2329 };
2330 }
2331 }
2332 // Fill node capacities.
2333 std::vector<Interval> node_capacity(num_cumuls);
2334 for (int node = 0; node < num_cumuls; ++node) {
2335 const IntVar* cumul = dimension->CumulVar(node);
2336 node_capacity[node] = {cumul->Min(), cumul->Max()};
2337 }
2338 // Make the dimension checker and pass ownership to the filter.
2339 auto checker = std::make_unique<DimensionChecker>(
2340 path_state, std::move(path_capacity), std::move(path_class),
2341 std::move(transits), std::move(node_capacity));
2343 LocalSearchFilter* const filter = MakeDimensionFilter(
2344 dimension->model()->solver(), std::move(checker), dimension->name());
2345 filters->push_back({filter, kAccept});
2346 }
2347}
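
// A self-contained sketch of the vehicle -> class -> evaluator indirection
// used above: one callable is built per vehicle class and shared by all
// vehicles of that class, so identical evaluators are never duplicated. The
// names and the generic class_evaluator parameter are illustrative, not part
// of the routing API.
inline std::vector<std::function<int64_t(int64_t, int64_t)>>
BuildPerClassTransitsSketch(
    const std::vector<int>& vehicle_to_class,
    const std::function<int64_t(int, int64_t, int64_t)>& class_evaluator) {
  const int num_classes =
      vehicle_to_class.empty()
          ? 0
          : 1 + *std::max_element(vehicle_to_class.begin(),
                                  vehicle_to_class.end());
  std::vector<std::function<int64_t(int64_t, int64_t)>> transits(num_classes);
  for (const int vehicle_class : vehicle_to_class) {
    if (transits[vehicle_class] != nullptr) continue;  // Class already built.
    transits[vehicle_class] = [vehicle_class, class_evaluator](
                                  int64_t from, int64_t to) -> int64_t {
      return class_evaluator(vehicle_class, from, to);
    };
  }
  return transits;
}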
2348
2349 void AppendDimensionCumulFilters(
2350 const std::vector<RoutingDimension*>& dimensions,
2351 const RoutingSearchParameters& parameters, bool filter_objective_cost,
2352 bool use_chain_cumul_filter,
2353 std::vector<LocalSearchFilterManager::FilterEvent>* filters) {
2355 // Filter priority depth increases with complexity of filtering.
2356 // - Dimensions without any cumul-related costs or constraints will have a
2357 // ChainCumulFilter, lowest priority depth.
2358 // - Dimensions with cumul costs or constraints, but no global span cost
2359 // and/or precedences will have a PathCumulFilter.
2360 // - Dimensions with a global span cost coefficient and/or precedences will
2361 // have a global LP filter.
2362 const int num_dimensions = dimensions.size();
2363
2364 const bool has_dimension_optimizers =
2365 !parameters.disable_scheduling_beware_this_may_degrade_performance();
2366 std::vector<bool> use_path_cumul_filter(num_dimensions);
2367 std::vector<bool> use_cumul_bounds_propagator_filter(num_dimensions);
2368 std::vector<bool> use_global_lp_filter(num_dimensions);
2369 std::vector<bool> use_resource_assignment_filter(num_dimensions);
2370 for (int d = 0; d < num_dimensions; d++) {
2371 const RoutingDimension& dimension = *dimensions[d];
2372 const bool has_cumul_cost = DimensionHasCumulCost(dimension);
2373 use_path_cumul_filter[d] =
2374 has_cumul_cost || DimensionHasPathCumulConstraint(dimension);
2375
2376 const int num_dimension_resource_groups =
2377 dimension.model()->GetDimensionResourceGroupIndices(&dimension).size();
2378 const bool can_use_cumul_bounds_propagator_filter =
2379 !dimension.HasBreakConstraints() &&
2380 num_dimension_resource_groups == 0 &&
2381 (!filter_objective_cost || !has_cumul_cost);
2382 const bool has_precedences = !dimension.GetNodePrecedences().empty();
2383 use_global_lp_filter[d] =
2384 has_dimension_optimizers &&
2385 ((has_precedences && !can_use_cumul_bounds_propagator_filter) ||
2386 (filter_objective_cost &&
2387 dimension.global_span_cost_coefficient() > 0) ||
2388 num_dimension_resource_groups > 1);
2389
2390 use_cumul_bounds_propagator_filter[d] =
2391 has_precedences && !use_global_lp_filter[d];
2392
2393 use_resource_assignment_filter[d] =
2394 has_dimension_optimizers && num_dimension_resource_groups > 0;
2395 }
2396
2397 for (int d = 0; d < num_dimensions; d++) {
2398 const RoutingDimension& dimension = *dimensions[d];
2399 const RoutingModel& model = *dimension.model();
2400 // NOTE: We always add the [Chain|Path]CumulFilter to filter each route's
2401 // feasibility separately to try and cut bad decisions earlier in the
2402 // search, but we don't propagate the computed cost if the LPCumulFilter is
2403 // already doing it.
2404 const bool use_global_lp = use_global_lp_filter[d];
2405 const bool filter_resource_assignment = use_resource_assignment_filter[d];
2406 if (use_path_cumul_filter[d]) {
2407 PathCumulFilter* filter = model.solver()->RevAlloc(new PathCumulFilter(
2408 model, dimension, /*propagate_own_objective_value*/ !use_global_lp &&
2409 !filter_resource_assignment,
2410 filter_objective_cost, has_dimension_optimizers));
2411 const int priority = filter->UsesDimensionOptimizers() ? 1 : 0;
2412 filters->push_back({filter, kAccept, priority});
2413 } else if (use_chain_cumul_filter) {
2414 filters->push_back(
2415 {model.solver()->RevAlloc(new ChainCumulFilter(model, dimension)),
2416 kAccept, /*priority*/ 0});
2417 }
2418
2419 if (use_cumul_bounds_propagator_filter[d]) {
2420 DCHECK(!use_global_lp);
2421 DCHECK(!filter_resource_assignment);
2422 filters->push_back({MakeCumulBoundsPropagatorFilter(dimension), kAccept,
2423 /*priority*/ 2});
2424 }
2425
2426 if (filter_resource_assignment) {
2427 filters->push_back({MakeResourceAssignmentFilter(
2428 model.GetMutableLocalCumulLPOptimizer(dimension),
2429 model.GetMutableLocalCumulMPOptimizer(dimension),
2430 /*propagate_own_objective_value*/ !use_global_lp,
2431 filter_objective_cost),
2432 kAccept, /*priority*/ 3});
2433 }
2434
2435 if (use_global_lp) {
2436 filters->push_back({MakeGlobalLPCumulFilter(
2437 model.GetMutableGlobalCumulLPOptimizer(dimension),
2438 model.GetMutableGlobalCumulMPOptimizer(dimension),
2439 filter_objective_cost),
2440 kAccept, /*priority*/ 4});
2441 }
2442 }
2443}
2444
2445namespace {
2446
2447// Filter for pickup/delivery precedences.
2448class PickupDeliveryFilter : public LocalSearchFilter {
2449 public:
2450 PickupDeliveryFilter(const PathState* path_state,
2451 absl::Span<const PickupDeliveryPair> pairs,
2452 const std::vector<RoutingModel::PickupAndDeliveryPolicy>&
2453 vehicle_policies);
2454 ~PickupDeliveryFilter() override = default;
2455 bool Accept(const Assignment* /*delta*/, const Assignment* /*deltadelta*/,
2456 int64_t /*objective_min*/, int64_t /*objective_max*/) override;
2457
2458 void Reset() override;
2459 void Synchronize(const Assignment* /*assignment*/,
2460 const Assignment* /*delta*/) override;
2461 std::string DebugString() const override { return "PickupDeliveryFilter"; }
2462
2463 private:
2464 template <bool check_assigned_pairs>
2465 bool AcceptPathDispatch();
2466 template <bool check_assigned_pairs>
2467 bool AcceptPathDefault(int path);
2468 template <bool lifo, bool check_assigned_pairs>
2469 bool AcceptPathOrdered(int path);
2470 void AssignAllVisitedPairsAndLoopNodes();
2471
2472 const PathState* const path_state_;
2473 struct PairInfo {
2474 // @TODO(user): Use default member initializers once we drop C++17
2475 // support on github.
2476 bool is_paired : 1;
2477 bool is_pickup : 1;
2478 int pair_index : 30;
2479 PairInfo() : is_paired(false), pair_index(-1) {}
2480 PairInfo(bool is_paired, bool is_pickup, int pair_index)
2481 : is_paired(is_paired), is_pickup(is_pickup), pair_index(pair_index) {}
2482 };
2483 std::vector<PairInfo> pair_info_of_node_;
2484 struct PairStatus {
2485 // @TODO(user): Use default member initializers once we drop C++17
2486 // support on github.
2487 bool pickup : 1;
2488 bool delivery : 1;
2489 PairStatus() : pickup(false), delivery(false) {}
2490 };
2491 CommittableArray<PairStatus> assigned_status_of_pair_;
2492 SparseBitset<int> pair_is_open_;
2493 CommittableValue<int> num_assigned_pairs_;
2494 std::deque<int> visited_deque_;
2495 const std::vector<RoutingModel::PickupAndDeliveryPolicy> vehicle_policies_;
2496};
2497
2498PickupDeliveryFilter::PickupDeliveryFilter(
2499 const PathState* path_state, absl::Span<const PickupDeliveryPair> pairs,
2500 const std::vector<RoutingModel::PickupAndDeliveryPolicy>& vehicle_policies)
2501 : path_state_(path_state),
2502 pair_info_of_node_(path_state->NumNodes()),
2503 assigned_status_of_pair_(pairs.size(), {}),
2504 pair_is_open_(pairs.size()),
2505 num_assigned_pairs_(0),
2506 vehicle_policies_(vehicle_policies) {
2507 for (int pair_index = 0; pair_index < pairs.size(); ++pair_index) {
2508 const auto& [pickups, deliveries] = pairs[pair_index];
2509 for (const int pickup : pickups) {
2510 pair_info_of_node_[pickup] =
2511 // @TODO(user): Use aggregate initialization once we drop C++17
2512 // support on github.
2513 PairInfo{/*is_paired=*/true, /*is_pickup=*/true,
2514 /*pair_index=*/pair_index};
2515 }
2516 for (const int delivery : deliveries) {
2517 pair_info_of_node_[delivery] =
2518 // @TODO(user): Use aggregate initialization once we drop C++17
2519 // support on github.
2520 PairInfo{/*is_paired=*/true, /*is_pickup=*/false,
2521 /*pair_index=*/pair_index};
2522 }
2523 }
2524}
2525
2526void PickupDeliveryFilter::Reset() {
2527 assigned_status_of_pair_.SetAllAndCommit({});
2528 num_assigned_pairs_.SetAndCommit(0);
2529}
2530
2531void PickupDeliveryFilter::AssignAllVisitedPairsAndLoopNodes() {
2532 assigned_status_of_pair_.Revert();
2533 num_assigned_pairs_.Revert();
2534 int num_assigned_pairs = num_assigned_pairs_.Get();
2535 if (num_assigned_pairs == assigned_status_of_pair_.Size()) return;
2536 // If node is a pickup or delivery, this marks the corresponding side of its
2537 // pair as assigned, and returns true if the whole pair *became* assigned.
2538 auto assign_node = [this](int node) -> bool {
2539 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
2540 if (!is_paired) return false;
2541 bool assigned_pair = false;
2542 PairStatus assigned_status = assigned_status_of_pair_.Get(pair_index);
2543 if (is_pickup && !assigned_status.pickup) {
2544 assigned_pair = assigned_status.delivery;
2545 assigned_status.pickup = true;
2546 assigned_status_of_pair_.Set(pair_index, assigned_status);
2547 }
2548 if (!is_pickup && !assigned_status.delivery) {
2549 assigned_pair = assigned_status.pickup;
2550 assigned_status.delivery = true;
2551 assigned_status_of_pair_.Set(pair_index, assigned_status);
2552 }
2553 return assigned_pair;
2554 };
2555 for (const int path : path_state_->ChangedPaths()) {
2556 for (const int node : path_state_->Nodes(path)) {
2557 num_assigned_pairs += assign_node(node) ? 1 : 0;
2558 }
2559 }
2560 for (const int loop : path_state_->ChangedLoops()) {
2561 num_assigned_pairs += assign_node(loop) ? 1 : 0;
2562 }
2563 num_assigned_pairs_.Set(num_assigned_pairs);
2564}
2565
2566void PickupDeliveryFilter::Synchronize(const Assignment* /*assignment*/,
2567 const Assignment* /*delta*/) {
2568 AssignAllVisitedPairsAndLoopNodes();
2569 assigned_status_of_pair_.Commit();
2570 num_assigned_pairs_.Commit();
2571}
2572
2573bool PickupDeliveryFilter::Accept(const Assignment* /*delta*/,
2574 const Assignment* /*deltadelta*/,
2575 int64_t /*objective_min*/,
2576 int64_t /*objective_max*/) {
2577 if (path_state_->IsInvalid()) return true; // Protect against CP-LNS.
2578 AssignAllVisitedPairsAndLoopNodes();
2579 const bool check_assigned_pairs =
2580 num_assigned_pairs_.Get() < assigned_status_of_pair_.Size();
2581 if (check_assigned_pairs) {
2582 return AcceptPathDispatch<true>();
2583 } else {
2584 return AcceptPathDispatch<false>();
2585 }
2586}
2587
2588template <bool check_assigned_pairs>
2589bool PickupDeliveryFilter::AcceptPathDispatch() {
2590 for (const int path : path_state_->ChangedPaths()) {
2591 switch (vehicle_policies_[path]) {
2592 case RoutingModel::PICKUP_AND_DELIVERY_NO_ORDER:
2593 if (!AcceptPathDefault<check_assigned_pairs>(path)) return false;
2594 break;
2595 case RoutingModel::PICKUP_AND_DELIVERY_LIFO:
2596 if (!AcceptPathOrdered<true, check_assigned_pairs>(path)) return false;
2597 break;
2598 case RoutingModel::PICKUP_AND_DELIVERY_FIFO:
2599 if (!AcceptPathOrdered<false, check_assigned_pairs>(path)) return false;
2600 break;
2601 default:
2602 continue;
2603 }
2604 }
2605 return true;
2606}
2607
2608template <bool check_assigned_pairs>
2609bool PickupDeliveryFilter::AcceptPathDefault(int path) {
2610 pair_is_open_.ResetAllToFalse();
2611 int num_opened_pairs = 0;
2612 for (const int node : path_state_->Nodes(path)) {
2613 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
2614 if (!is_paired) continue;
2615 if constexpr (check_assigned_pairs) {
2616 const PairStatus status = assigned_status_of_pair_.Get(pair_index);
2617 if (!status.pickup || !status.delivery) continue;
2618 }
2619 if (is_pickup) {
2620 pair_is_open_.Set(pair_index);
2621 ++num_opened_pairs;
2622 } else {
2623 if (!pair_is_open_[pair_index]) return false;
2624 pair_is_open_.Clear(pair_index);
2625 --num_opened_pairs;
2626 }
2627 }
2628 // Every pair whose pickup and delivery are both assigned and whose pickup
2629 // was visited must have its delivery visited on the same path.
2630 if (num_opened_pairs > 0) return false;
2631 pair_is_open_.NotifyAllClear();
2632 return true;
2633}
2634
2635template <bool lifo, bool check_assigned_pairs>
2636bool PickupDeliveryFilter::AcceptPathOrdered(int path) {
2637 visited_deque_.clear();
2638 for (const int node : path_state_->Nodes(path)) {
2639 const auto [is_paired, is_pickup, pair_index] = pair_info_of_node_[node];
2640 if (!is_paired) continue;
2641 if constexpr (check_assigned_pairs) {
2642 const PairStatus status = assigned_status_of_pair_.Get(pair_index);
2643 if (!status.pickup || !status.delivery) continue;
2644 }
2645 if (is_pickup) {
2646 visited_deque_.emplace_back(pair_index);
2647 } else {
2648 if (visited_deque_.empty()) return false;
2649 if constexpr (lifo) {
2650 const int last_pair_index = visited_deque_.back();
2651 if (last_pair_index != pair_index) return false;
2652 visited_deque_.pop_back();
2653 } else {
2654 const int first_pair_index = visited_deque_.front();
2655 if (first_pair_index != pair_index) return false;
2656 visited_deque_.pop_front();
2657 }
2658 }
2659 }
2660 return visited_deque_.empty();
2661}
2662
2663} // namespace
2664
2665 LocalSearchFilter* MakePickupDeliveryFilter(
2666 const RoutingModel& routing_model, const PathState* path_state,
2667 const std::vector<PickupDeliveryPair>& pairs,
2668 const std::vector<RoutingModel::PickupAndDeliveryPolicy>&
2669 vehicle_policies) {
2670 return routing_model.solver()->RevAlloc(
2671 new PickupDeliveryFilter(path_state, pairs, vehicle_policies));
2672}
2673
2674namespace {
2675
2676// Vehicle variable filter
2677class VehicleVarFilter : public LocalSearchFilter {
2678 public:
2679 VehicleVarFilter(const RoutingModel& routing_model,
2680 const PathState* path_state);
2681 bool Accept(const Assignment* delta, const Assignment* deltadelta,
2682 int64_t objective_min, int64_t objective_max) override;
2683 void Synchronize(const Assignment* /*assignment*/,
2684 const Assignment* /*delta*/) override;
2685 std::string DebugString() const override { return "VehicleVariableFilter"; }
2686
2687 private:
2688 bool HasConstrainedVehicleVars() const;
2689
2690 const PathState* path_state_;
2691 std::vector<IntVar*> vehicle_vars_;
2692 const int num_vehicles_;
2693 bool is_disabled_;
2694};
2695
2696VehicleVarFilter::VehicleVarFilter(const RoutingModel& routing_model,
2697 const PathState* path_state)
2698 : path_state_(path_state),
2699 vehicle_vars_(routing_model.VehicleVars()),
2700 num_vehicles_(routing_model.vehicles()),
2701 is_disabled_(!HasConstrainedVehicleVars()) {}
2702
2703bool VehicleVarFilter::HasConstrainedVehicleVars() const {
2704 for (const IntVar* var : vehicle_vars_) {
2705 const int unconstrained_size = num_vehicles_ + ((var->Min() >= 0) ? 0 : 1);
2706 if (var->Size() != unconstrained_size) return true;
2707 }
2708 return false;
2709}
2710
2711void VehicleVarFilter::Synchronize(const Assignment* /*assignment*/,
2712 const Assignment* /*delta*/) {
2713 is_disabled_ = !HasConstrainedVehicleVars();
2714}
2715
2716bool VehicleVarFilter::Accept(const Assignment* /*delta*/,
2717 const Assignment* /*deltadelta*/,
2718 int64_t /*objective_min*/,
2719 int64_t /*objective_max*/) {
2720 if (is_disabled_) return true;
2721 for (const int path : path_state_->ChangedPaths()) {
2722 // The first and last chains are committed on the vehicle; no need to check them.
2723 for (const PathState::Chain chain :
2724 path_state_->Chains(path).DropFirstChain().DropLastChain()) {
2725 for (const int node : chain) {
2726 if (!vehicle_vars_[node]->Contains(path)) return false;
2727 }
2728 }
2729 }
2730 return true;
2731}
2732
2733} // namespace
2734
2735LocalSearchFilter* MakeVehicleVarFilter(const RoutingModel& routing_model,
2736 const PathState* path_state) {
2737 return routing_model.solver()->RevAlloc(
2738 new VehicleVarFilter(routing_model, path_state));
2739}
2740
2741namespace {
2742
2743class CumulBoundsPropagatorFilter : public IntVarLocalSearchFilter {
2744 public:
2745 explicit CumulBoundsPropagatorFilter(const RoutingDimension& dimension);
2746 bool Accept(const Assignment* delta, const Assignment* deltadelta,
2747 int64_t objective_min, int64_t objective_max) override;
2748 std::string DebugString() const override {
2749 return "CumulBoundsPropagatorFilter(" + propagator_.dimension().name() +
2750 ")";
2751 }
2752
2753 private:
2754 CumulBoundsPropagator propagator_;
2755 const int64_t cumul_offset_;
2756 SparseBitset<int64_t> delta_touched_;
2757 std::vector<int64_t> delta_nexts_;
2758};
2759
2760CumulBoundsPropagatorFilter::CumulBoundsPropagatorFilter(
2761 const RoutingDimension& dimension)
2762 : IntVarLocalSearchFilter(dimension.model()->Nexts()),
2763 propagator_(&dimension),
2764 cumul_offset_(dimension.GetGlobalOptimizerOffset()),
2765 delta_touched_(Size()),
2766 delta_nexts_(Size()) {}
2767
2768bool CumulBoundsPropagatorFilter::Accept(const Assignment* delta,
2769 const Assignment* /*deltadelta*/,
2770 int64_t /*objective_min*/,
2771 int64_t /*objective_max*/) {
2772 delta_touched_.ResetAllToFalse();
2773 for (const IntVarElement& delta_element :
2774 delta->IntVarContainer().elements()) {
2775 int64_t index = -1;
2776 if (FindIndex(delta_element.Var(), &index)) {
2777 if (!delta_element.Bound()) {
2778 // LNS detected
2779 return true;
2780 }
2781 delta_touched_.Set(index);
2782 delta_nexts_[index] = delta_element.Value();
2783 }
2784 }
2785 const auto& next_accessor = [this](int64_t index) -> int64_t {
2786 return delta_touched_[index] ? delta_nexts_[index]
2787 : !IsVarSynced(index) ? -1
2788 : Value(index);
2789 };
2790
2791 return propagator_.PropagateCumulBounds(next_accessor, cumul_offset_);
2792}
2793
2794} // namespace
2795
2796 IntVarLocalSearchFilter* MakeCumulBoundsPropagatorFilter(
2797 const RoutingDimension& dimension) {
2798 return dimension.model()->solver()->RevAlloc(
2799 new CumulBoundsPropagatorFilter(dimension));
2800}
2801
2802namespace {
2803
2804class LPCumulFilter : public IntVarLocalSearchFilter {
2805 public:
2806 LPCumulFilter(const std::vector<IntVar*>& nexts,
2807 GlobalDimensionCumulOptimizer* optimizer,
2808 GlobalDimensionCumulOptimizer* mp_optimizer,
2809 bool filter_objective_cost);
2810 bool Accept(const Assignment* delta, const Assignment* deltadelta,
2811 int64_t objective_min, int64_t objective_max) override;
2812 int64_t GetAcceptedObjectiveValue() const override;
2813 void OnSynchronize(const Assignment* delta) override;
2814 int64_t GetSynchronizedObjectiveValue() const override;
2815 std::string DebugString() const override {
2816 return "LPCumulFilter(" + lp_optimizer_.dimension()->name() + ")";
2817 }
2818
2819 private:
2820 GlobalDimensionCumulOptimizer& lp_optimizer_;
2821 GlobalDimensionCumulOptimizer& mp_optimizer_;
2822 const bool filter_objective_cost_;
2823 int64_t synchronized_cost_without_transit_;
2824 int64_t delta_cost_without_transit_;
2825 SparseBitset<int64_t> delta_touched_;
2826 std::vector<int64_t> delta_nexts_;
2827};
2828
2829LPCumulFilter::LPCumulFilter(const std::vector<IntVar*>& nexts,
2830 GlobalDimensionCumulOptimizer* lp_optimizer,
2831 GlobalDimensionCumulOptimizer* mp_optimizer,
2832 bool filter_objective_cost)
2833 : IntVarLocalSearchFilter(nexts),
2834 lp_optimizer_(*lp_optimizer),
2835 mp_optimizer_(*mp_optimizer),
2836 filter_objective_cost_(filter_objective_cost),
2837 synchronized_cost_without_transit_(-1),
2838 delta_cost_without_transit_(-1),
2839 delta_touched_(Size()),
2840 delta_nexts_(Size()) {}
2841
2842bool LPCumulFilter::Accept(const Assignment* delta,
2843 const Assignment* /*deltadelta*/,
2844 int64_t /*objective_min*/, int64_t objective_max) {
2845 delta_touched_.ResetAllToFalse();
2846 for (const IntVarElement& delta_element :
2847 delta->IntVarContainer().elements()) {
2848 int64_t index = -1;
2849 if (FindIndex(delta_element.Var(), &index)) {
2850 if (!delta_element.Bound()) {
2851 // LNS detected
2852 return true;
2853 }
2854 delta_touched_.Set(index);
2855 delta_nexts_[index] = delta_element.Value();
2856 }
2857 }
2858 const auto& next_accessor = [this](int64_t index) {
2859 return delta_touched_[index] ? delta_nexts_[index]
2860 : !IsVarSynced(index) ? -1
2861 : Value(index);
2862 };
2863
2864 if (!filter_objective_cost_) {
2865 // No need to compute the cost of the LP, only verify its feasibility.
2866 delta_cost_without_transit_ = 0;
2867 DimensionSchedulingStatus status = lp_optimizer_.ComputeCumuls(
2868 next_accessor, {}, nullptr, nullptr, nullptr);
2869 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) {
2870 status = mp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr,
2871 nullptr);
2872 }
2873 DCHECK(status != DimensionSchedulingStatus::FEASIBLE)
2874 << "FEASIBLE without filtering objective cost should be OPTIMAL";
2875 return status == DimensionSchedulingStatus::OPTIMAL;
2876 }
2877
2878   DimensionSchedulingStatus status =
2879       lp_optimizer_.ComputeCumulCostWithoutFixedTransits(
2880 next_accessor, &delta_cost_without_transit_);
2881
2882 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY ||
2883 status == DimensionSchedulingStatus::FEASIBLE) {
2884 const DimensionSchedulingStatus lp_status = status;
2885 int64_t mp_cost;
2886 status = mp_optimizer_.ComputeCumulCostWithoutFixedTransits(next_accessor,
2887 &mp_cost);
2888 if (lp_status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY &&
2889 status == DimensionSchedulingStatus::OPTIMAL) {
2890       // TRICKY: Only keep the MP cost when the MP solve is OPTIMAL: if the
2891       // MP is only feasible, its cost isn't a lower bound to the problem, so
2892       // we keep the LP relaxation's lower bound found by Glop.
2893 delta_cost_without_transit_ = mp_cost;
2894 } else if (lp_status == DimensionSchedulingStatus::FEASIBLE &&
2895 status != DimensionSchedulingStatus::INFEASIBLE) {
2896 // TRICKY: Since feasible costs are not lower bounds, we keep the lowest
2897 // of the costs between the LP-feasible and CP-SAT (feasible or optimal).
2898 delta_cost_without_transit_ =
2899 std::min(delta_cost_without_transit_, mp_cost);
2900 }
2901 }
2902
2903 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2904 delta_cost_without_transit_ = std::numeric_limits<int64_t>::max();
2905 return false;
2906 }
2907 return delta_cost_without_transit_ <= objective_max;
2908}
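// Illustrative summary (not from the original source) of the status handling
// in Accept() above:
//   OPTIMAL              -> the LP cost is a valid lower bound, keep it.
//   RELAXED_OPTIMAL_ONLY -> Glop only bounded a relaxation; the MP optimizer
//                           is consulted and its cost kept only if OPTIMAL.
//   FEASIBLE             -> feasible costs are not lower bounds; the smaller
//                           of the LP and MP costs is kept.
//   INFEASIBLE           -> the move is rejected outright.
// For example, with objective_max == 100, a final cost of 120 rejects the
// move, while 90 accepts it and is reported by GetAcceptedObjectiveValue().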
2909
2910int64_t LPCumulFilter::GetAcceptedObjectiveValue() const {
2911 return delta_cost_without_transit_;
2912}
2913
2914void LPCumulFilter::OnSynchronize(const Assignment* /*delta*/) {
2915 // TODO(user): Try to optimize this so the LP is not called when the last
2916 // computed delta cost corresponds to the solution being synchronized.
2917 const RoutingModel& model = *lp_optimizer_.dimension()->model();
2918 const auto& next_accessor = [this, &model](int64_t index) {
2919 return IsVarSynced(index) ? Value(index)
2920 : model.IsStart(index) ? model.End(model.VehicleIndex(index))
2921 : -1;
2922 };
2923
2924 if (!filter_objective_cost_) {
2925 synchronized_cost_without_transit_ = 0;
2926 }
2927   DimensionSchedulingStatus status =
2928       filter_objective_cost_
2929 ? lp_optimizer_.ComputeCumulCostWithoutFixedTransits(
2930 next_accessor, &synchronized_cost_without_transit_)
2931 : lp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr, nullptr,
2932 nullptr);
2933 if (status == DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY) {
2934 status = filter_objective_cost_
2935 ? mp_optimizer_.ComputeCumulCostWithoutFixedTransits(
2936 next_accessor, &synchronized_cost_without_transit_)
2937 : mp_optimizer_.ComputeCumuls(next_accessor, {}, nullptr,
2938 nullptr, nullptr);
2939 }
2940 if (status == DimensionSchedulingStatus::INFEASIBLE) {
2941 // TODO(user): This should only happen if the LP/MIP solver times out.
2942 // DCHECK the fail wasn't due to an infeasible model.
2943 synchronized_cost_without_transit_ = 0;
2944 }
2945}
2946
2947int64_t LPCumulFilter::GetSynchronizedObjectiveValue() const {
2948 return synchronized_cost_without_transit_;
2949}
2950
2951} // namespace
2952
2953 IntVarLocalSearchFilter* MakeGlobalLPCumulFilter(
2954     GlobalDimensionCumulOptimizer* lp_optimizer,
2955 GlobalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost) {
2956 DCHECK_NE(lp_optimizer, nullptr);
2957 DCHECK_NE(mp_optimizer, nullptr);
2958 const RoutingModel& model = *lp_optimizer->dimension()->model();
2959 return model.solver()->RevAlloc(new LPCumulFilter(
2960 model.Nexts(), lp_optimizer, mp_optimizer, filter_objective_cost));
2961}
2962
2963namespace {
2964
2965using ResourceGroup = RoutingModel::ResourceGroup;
2966
2967class ResourceGroupAssignmentFilter : public BasePathFilter {
2968 public:
2969 ResourceGroupAssignmentFilter(const std::vector<IntVar*>& nexts,
2970 const ResourceGroup* resource_group,
2971 LocalDimensionCumulOptimizer* lp_optimizer,
2972 LocalDimensionCumulOptimizer* mp_optimizer,
2973 bool filter_objective_cost);
2974 bool InitializeAcceptPath() override;
2975 bool AcceptPath(int64_t path_start, int64_t, int64_t) override;
2976 bool FinalizeAcceptPath(int64_t objective_min,
2977 int64_t objective_max) override;
2978 void OnBeforeSynchronizePaths(bool synchronizing_all_paths) override;
2979 void OnSynchronizePathFromStart(int64_t start) override;
2980 void OnAfterSynchronizePaths() override;
2981
2982 int64_t GetAcceptedObjectiveValue() const override {
2983 return lns_detected() ? 0 : delta_cost_without_transit_;
2984 }
2985 int64_t GetSynchronizedObjectiveValue() const override {
2986 return synchronized_cost_without_transit_;
2987 }
2988 std::string DebugString() const override {
2989 return "ResourceGroupAssignmentFilter(" + dimension_.name() + ")";
2990 }
2991
2992 private:
2993 using RCIndex = RoutingModel::ResourceClassIndex;
2994
2995 bool VehicleRequiresResourceAssignment(
2996 int vehicle, const std::function<int64_t(int64_t)>& next_accessor,
2997 bool* is_infeasible);
2998 int64_t ComputeRouteCumulCostWithoutResourceAssignment(
2999 int vehicle, const std::function<int64_t(int64_t)>& next_accessor) const;
3000
3001 const RoutingModel& model_;
3002 const RoutingDimension& dimension_;
3003 const ResourceGroup& resource_group_;
3004 LocalDimensionCumulOptimizer* lp_optimizer_;
3005 LocalDimensionCumulOptimizer* mp_optimizer_;
3006 const bool filter_objective_cost_;
3007 bool current_synch_failed_;
3008 int64_t synchronized_cost_without_transit_;
3009 int64_t delta_cost_without_transit_;
3010 std::vector<std::vector<int64_t>> vehicle_to_resource_class_assignment_costs_;
3011 std::vector<int> vehicles_requiring_resource_assignment_;
3012 std::vector<bool> vehicle_requires_resource_assignment_;
3013 std::vector<std::vector<int64_t>>
3014 delta_vehicle_to_resource_class_assignment_costs_;
3015 std::vector<int> delta_vehicles_requiring_resource_assignment_;
3016 std::vector<bool> delta_vehicle_requires_resource_assignment_;
3017
3018 std::vector<int> bound_resource_index_of_vehicle_;
3019 util_intops::StrongVector<RCIndex, absl::flat_hash_set<int>>
3020 ignored_resources_per_class_;
3021};
3022
3023ResourceGroupAssignmentFilter::ResourceGroupAssignmentFilter(
3024 const std::vector<IntVar*>& nexts, const ResourceGroup* resource_group,
3025 LocalDimensionCumulOptimizer* lp_optimizer,
3026 LocalDimensionCumulOptimizer* mp_optimizer, bool filter_objective_cost)
3027 : BasePathFilter(nexts, lp_optimizer->dimension()->cumuls().size(),
3028 lp_optimizer->dimension()->model()->GetPathsMetadata()),
3029 model_(*lp_optimizer->dimension()->model()),
3030 dimension_(*lp_optimizer->dimension()),
3031 resource_group_(*resource_group),
3032 lp_optimizer_(lp_optimizer),
3033 mp_optimizer_(mp_optimizer),
3034 filter_objective_cost_(filter_objective_cost),
3035 current_synch_failed_(false),
3036 synchronized_cost_without_transit_(-1),
3037 delta_cost_without_transit_(-1) {
3038 vehicle_to_resource_class_assignment_costs_.resize(model_.vehicles());
3039 delta_vehicle_to_resource_class_assignment_costs_.resize(model_.vehicles());
3040}
3041
3042bool ResourceGroupAssignmentFilter::InitializeAcceptPath() {
3043 delta_vehicle_to_resource_class_assignment_costs_.assign(model_.vehicles(),
3044 {});
3045 if (current_synch_failed_) {
3046 return true;
3047 }
3048 // TODO(user): Keep track of num_used_vehicles internally and compute its
3049 // new value here by only going through the touched_paths_.
3050 int num_used_vehicles = 0;
3051 const int num_resources = resource_group_.Size();
3052 for (int v : resource_group_.GetVehiclesRequiringAResource()) {
3053 if (GetNext(model_.Start(v)) != model_.End(v) ||
3054 model_.IsVehicleUsedWhenEmpty(v)) {
3055 if (++num_used_vehicles > num_resources) {
3056 return false;
3057 }
3058 }
3059 }
3060 delta_vehicle_requires_resource_assignment_ =
3061 vehicle_requires_resource_assignment_;
3062 return true;
3063}
3064
3065bool ResourceGroupAssignmentFilter::AcceptPath(int64_t path_start, int64_t,
3066 int64_t) {
3067 if (current_synch_failed_) {
3068 return true;
3069 }
3070 const int vehicle = model_.VehicleIndex(path_start);
3071 bool is_infeasible = false;
3072 delta_vehicle_requires_resource_assignment_[vehicle] =
3073 VehicleRequiresResourceAssignment(
3074 vehicle, [this](int64_t n) { return GetNext(n); }, &is_infeasible);
3075 return !is_infeasible;
3076}
3077
3078bool ResourceGroupAssignmentFilter::FinalizeAcceptPath(
3079 int64_t /*objective_min*/, int64_t objective_max) {
3080 delta_cost_without_transit_ = 0;
3081 if (current_synch_failed_) {
3082 return true;
3083 }
3084 const auto& next_accessor = [this](int64_t index) { return GetNext(index); };
3085 delta_vehicles_requiring_resource_assignment_.clear();
3086 // First sum the costs of the routes not requiring resource assignment
3087 // (cheaper computations).
3088 for (int v = 0; v < model_.vehicles(); ++v) {
3089 if (delta_vehicle_requires_resource_assignment_[v]) {
3090 delta_vehicles_requiring_resource_assignment_.push_back(v);
3091 continue;
3092 }
3093 int64_t route_cost = 0;
3094 int64_t start = model_.Start(v);
3095 if (PathStartTouched(start)) {
3096 route_cost =
3097 ComputeRouteCumulCostWithoutResourceAssignment(v, next_accessor);
3098 if (route_cost < 0) {
3099 return false;
3100 }
3101 } else if (IsVarSynced(start)) {
3102 DCHECK_EQ(vehicle_to_resource_class_assignment_costs_[v].size(), 1);
3103 route_cost = vehicle_to_resource_class_assignment_costs_[v][0];
3104 }
3105 CapAddTo(route_cost, &delta_cost_without_transit_);
3106 if (delta_cost_without_transit_ > objective_max) {
3107 return false;
3108 }
3109 }
3110 // Recompute the assignment costs to resources for touched paths requiring
3111 // resource assignment.
3112 for (int64_t start : GetTouchedPathStarts()) {
3113 const int vehicle = model_.VehicleIndex(start);
3114 if (!delta_vehicle_requires_resource_assignment_[vehicle]) {
3115 // Already handled above.
3116 continue;
3117 }
3118     if (!ComputeVehicleToResourceClassAssignmentCosts(
3119             vehicle, /*solve_duration_ratio=*/1.0, resource_group_,
3120 ignored_resources_per_class_, next_accessor,
3121 dimension_.transit_evaluator(vehicle), filter_objective_cost_,
3122 lp_optimizer_, mp_optimizer_,
3123 &delta_vehicle_to_resource_class_assignment_costs_[vehicle],
3124 nullptr, nullptr)) {
3125 return false;
3126 }
3127 }
3128 const int64_t assignment_cost = ComputeBestVehicleToResourceAssignment(
3129 delta_vehicles_requiring_resource_assignment_,
3130 resource_group_.GetResourceIndicesPerClass(),
3131 ignored_resources_per_class_,
3132 /*vehicle_to_resource_class_assignment_costs=*/
3133 [this](int v) {
3134 return PathStartTouched(model_.Start(v))
3135 ? &delta_vehicle_to_resource_class_assignment_costs_[v]
3136 : &vehicle_to_resource_class_assignment_costs_[v];
3137 },
3138 nullptr);
3139 CapAddTo(assignment_cost, &delta_cost_without_transit_);
3140 return assignment_cost >= 0 && delta_cost_without_transit_ <= objective_max;
3141}
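// Illustrative note (not from the original source): FinalizeAcceptPath() above
// evaluates the delta cost in two parts. Routes that need no resource have
// their cumul cost recomputed (touched paths) or reused from the synchronized
// state, a negative cost meaning the route is infeasible. Routes that do need
// a resource have their per-resource-class costs recomputed for touched paths,
// and ComputeBestVehicleToResourceAssignment() returns the cost of the best
// vehicle-to-resource matching, or a negative value when no feasible matching
// exists. The move is accepted only if both parts are feasible and their
// capped sum stays within objective_max.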
3142
3143void ResourceGroupAssignmentFilter::OnBeforeSynchronizePaths(bool) {
3144 if (!HasAnySyncedPath()) {
3145 vehicle_to_resource_class_assignment_costs_.assign(model_.vehicles(), {});
3146 }
3147 bound_resource_index_of_vehicle_.assign(model_.vehicles(), -1);
3148 vehicles_requiring_resource_assignment_.clear();
3149 vehicles_requiring_resource_assignment_.reserve(
3150 resource_group_.GetVehiclesRequiringAResource().size());
3151 vehicle_requires_resource_assignment_.assign(model_.vehicles(), false);
3152 ignored_resources_per_class_.assign(resource_group_.GetResourceClassesCount(),
3153 absl::flat_hash_set<int>());
3154
3155 for (int v : resource_group_.GetVehiclesRequiringAResource()) {
3156 const int64_t start = model_.Start(v);
3157 if (!IsVarSynced(start)) {
3158 continue;
3159 }
3160 vehicle_requires_resource_assignment_[v] =
3161 VehicleRequiresResourceAssignment(
3162 v, [this](int64_t n) { return Value(n); }, &current_synch_failed_);
3163 if (vehicle_requires_resource_assignment_[v]) {
3164 vehicles_requiring_resource_assignment_.push_back(v);
3165 }
3166 if (current_synch_failed_) {
3167 return;
3168 }
3169 }
3170 synchronized_cost_without_transit_ = 0;
3171}
3172
3173void ResourceGroupAssignmentFilter::OnSynchronizePathFromStart(int64_t start) {
3174 if (current_synch_failed_) return;
3175 DCHECK(IsVarSynced(start));
3176 const int v = model_.VehicleIndex(start);
3177 const auto& next_accessor = [this](int64_t index) {
3178 return IsVarSynced(index) ? Value(index) : -1;
3179 };
3180 if (!vehicle_requires_resource_assignment_[v]) {
3181 const int64_t route_cost =
3182 ComputeRouteCumulCostWithoutResourceAssignment(v, next_accessor);
3183 if (route_cost < 0) {
3184 current_synch_failed_ = true;
3185 return;
3186 }
3187 CapAddTo(route_cost, &synchronized_cost_without_transit_);
3188 vehicle_to_resource_class_assignment_costs_[v] = {route_cost};
3189 return;
3190 }
3191 // NOTE(user): Even if filter_objective_cost_ is false, we
3192 // still need to call ComputeVehicleToResourceClassAssignmentCosts() for every
3193 // vehicle requiring resource assignment to keep track of whether or not a
3194 // given vehicle-to-resource-class assignment is possible by storing 0 or -1
3195 // in vehicle_to_resource_class_assignment_costs_.
3196 // TODO(user): Adjust the 'solve_duration_ratio' below.
3197   if (!ComputeVehicleToResourceClassAssignmentCosts(
3198           v, /*solve_duration_ratio=*/1.0, resource_group_,
3199 ignored_resources_per_class_, next_accessor,
3200 dimension_.transit_evaluator(v), filter_objective_cost_,
3201 lp_optimizer_, mp_optimizer_,
3202 &vehicle_to_resource_class_assignment_costs_[v], nullptr, nullptr)) {
3203 vehicle_to_resource_class_assignment_costs_[v].assign(
3204 resource_group_.GetResourceClassesCount(), -1);
3205 current_synch_failed_ = true;
3206 }
3207}
3208
3209void ResourceGroupAssignmentFilter::OnAfterSynchronizePaths() {
3210 if (current_synch_failed_) {
3211 synchronized_cost_without_transit_ = 0;
3212 return;
3213 }
3214 if (!filter_objective_cost_) {
3215 DCHECK_EQ(synchronized_cost_without_transit_, 0);
3216 return;
3217 }
3218 const int64_t assignment_cost = ComputeBestVehicleToResourceAssignment(
3219 vehicles_requiring_resource_assignment_,
3220 resource_group_.GetResourceIndicesPerClass(),
3221 ignored_resources_per_class_,
3222 [this](int v) { return &vehicle_to_resource_class_assignment_costs_[v]; },
3223 nullptr);
3224 if (assignment_cost < 0) {
3225 synchronized_cost_without_transit_ = 0;
3226 current_synch_failed_ = true;
3227 } else {
3228 DCHECK_GE(synchronized_cost_without_transit_, 0);
3229 CapAddTo(assignment_cost, &synchronized_cost_without_transit_);
3230 }
3231}
3232
3233bool ResourceGroupAssignmentFilter::VehicleRequiresResourceAssignment(
3234 int vehicle, const std::function<int64_t(int64_t)>& next_accessor,
3235 bool* is_infeasible) {
3236 *is_infeasible = false;
3237 if (!resource_group_.VehicleRequiresAResource(vehicle)) return false;
3238 const IntVar* res_var = model_.ResourceVar(vehicle, resource_group_.Index());
3239 if (!model_.IsVehicleUsedWhenEmpty(vehicle) &&
3240 next_accessor(model_.Start(vehicle)) == model_.End(vehicle)) {
3241 if (res_var->Bound() && res_var->Value() >= 0) {
3242 // Vehicle with a resource (force-)assigned to it cannot be unused.
3243 *is_infeasible = true;
3244 }
3245 return false;
3246 }
3247 // Vehicle is used.
3248 if (res_var->Bound()) {
3249 // No need to do resource assignment for this vehicle.
3250 const int res = res_var->Value();
3251 if (res < 0) {
3252 // Vehicle has a negative resource index enforced but is used.
3253 *is_infeasible = true;
3254 } else {
3255 bound_resource_index_of_vehicle_[vehicle] = res;
3256 const RCIndex rc = resource_group_.GetResourceClassIndex(res);
3257 ignored_resources_per_class_[rc].insert(res);
3258 }
3259 return false;
3260 }
3261 // Vehicle is used and res_var isn't bound.
3262 return true;
3263}
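// Illustrative decision table (not from the original source) for the helper
// above, where "used" means the route is non-empty or IsVehicleUsedWhenEmpty():
//   vehicle not in this resource group      -> false
//   unused, resource var bound to res >= 0  -> false, *is_infeasible = true
//   unused otherwise                        -> false
//   used, resource var bound to res < 0     -> false, *is_infeasible = true
//   used, resource var bound to res >= 0    -> false (resource is fixed and
//                                              removed from the pool)
//   used, resource var unbound              -> true (needs assignment)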
3264
3265int64_t
3266ResourceGroupAssignmentFilter::ComputeRouteCumulCostWithoutResourceAssignment(
3267 int vehicle, const std::function<int64_t(int64_t)>& next_accessor) const {
3268 if (next_accessor(model_.Start(vehicle)) == model_.End(vehicle) &&
3269 !model_.IsVehicleUsedWhenEmpty(vehicle)) {
3270 return 0;
3271 }
3272 using Resource = RoutingModel::ResourceGroup::Resource;
3273 const Resource* resource = nullptr;
3274 if (resource_group_.VehicleRequiresAResource(vehicle)) {
3275 DCHECK_GE(bound_resource_index_of_vehicle_[vehicle], 0);
3276 resource =
3277 &resource_group_.GetResource(bound_resource_index_of_vehicle_[vehicle]);
3278 }
3279 int64_t route_cost = 0;
3280 const DimensionSchedulingStatus status =
3281 lp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
3282 vehicle, /*solve_duration_ratio=*/1.0, next_accessor, resource,
3283 filter_objective_cost_ ? &route_cost : nullptr);
3284 switch (status) {
3285 case DimensionSchedulingStatus::INFEASIBLE:
3286 return -1;
3287 case DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY:
3288 if (mp_optimizer_->ComputeRouteCumulCostWithoutFixedTransits(
3289 vehicle, /*solve_duration_ratio=*/1.0, next_accessor, resource,
3290 filter_objective_cost_ ? &route_cost : nullptr) ==
3291 DimensionSchedulingStatus::INFEASIBLE) {
3292 return -1;
3293 }
3294 break;
3295 default:
3296 DCHECK(status == DimensionSchedulingStatus::OPTIMAL ||
3297 status == DimensionSchedulingStatus::FEASIBLE);
3298 }
3299 return route_cost;
3300}
3301
3302// ResourceAssignmentFilter
3303class ResourceAssignmentFilter : public LocalSearchFilter {
3304 public:
3305 ResourceAssignmentFilter(const std::vector<IntVar*>& nexts,
3306 LocalDimensionCumulOptimizer* optimizer,
3307 LocalDimensionCumulOptimizer* mp_optimizer,
3308 bool propagate_own_objective_value,
3309 bool filter_objective_cost);
3310 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3311 int64_t objective_min, int64_t objective_max) override;
3312 void Synchronize(const Assignment* assignment,
3313 const Assignment* delta) override;
3314
3315 int64_t GetAcceptedObjectiveValue() const override {
3316 return propagate_own_objective_value_ ? delta_cost_ : 0;
3317 }
3318 int64_t GetSynchronizedObjectiveValue() const override {
3319 return propagate_own_objective_value_ ? synchronized_cost_ : 0;
3320 }
3321 std::string DebugString() const override {
3322 return "ResourceAssignmentFilter(" + dimension_name_ + ")";
3323 }
3324
3325 private:
3326 std::vector<IntVarLocalSearchFilter*> resource_group_assignment_filters_;
3327 int64_t synchronized_cost_;
3328 int64_t delta_cost_;
3329 const bool propagate_own_objective_value_;
3330 const std::string dimension_name_;
3331};
3332
3333ResourceAssignmentFilter::ResourceAssignmentFilter(
3334 const std::vector<IntVar*>& nexts,
3335 LocalDimensionCumulOptimizer* lp_optimizer,
3336 LocalDimensionCumulOptimizer* mp_optimizer,
3337 bool propagate_own_objective_value, bool filter_objective_cost)
3338 : propagate_own_objective_value_(propagate_own_objective_value),
3339 dimension_name_(lp_optimizer->dimension()->name()) {
3340 const RoutingModel& model = *lp_optimizer->dimension()->model();
3341 for (const auto& resource_group : model.GetResourceGroups()) {
3342 resource_group_assignment_filters_.push_back(
3343 model.solver()->RevAlloc(new ResourceGroupAssignmentFilter(
3344 nexts, resource_group.get(), lp_optimizer, mp_optimizer,
3345 filter_objective_cost)));
3346 }
3347}
3348
3349bool ResourceAssignmentFilter::Accept(const Assignment* delta,
3350 const Assignment* deltadelta,
3351 int64_t objective_min,
3352 int64_t objective_max) {
3353 delta_cost_ = 0;
3354 for (LocalSearchFilter* group_filter : resource_group_assignment_filters_) {
3355 if (!group_filter->Accept(delta, deltadelta, objective_min,
3356 objective_max)) {
3357 return false;
3358 }
3359 delta_cost_ =
3360 std::max(delta_cost_, group_filter->GetAcceptedObjectiveValue());
3361 DCHECK_LE(delta_cost_, objective_max)
3362 << "ResourceGroupAssignmentFilter should return false when the "
3363 "objective_max is exceeded.";
3364 }
3365 return true;
3366}
3367
3368void ResourceAssignmentFilter::Synchronize(const Assignment* assignment,
3369 const Assignment* delta) {
3370 synchronized_cost_ = 0;
3371 for (LocalSearchFilter* group_filter : resource_group_assignment_filters_) {
3372 group_filter->Synchronize(assignment, delta);
3373 synchronized_cost_ = std::max(
3374 synchronized_cost_, group_filter->GetSynchronizedObjectiveValue());
3375 }
3376}
3377
3378} // namespace
3379
3380 LocalSearchFilter* MakeResourceAssignmentFilter(
3381     LocalDimensionCumulOptimizer* lp_optimizer,
3382 LocalDimensionCumulOptimizer* mp_optimizer,
3383 bool propagate_own_objective_value, bool filter_objective_cost) {
3384   DCHECK_NE(lp_optimizer, nullptr);
3385   DCHECK_NE(mp_optimizer, nullptr);
3386   const RoutingModel& model = *lp_optimizer->dimension()->model();
3387 return model.solver()->RevAlloc(new ResourceAssignmentFilter(
3388 model.Nexts(), lp_optimizer, mp_optimizer, propagate_own_objective_value,
3389 filter_objective_cost));
3390}
3391
3392namespace {
3393
3394// This filter accepts deltas for which the assignment satisfies the
3395// constraints of the Solver. This is verified by keeping an internal copy of
3396// the assignment with all Next vars and their updated values, and calling
3397// RestoreAssignment() on the assignment+delta.
3398// TODO(user): Also call the solution finalizer on variables, with the
3399 // exception of Next Vars (would fail on large instances).
3400// WARNING: In the case of mandatory nodes, when all vehicles are currently
3401// being used in the solution but uninserted nodes still remain, this filter
3402// will reject the solution, even if the node could be inserted on one of these
3403// routes, because all Next vars of vehicle starts are already instantiated.
3404// TODO(user): Avoid such false negatives.
3405
3406class CPFeasibilityFilter : public IntVarLocalSearchFilter {
3407 public:
3408 explicit CPFeasibilityFilter(RoutingModel* routing_model);
3409 ~CPFeasibilityFilter() override = default;
3410 std::string DebugString() const override { return "CPFeasibilityFilter"; }
3411 bool Accept(const Assignment* delta, const Assignment* deltadelta,
3412 int64_t objective_min, int64_t objective_max) override;
3413 void OnSynchronize(const Assignment* delta) override;
3414
3415 private:
3416 void AddDeltaToAssignment(const Assignment* delta, Assignment* assignment);
3417
3418 static const int64_t kUnassigned;
3419 const RoutingModel* const model_;
3420 Solver* const solver_;
3421 Assignment* const assignment_;
3422 Assignment* const temp_assignment_;
3423 DecisionBuilder* const restore_;
3424 SearchLimit* const limit_;
3425};
3426
3427const int64_t CPFeasibilityFilter::kUnassigned = -1;
3428
3429CPFeasibilityFilter::CPFeasibilityFilter(RoutingModel* routing_model)
3430 : IntVarLocalSearchFilter(routing_model->Nexts()),
3431 model_(routing_model),
3432 solver_(routing_model->solver()),
3433 assignment_(solver_->MakeAssignment()),
3434 temp_assignment_(solver_->MakeAssignment()),
3435 restore_(solver_->MakeRestoreAssignment(temp_assignment_)),
3436 limit_(solver_->MakeCustomLimit(
3437 [routing_model]() { return routing_model->CheckLimit(); })) {
3438 assignment_->Add(routing_model->Nexts());
3439}
3440
3441bool CPFeasibilityFilter::Accept(const Assignment* delta,
3442 const Assignment* /*deltadelta*/,
3443 int64_t /*objective_min*/,
3444 int64_t /*objective_max*/) {
3445 temp_assignment_->Copy(assignment_);
3446 AddDeltaToAssignment(delta, temp_assignment_);
3447
3448 return solver_->Solve(restore_, limit_);
3449}
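// Illustrative note (not from the original source): Accept() above checks CP
// feasibility by overlaying the delta on a copy of the committed Next values
// and asking the solver to restore that assignment under the routing model's
// search limit. Solve() returning true means the restored assignment survives
// constraint propagation, so the move is accepted; a propagation failure or a
// triggered limit rejects it.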
3450
3451void CPFeasibilityFilter::OnSynchronize(const Assignment* delta) {
3452 AddDeltaToAssignment(delta, assignment_);
3453}
3454
3455void CPFeasibilityFilter::AddDeltaToAssignment(const Assignment* delta,
3456 Assignment* assignment) {
3457 if (delta == nullptr) {
3458 return;
3459 }
3460 Assignment::IntContainer* const container =
3461 assignment->MutableIntVarContainer();
3462 for (const IntVarElement& delta_element :
3463 delta->IntVarContainer().elements()) {
3464 IntVar* const var = delta_element.Var();
3465 int64_t index = kUnassigned;
3466 // Ignoring variables found in the delta which are not next variables, such
3467 // as vehicle variables.
3468 if (!FindIndex(var, &index)) continue;
3469 DCHECK_EQ(var, Var(index));
3470 const int64_t value = delta_element.Value();
3471
3472 container->AddAtPosition(var, index)->SetValue(value);
3473 if (model_->IsStart(index)) {
3474 if (model_->IsEnd(value)) {
3475 // Do not restore unused routes.
3476 container->MutableElement(index)->Deactivate();
3477 } else {
3478 // Re-activate the route's start in case it was deactivated before.
3479 container->MutableElement(index)->Activate();
3480 }
3481 }
3482 }
3483}
3484
3485} // namespace
3486
3487 IntVarLocalSearchFilter* MakeCPFeasibilityFilter(RoutingModel* routing_model) {
3488   return routing_model->solver()->RevAlloc(
3489 new CPFeasibilityFilter(routing_model));
3490}
3491
3492PathState::PathState(int num_nodes, std::vector<int> path_start,
3493 std::vector<int> path_end)
3494 : num_nodes_(num_nodes),
3495 num_paths_(path_start.size()),
3496 num_nodes_threshold_(std::max(16, 4 * num_nodes_)) // Arbitrary value.
3497{
3498 DCHECK_EQ(path_start.size(), num_paths_);
3499 DCHECK_EQ(path_end.size(), num_paths_);
3500 for (int p = 0; p < num_paths_; ++p) {
3501 path_start_end_.push_back({path_start[p], path_end[p]});
3502 }
3503 Reset();
3504}
3505
3506 void PathState::Reset() {
3507   is_invalid_ = false;
3508 // Initial state is all unperformed: paths go from start to end directly.
3509 committed_index_.assign(num_nodes_, -1);
3510 committed_paths_.assign(num_nodes_, kUnassigned);
3511 committed_nodes_.assign(2 * num_paths_, -1);
3512 chains_.assign(num_paths_ + 1, {-1, -1}); // Reserve 1 more for sentinel.
3513 paths_.assign(num_paths_, {-1, -1});
3514 for (int path = 0; path < num_paths_; ++path) {
3515 const int index = 2 * path;
3516 const auto& [start, end] = path_start_end_[path];
3517 committed_index_[start] = index;
3518 committed_index_[end] = index + 1;
3519
3520 committed_nodes_[index] = start;
3521 committed_nodes_[index + 1] = end;
3522
3523 committed_paths_[start] = path;
3524 committed_paths_[end] = path;
3525
3526 chains_[path] = {index, index + 2};
3527 paths_[path] = {path, path + 1};
3528 }
3529 chains_[num_paths_] = {0, 0}; // Sentinel.
3530 // Nodes that are not starts or ends are not in any path, but they still need
3531 // to be represented in the committed state.
3532 for (int node = 0; node < num_nodes_; ++node) {
3533 if (committed_index_[node] != -1) continue; // node is start or end.
3534 committed_index_[node] = committed_nodes_.size();
3535 committed_nodes_.push_back(node);
3536 }
3537}
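// Illustrative example (not from the original source): after Reset() with
// num_paths_ == 2, starts {0, 1}, ends {2, 3} and num_nodes_ == 6:
//   committed_nodes_ = [0, 2, 1, 3, 4, 5]  // starts/ends first, then the rest.
//   chains_[0] = {0, 2}, chains_[1] = {2, 4}  // half-open index ranges.
//   paths_[p] = {p, p + 1}  // each path is made of exactly one chain.
// Nodes 4 and 5 are present in committed_nodes_ but belong to no path
// (committed_paths_ == kUnassigned) until a later change commits them as loops
// or inserts them into a route.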
3538
3539 PathState::ChainRange PathState::Chains(int path) const {
3540   const PathBounds bounds = paths_[path];
3541 return PathState::ChainRange(chains_.data() + bounds.begin_index,
3542 chains_.data() + bounds.end_index,
3543 committed_nodes_.data());
3544}
3545
3546 PathState::NodeRange PathState::Nodes(int path) const {
3547   const PathBounds bounds = paths_[path];
3548 return PathState::NodeRange(chains_.data() + bounds.begin_index,
3549 chains_.data() + bounds.end_index,
3550 committed_nodes_.data());
3551}
3552
3553void PathState::ChangePath(int path, absl::Span<const ChainBounds> chains) {
3554 changed_paths_.push_back(path);
3555 const int path_begin_index = chains_.size();
3556 chains_.insert(chains_.end(), chains.begin(), chains.end());
3557 const int path_end_index = chains_.size();
3558 paths_[path] = {path_begin_index, path_end_index};
3559 chains_.emplace_back(0, 0); // Sentinel.
3560}
3561
3562void PathState::ChangeLoops(absl::Span<const int> new_loops) {
3563 for (const int loop : new_loops) {
3564 // If the node was already a loop, do not add it.
3565 // If it was not assigned, it becomes a loop.
3566 if (Path(loop) == kLoop) continue;
3567 changed_loops_.push_back(loop);
3568 }
3569}
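// Illustrative usage sketch (not from the original source; names are
// hypothetical): a filter that moves committed node n between committed nodes
// a and b of path p would describe the new path as chains over *committed*
// indices, for instance:
//
//   // new path p = [start..a] + [n..n] + [b..end]
//   state.ChangePath(p, {{idx(start), idx(a) + 1},
//                        {idx(n), idx(n) + 1},
//                        {idx(b), idx(end) + 1}});
//
// where idx() stands for CommittedIndex(). Until Commit() or Revert(),
// Chains(p) and Nodes(p) iterate over exactly this changed description.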
3570
3571 void PathState::Commit() {
3572   DCHECK(!IsInvalid());
3573 if (committed_nodes_.size() < num_nodes_threshold_) {
3574 IncrementalCommit();
3575 } else {
3576 FullCommit();
3577 }
3578}
3579
3580 void PathState::Revert() {
3581   is_invalid_ = false;
3582 chains_.resize(num_paths_ + 1); // One per path + sentinel.
3583 for (const int path : changed_paths_) {
3584 paths_[path] = {path, path + 1};
3585 }
3586 changed_paths_.clear();
3587 changed_loops_.clear();
3588}
3589
3590void PathState::CopyNewPathAtEndOfNodes(int path) {
3591 // Copy path's nodes, chain by chain.
3592 const PathBounds path_bounds = paths_[path];
3593 for (int i = path_bounds.begin_index; i < path_bounds.end_index; ++i) {
3594 const ChainBounds chain_bounds = chains_[i];
3595 committed_nodes_.insert(committed_nodes_.end(),
3596 committed_nodes_.data() + chain_bounds.begin_index,
3597 committed_nodes_.data() + chain_bounds.end_index);
3598 if (committed_paths_[committed_nodes_.back()] == path) continue;
3599 for (int i = chain_bounds.begin_index; i < chain_bounds.end_index; ++i) {
3600 const int node = committed_nodes_[i];
3601 committed_paths_[node] = path;
3602 }
3603 }
3604}
3605
3606// TODO(user): Instead of copying paths at the end systematically,
3607// reuse some of the memory when possible.
3608void PathState::IncrementalCommit() {
3609 const int new_nodes_begin = committed_nodes_.size();
3610 for (const int path : ChangedPaths()) {
3611 const int chain_begin = committed_nodes_.size();
3612 CopyNewPathAtEndOfNodes(path);
3613 const int chain_end = committed_nodes_.size();
3614 chains_[path] = {chain_begin, chain_end};
3615 }
3616 // Re-index all copied nodes.
3617 const int new_nodes_end = committed_nodes_.size();
3618 for (int i = new_nodes_begin; i < new_nodes_end; ++i) {
3619 const int node = committed_nodes_[i];
3620 committed_index_[node] = i;
3621 }
3622 // New loops stay in place: only change their path to kLoop,
3623 // committed_index_ does not change.
3624 for (const int loop : ChangedLoops()) {
3625 committed_paths_[loop] = kLoop;
3626 }
3627 // Committed part of the state is set up, erase incremental changes.
3628 Revert();
3629}
3630
3631void PathState::FullCommit() {
3632 // Copy all paths at the end of committed_nodes_,
3633 // then remove all old committed_nodes_.
3634 const int old_num_nodes = committed_nodes_.size();
3635 for (int path = 0; path < num_paths_; ++path) {
3636 const int new_path_begin = committed_nodes_.size() - old_num_nodes;
3637 CopyNewPathAtEndOfNodes(path);
3638 const int new_path_end = committed_nodes_.size() - old_num_nodes;
3639 chains_[path] = {new_path_begin, new_path_end};
3640 }
3641 committed_nodes_.erase(committed_nodes_.begin(),
3642 committed_nodes_.begin() + old_num_nodes);
3643
3644 // Reindex path nodes, then loop nodes.
3645 constexpr int kUnindexed = -1;
3646 committed_index_.assign(num_nodes_, kUnindexed);
3647 int index = 0;
3648 for (const int node : committed_nodes_) {
3649 committed_index_[node] = index++;
3650 }
3651 for (int node = 0; node < num_nodes_; ++node) {
3652 if (committed_index_[node] != kUnindexed) continue;
3653 committed_index_[node] = index++;
3654 committed_nodes_.push_back(node);
3655 }
3656 for (const int loop : ChangedLoops()) {
3657 committed_paths_[loop] = kLoop;
3658 }
3659 // Committed part of the state is set up, erase incremental changes.
3660 Revert();
3661}
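// Illustrative note (not from the original source): Commit() amortizes its
// work by appending only the changed paths to committed_nodes_
// (IncrementalCommit) until the buffer grows past num_nodes_threshold_, at
// which point FullCommit() rewrites every path and compacts the buffer back to
// a single copy of each node. This keeps a long sequence of small commits
// roughly proportional to the size of each change while bounding memory to a
// small constant factor of num_nodes_.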
3662
3663namespace {
3664
3665class PathStateFilter : public LocalSearchFilter {
3666 public:
3667 std::string DebugString() const override { return "PathStateFilter"; }
3668 PathStateFilter(std::unique_ptr<PathState> path_state,
3669 const std::vector<IntVar*>& nexts);
3670 void Relax(const Assignment* delta, const Assignment* deltadelta) override;
3671 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
3672 return true;
3673 }
3674 void Synchronize(const Assignment*, const Assignment*) override {};
3675 void Commit(const Assignment* assignment, const Assignment* delta) override;
3676 void Revert() override;
3677 void Reset() override;
3678
3679 private:
3680 // Used in arc to chain translation, see below.
3681 struct TailHeadIndices {
3682 int tail_index;
3683 int head_index;
3684 };
3685 struct IndexArc {
3686 int index;
3687 int arc;
3688 bool operator<(const IndexArc& other) const { return index < other.index; }
3689 };
3690
3691 // Translate changed_arcs_ to chains, pass to underlying PathState.
3692 void CutChains();
3693 // From changed_paths_ and changed_arcs_, fill chains_ and paths_.
3694 // Selection-based algorithm in O(n^2), to use for small change sets.
3695 void MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm();
3696 // From changed_paths_ and changed_arcs_, fill chains_ and paths_.
3697 // Generic algorithm in O(sort(n)+n), to use for larger change sets.
3698 void MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm();
3699
3700 const std::unique_ptr<PathState> path_state_;
3701 // Map IntVar* index to node, offset by the min index in nexts.
3702 std::vector<int> variable_index_to_node_;
3703 int index_offset_;
3704
3705   // Used in CutChains(); kept as class members to avoid reallocations.
3706 std::vector<int> changed_paths_;
3707 std::vector<bool> path_has_changed_;
3708 std::vector<std::pair<int, int>> changed_arcs_;
3709 std::vector<int> changed_loops_;
3710 std::vector<TailHeadIndices> tail_head_indices_;
3711 std::vector<IndexArc> arcs_by_tail_index_;
3712 std::vector<IndexArc> arcs_by_head_index_;
3713 std::vector<int> next_arc_;
3714 std::vector<PathState::ChainBounds> path_chains_;
3715};
3716
3717PathStateFilter::PathStateFilter(std::unique_ptr<PathState> path_state,
3718 const std::vector<IntVar*>& nexts)
3719 : path_state_(std::move(path_state)) {
3720 {
3721 int min_index = std::numeric_limits<int>::max();
3722 int max_index = std::numeric_limits<int>::min();
3723 for (const IntVar* next : nexts) {
3724 const int index = next->index();
3725 min_index = std::min<int>(min_index, index);
3726 max_index = std::max<int>(max_index, index);
3727 }
3728 variable_index_to_node_.resize(max_index - min_index + 1, -1);
3729 index_offset_ = min_index;
3730 }
3731
3732 for (int node = 0; node < nexts.size(); ++node) {
3733 const int index = nexts[node]->index() - index_offset_;
3734 variable_index_to_node_[index] = node;
3735 }
3736 path_has_changed_.assign(path_state_->NumPaths(), false);
3737}
3738
3739void PathStateFilter::Relax(const Assignment* delta, const Assignment*) {
3740 path_state_->Revert();
3741 changed_arcs_.clear();
3742 for (const IntVarElement& var_value : delta->IntVarContainer().elements()) {
3743 if (var_value.Var() == nullptr) continue;
3744 const int index = var_value.Var()->index() - index_offset_;
3745 if (index < 0 || variable_index_to_node_.size() <= index) continue;
3746 const int node = variable_index_to_node_[index];
3747 if (node == -1) continue;
3748 if (var_value.Bound()) {
3749 changed_arcs_.emplace_back(node, var_value.Value());
3750 } else {
3751 path_state_->Revert();
3752 path_state_->SetInvalid();
3753 return;
3754 }
3755 }
3756 CutChains();
3757}
3758
3759void PathStateFilter::Reset() { path_state_->Reset(); }
3760
3761// The solver does not guarantee that a given Commit() corresponds to
3762// the previous Relax() (or that there has been a call to Relax()),
3763// so we replay the full change call sequence.
3764void PathStateFilter::Commit(const Assignment* assignment,
3765 const Assignment* delta) {
3766 path_state_->Revert();
3767 if (delta == nullptr || delta->Empty()) {
3768 Relax(assignment, nullptr);
3769 } else {
3770 Relax(delta, nullptr);
3771 }
3772 path_state_->Commit();
3773}
3774
3775void PathStateFilter::Revert() { path_state_->Revert(); }
3776
3777void PathStateFilter::CutChains() {
3778 // Filter out unchanged arcs from changed_arcs_,
3779 // translate changed arcs to changed arc indices.
3780 // Fill changed_paths_ while we hold node_path.
3781 for (const int path : changed_paths_) path_has_changed_[path] = false;
3782 changed_paths_.clear();
3783 tail_head_indices_.clear();
3784 changed_loops_.clear();
3785 int num_changed_arcs = 0;
3786 for (const auto [node, next] : changed_arcs_) {
3787 const int node_index = path_state_->CommittedIndex(node);
3788 const int next_index = path_state_->CommittedIndex(next);
3789 const int node_path = path_state_->Path(node);
3790 if (next != node &&
3791 (next_index != node_index + 1 || node_path < 0)) { // New arc.
3792 tail_head_indices_.push_back({node_index, next_index});
3793 changed_arcs_[num_changed_arcs++] = {node, next};
3794 if (node_path >= 0 && !path_has_changed_[node_path]) {
3795 path_has_changed_[node_path] = true;
3796 changed_paths_.push_back(node_path);
3797 }
3798 } else if (node == next && node_path != PathState::kLoop) { // New loop.
3799 changed_loops_.push_back(node);
3800 }
3801 }
3802 changed_arcs_.resize(num_changed_arcs);
3803
3804 path_state_->ChangeLoops(changed_loops_);
3805 if (tail_head_indices_.size() + changed_paths_.size() <= 8) {
3806 MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm();
3807 } else {
3808 MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm();
3809 }
3810}
3811
3812void PathStateFilter::
3813 MakeChainsFromChangedPathsAndArcsWithSelectionAlgorithm() {
3814 int num_visited_changed_arcs = 0;
3815 const int num_changed_arcs = tail_head_indices_.size();
3816 // For every path, find all its chains.
3817 for (const int path : changed_paths_) {
3818 path_chains_.clear();
3819 const auto [start_index, end_index] = path_state_->CommittedPathRange(path);
3820 int current_index = start_index;
3821 while (true) {
3822 // Look for smallest non-visited tail_index that is no smaller than
3823 // current_index.
3824 int selected_arc = -1;
3825 int selected_tail_index = std::numeric_limits<int>::max();
3826 for (int i = num_visited_changed_arcs; i < num_changed_arcs; ++i) {
3827 const int tail_index = tail_head_indices_[i].tail_index;
3828 if (current_index <= tail_index && tail_index < selected_tail_index) {
3829 selected_arc = i;
3830 selected_tail_index = tail_index;
3831 }
3832 }
3833 // If there is no such tail index, or more generally if the next chain
3834 // would be cut by end of path,
3835 // stack {current_index, end_index + 1} in chains_, and go to next path.
3836 // Otherwise, stack {current_index, tail_index+1} in chains_,
3837 // set current_index = head_index, set pair to visited.
3838 if (start_index <= current_index && current_index < end_index &&
3839 end_index <= selected_tail_index) {
3840 path_chains_.emplace_back(current_index, end_index);
3841 break;
3842 } else {
3843 path_chains_.emplace_back(current_index, selected_tail_index + 1);
3844 current_index = tail_head_indices_[selected_arc].head_index;
3845 std::swap(tail_head_indices_[num_visited_changed_arcs],
3846 tail_head_indices_[selected_arc]);
3847 ++num_visited_changed_arcs;
3848 }
3849 }
3850 path_state_->ChangePath(path, path_chains_);
3851 }
3852}
3853
3854void PathStateFilter::MakeChainsFromChangedPathsAndArcsWithGenericAlgorithm() {
3855 // TRICKY: For each changed path, we want to generate a sequence of chains
3856 // that represents the path in the changed state.
3857 // First, notice that if we add a fake end->start arc for each changed path,
3858 // then all chains will be from the head of an arc to the tail of an arc.
3859 // A way to generate the changed chains and paths would be, for each path,
3860 // to start from a fake arc's head (the path start), go down the path until
3861 // the tail of an arc, and go to the next arc until we return on the fake arc,
3862 // enqueuing the [head, tail] chains as we go.
3863 // In turn, to do that, we need to know which arc to go to.
3864 // If we sort all heads and tails by index in two separate arrays,
3865 // the head_index and tail_index at the same rank are such that
3866 // [head_index, tail_index] is a chain. Moreover, the arc that must be visited
3867 // after head_index's arc is tail_index's arc.
3868
3869 // Add a fake end->start arc for each path.
3870 for (const int path : changed_paths_) {
3871 const auto [start_index, end_index] = path_state_->CommittedPathRange(path);
3872 tail_head_indices_.push_back({end_index - 1, start_index});
3873 }
3874
3875 // Generate pairs (tail_index, arc) and (head_index, arc) for all arcs,
3876 // sort those pairs by index.
3877 const int num_arc_indices = tail_head_indices_.size();
3878 arcs_by_tail_index_.resize(num_arc_indices);
3879 arcs_by_head_index_.resize(num_arc_indices);
3880 for (int i = 0; i < num_arc_indices; ++i) {
3881 arcs_by_tail_index_[i] = {tail_head_indices_[i].tail_index, i};
3882 arcs_by_head_index_[i] = {tail_head_indices_[i].head_index, i};
3883 }
3884 std::sort(arcs_by_tail_index_.begin(), arcs_by_tail_index_.end());
3885 std::sort(arcs_by_head_index_.begin(), arcs_by_head_index_.end());
3886 // Generate the map from arc to next arc in path.
3887 next_arc_.resize(num_arc_indices);
3888 for (int i = 0; i < num_arc_indices; ++i) {
3889 next_arc_[arcs_by_head_index_[i].arc] = arcs_by_tail_index_[i].arc;
3890 }
3891
3892 // Generate chains: for every changed path, start from its fake arc,
3893 // jump to next_arc_ until going back to fake arc,
3894 // enqueuing chains as we go.
3895 const int first_fake_arc = num_arc_indices - changed_paths_.size();
3896 for (int fake_arc = first_fake_arc; fake_arc < num_arc_indices; ++fake_arc) {
3897 path_chains_.clear();
3898 int32_t arc = fake_arc;
3899 do {
3900 const int chain_begin = tail_head_indices_[arc].head_index;
3901 arc = next_arc_[arc];
3902 const int chain_end = tail_head_indices_[arc].tail_index + 1;
3903 path_chains_.emplace_back(chain_begin, chain_end);
3904 } while (arc != fake_arc);
3905 const int path = changed_paths_[fake_arc - first_fake_arc];
3906 path_state_->ChangePath(path, path_chains_);
3907 }
3908}
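// Illustrative worked example (not from the original source): suppose path p
// has committed nodes [s, x, y, e] at committed indices 10..13 and the delta
// sets x->e and y->y. Relax() records the changed arc (x, e) and the new loop
// y; CutChains() adds the fake arc (tail 13, head 10) for p. Heads sorted by
// index are {10, 13} and tails are {11, 13}; pairing equal ranks yields the
// chains [10, 12) = {s, x} and [13, 14) = {e}, i.e. the new path s -> x -> e,
// while y is reported through ChangeLoops(). (With only two arcs the selection
// variant above is the one actually run; it produces the same chains.)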
3909
3910} // namespace
3911
3912 LocalSearchFilter* MakePathStateFilter(Solver* solver,
3913                                        std::unique_ptr<PathState> path_state,
3914 const std::vector<IntVar*>& nexts) {
3915 PathStateFilter* filter = new PathStateFilter(std::move(path_state), nexts);
3916 return solver->RevAlloc(filter);
3917}
3918
3919namespace {
3920using EInterval = DimensionChecker::ExtendedInterval;
3921
3922constexpr int64_t kint64min = std::numeric_limits<int64_t>::min();
3923constexpr int64_t kint64max = std::numeric_limits<int64_t>::max();
3924
3925EInterval operator&(const EInterval& i1, const EInterval& i2) {
3926 return {std::max(i1.num_negative_infinity == 0 ? i1.min : kint64min,
3927 i2.num_negative_infinity == 0 ? i2.min : kint64min),
3928 std::min(i1.num_positive_infinity == 0 ? i1.max : kint64max,
3929 i2.num_positive_infinity == 0 ? i2.max : kint64max),
3930 std::min(i1.num_negative_infinity, i2.num_negative_infinity),
3931 std::min(i1.num_positive_infinity, i2.num_positive_infinity)};
3932}
3933
3934EInterval operator&=(EInterval& i1, const EInterval& i2) {
3935 i1 = i1 & i2;
3936 return i1;
3937}
3938
3939bool IsEmpty(const EInterval& interval) {
3940 const int64_t minimum_value =
3941 interval.num_negative_infinity == 0 ? interval.min : kint64min;
3942 const int64_t maximum_value =
3943 interval.num_positive_infinity == 0 ? interval.max : kint64max;
3944 return minimum_value > maximum_value;
3945}
3946
3947EInterval operator+(const EInterval& i1, const EInterval& i2) {
3948 return {CapAdd(i1.min, i2.min), CapAdd(i1.max, i2.max),
3949 i1.num_negative_infinity + i2.num_negative_infinity,
3950 i1.num_positive_infinity + i2.num_positive_infinity};
3951}
3952
3953EInterval& operator+=(EInterval& i1, const EInterval& i2) {
3954 i1 = i1 + i2;
3955 return i1;
3956}
3957
3958EInterval operator-(const EInterval& i1, const EInterval& i2) {
3959 return {CapSub(i1.min, i2.max), CapSub(i1.max, i2.min),
3960 i1.num_negative_infinity + i2.num_positive_infinity,
3961 i1.num_positive_infinity + i2.num_negative_infinity};
3962}
3963
3964// Return the interval delta such that from + delta = to.
3965// Note that the result is not the same as "to + (-from)".
3966EInterval Delta(const EInterval& from, const EInterval& to) {
3967 return {CapSub(to.min, from.min), CapSub(to.max, from.max),
3968 to.num_negative_infinity - from.num_negative_infinity,
3969 to.num_positive_infinity - from.num_positive_infinity};
3970}
3971
3972EInterval ToExtendedInterval(DimensionChecker::Interval interval) {
3973 const bool is_neg_infinity = interval.min == kint64min;
3974 const bool is_pos_infinity = interval.max == kint64max;
3975 return {is_neg_infinity ? 0 : interval.min,
3976 is_pos_infinity ? 0 : interval.max, is_neg_infinity ? 1 : 0,
3977 is_pos_infinity ? 1 : 0};
3978}
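// Illustrative note (not from the original source): ExtendedInterval tracks
// +/- infinity with counters instead of sentinel values so that interval
// arithmetic stays exact. For example, ToExtendedInterval({5, kint64max})
// gives {min:5, max:0, neg_inf:0, pos_inf:1}; adding the finite interval
// {2, 3, 0, 0} with operator+ yields {7, 3, 0, 1}, which still denotes
// [7, +infinity) since pos_inf != 0, and the counters let Delta() and
// operator- cancel infinities instead of saturating at kint64max.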
3979
3980std::vector<EInterval> ToExtendedIntervals(
3981 absl::Span<const DimensionChecker::Interval> intervals) {
3982 std::vector<EInterval> extended_intervals;
3983 extended_intervals.reserve(intervals.size());
3984 for (const auto& interval : intervals) {
3985 extended_intervals.push_back(ToExtendedInterval(interval));
3986 }
3987 return extended_intervals;
3988}
3989} // namespace
3990
3991 DimensionChecker::DimensionChecker(
3992     const PathState* path_state, std::vector<Interval> path_capacity,
3993 std::vector<int> path_class,
3994 std::vector<std::function<Interval(int64_t, int64_t)>>
3995 demand_per_path_class,
3996 std::vector<Interval> node_capacity, int min_range_size_for_riq)
3997 : path_state_(path_state),
3998 path_capacity_(ToExtendedIntervals(path_capacity)),
3999 path_class_(std::move(path_class)),
4000 demand_per_path_class_(std::move(demand_per_path_class)),
4001 node_capacity_(ToExtendedIntervals(node_capacity)),
4002 index_(path_state_->NumNodes(), 0),
4003 maximum_riq_layer_size_(std::max(
4004 16, 4 * path_state_->NumNodes())), // 16 and 4 are arbitrary.
4005 min_range_size_for_riq_(min_range_size_for_riq) {
4006 const int num_nodes = path_state_->NumNodes();
4007 cached_demand_.resize(num_nodes);
4008 const int num_paths = path_state_->NumPaths();
4009 DCHECK_EQ(num_paths, path_capacity_.size());
4010 DCHECK_EQ(num_paths, path_class_.size());
4011 const int maximum_riq_exponent = MostSignificantBitPosition32(num_nodes);
4012 riq_.resize(maximum_riq_exponent + 1);
4013 FullCommit();
4014}
4015
4016 bool DimensionChecker::Check() const {
4017   if (path_state_->IsInvalid()) return true;
4018 for (const int path : path_state_->ChangedPaths()) {
4019 const EInterval path_capacity = path_capacity_[path];
4020 const int path_class = path_class_[path];
4021 // Loop invariant: except for the first chain, cumul represents the cumul
4022 // state of the last node of the previous chain, and it is nonempty.
4023 int prev_node = path_state_->Start(path);
4024 EInterval cumul = node_capacity_[prev_node] & path_capacity;
4025 if (IsEmpty(cumul)) return false;
4026
4027 for (const auto chain : path_state_->Chains(path)) {
4028 const int first_node = chain.First();
4029 const int last_node = chain.Last();
4030
4031 if (prev_node != first_node) {
4032 // Bring cumul state from last node of previous chain to first node of
4033 // current chain.
4034 const EInterval demand = ToExtendedInterval(
4035 demand_per_path_class_[path_class](prev_node, first_node));
4036 cumul += demand;
4037 cumul &= path_capacity;
4038 cumul &= node_capacity_[first_node];
4039 if (IsEmpty(cumul)) return false;
4040 prev_node = first_node;
4041 }
4042
4043 // Bring cumul state from first node to last node of the current chain.
4044 const int first_index = index_[first_node];
4045 const int last_index = index_[last_node];
4046 const int chain_path = path_state_->Path(first_node);
4047 const int chain_path_class =
4048 chain_path < 0 ? -1 : path_class_[chain_path];
4049 // Use a RIQ if the chain size is large enough;
4050 // the optimal size was found with the associated benchmark in tests,
4051 // in particular BM_DimensionChecker<ChangeSparsity::kSparse, *>.
4052 const bool chain_is_cached = chain_path_class == path_class;
4053 if (last_index - first_index > min_range_size_for_riq_ &&
4054 chain_is_cached) {
4055 UpdateCumulUsingChainRIQ(first_index, last_index, path_capacity, cumul);
4056 if (IsEmpty(cumul)) return false;
4057 prev_node = chain.Last();
4058 } else {
4059 for (const int node : chain.WithoutFirstNode()) {
4060 const EInterval demand =
4061 chain_is_cached
4062 ? cached_demand_[prev_node]
4063 : ToExtendedInterval(
4064 demand_per_path_class_[path_class](prev_node, node));
4065 cumul += demand;
4066 cumul &= node_capacity_[node];
4067 cumul &= path_capacity;
4068 if (IsEmpty(cumul)) return false;
4069 prev_node = node;
4070 }
4071 }
4072 }
4073 }
4074 return true;
4075}
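// Illustrative worked example (not from the original source): with path
// capacity [0, 10], every node capacity [0, 10] and a demand of +4 on each
// arc, the cumul interval in Check() evolves as [0, 10] -> [4, 10] -> [8, 10]
// and becomes empty on the third arc ([12, 14] & [0, 10]), so the change is
// rejected. The RIQ branch performs the same pruning in constant time per
// sufficiently long cached chain instead of iterating node by node.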
4076
4077 void DimensionChecker::Commit() {
4078   const int current_layer_size = riq_[0].size();
4079 int change_size = path_state_->ChangedPaths().size();
4080 for (const int path : path_state_->ChangedPaths()) {
4081 for (const auto chain : path_state_->Chains(path)) {
4082 change_size += chain.NumNodes();
4083 }
4084 }
4085 if (current_layer_size + change_size <= maximum_riq_layer_size_) {
4086 IncrementalCommit();
4087 } else {
4088 FullCommit();
4089 }
4090}
4091
4092void DimensionChecker::IncrementalCommit() {
4093 for (const int path : path_state_->ChangedPaths()) {
4094 const int begin_index = riq_[0].size();
4095 AppendPathDemandsToSums(path);
4096 UpdateRIQStructure(begin_index, riq_[0].size());
4097 }
4098}
4099
4100void DimensionChecker::FullCommit() {
4101 // Clear all structures.
4102 for (auto& layer : riq_) layer.clear();
4103 // Append all paths.
4104 const int num_paths = path_state_->NumPaths();
4105 for (int path = 0; path < num_paths; ++path) {
4106 const int begin_index = riq_[0].size();
4107 AppendPathDemandsToSums(path);
4108 UpdateRIQStructure(begin_index, riq_[0].size());
4109 }
4110}
4111
4112void DimensionChecker::AppendPathDemandsToSums(int path) {
4113   // The demand sums stored in riq_[0] at a node's index must be the sum of
4114   // all demands of nodes from the start of the path to that node.
4115 const int path_class = path_class_[path];
4116 EInterval demand_sum = {0, 0, 0, 0};
4117 int prev = path_state_->Start(path);
4118 int index = riq_[0].size();
4119 for (const int node : path_state_->Nodes(path)) {
4120 // Transition to current node.
4121 const EInterval demand =
4122 prev == node ? EInterval{0, 0, 0, 0}
4123 : ToExtendedInterval(
4124 demand_per_path_class_[path_class](prev, node));
4125 demand_sum += demand;
4126 cached_demand_[prev] = demand;
4127 prev = node;
4128 // Store all data of current node.
4129 index_[node] = index++;
4130 riq_[0].push_back({.cumuls_to_fst = node_capacity_[node],
4131 .tightest_tsum = demand_sum,
4132 .cumuls_to_lst = node_capacity_[node],
4133 .tsum_at_fst = demand_sum,
4134 .tsum_at_lst = demand_sum});
4135 }
4136 cached_demand_[path_state_->End(path)] = {0, 0, 0, 0};
4137}
4138
4139void DimensionChecker::UpdateRIQStructure(int begin_index, int end_index) {
4140 // The max layer is the one used by Range Intersection Query functions on
4141 // (begin_index, end_index - 1).
4142 const int max_layer =
4143 MostSignificantBitPosition32(end_index - begin_index - 1);
4144 for (int layer = 1, half_window = 1; layer <= max_layer;
4145 ++layer, half_window *= 2) {
4146 riq_[layer].resize(end_index);
4147 for (int i = begin_index + 2 * half_window - 1; i < end_index; ++i) {
4148 // The window covered by riq_[layer][i] goes from
4149 // first = i - 2 * half_window + 1 to last = i, inclusive.
4150 // Values are computed from two half-windows of the layer below,
4151 // the F-window = (i - 2 * half_window, i - half_window], and
4152       // the L-window = (i - half_window, i].
4153 const RIQNode& fw = riq_[layer - 1][i - half_window];
4154 const RIQNode& lw = riq_[layer - 1][i];
4155 const EInterval lst_to_lst = Delta(fw.tsum_at_lst, lw.tsum_at_lst);
4156 const EInterval fst_to_fst = Delta(fw.tsum_at_fst, lw.tsum_at_fst);
4157
4158 riq_[layer][i] = {
4159 .cumuls_to_fst = fw.cumuls_to_fst & lw.cumuls_to_fst - fst_to_fst,
4160 .tightest_tsum = fw.tightest_tsum & lw.tightest_tsum,
4161 .cumuls_to_lst = fw.cumuls_to_lst + lst_to_lst & lw.cumuls_to_lst,
4162 .tsum_at_fst = fw.tsum_at_fst,
4163 .tsum_at_lst = lw.tsum_at_lst};
4164 }
4165 }
4166}
4167
4168// The RIQ schema decomposes the request into two windows:
4169// - the F window covers indices [first_index, first_index + window)
4170// - the L window covers indices (last_index - window, last_index]
4171// The decomposition uses the first and last nodes of these windows.
4172void DimensionChecker::UpdateCumulUsingChainRIQ(
4173 int first_index, int last_index, const ExtendedInterval& path_capacity,
4174 ExtendedInterval& cumul) const {
4175 DCHECK_LE(0, first_index);
4176 DCHECK_LT(first_index, last_index);
4177 DCHECK_LT(last_index, riq_[0].size());
4178 const int layer = MostSignificantBitPosition32(last_index - first_index);
4179 const int window = 1 << layer;
4180 const RIQNode& fw = riq_[layer][first_index + window - 1];
4181 const RIQNode& lw = riq_[layer][last_index];
4182
4183 // Compute the set of cumul values that can reach the last node.
4184 cumul &= fw.cumuls_to_fst;
4185 cumul &= lw.cumuls_to_fst - Delta(fw.tsum_at_fst, lw.tsum_at_fst);
4186 cumul &= path_capacity -
4187 Delta(fw.tsum_at_fst, fw.tightest_tsum & lw.tightest_tsum);
4188
4189 // We need to check for emptiness before widening the interval with transit.
4190 if (IsEmpty(cumul)) return;
4191
4192 // Transit to last node.
4193 cumul += Delta(fw.tsum_at_fst, lw.tsum_at_lst);
4194
4195 // Compute the set of cumul values that are reached from first node.
4196 cumul &= fw.cumuls_to_lst + Delta(fw.tsum_at_lst, lw.tsum_at_lst);
4197 cumul &= lw.cumuls_to_lst;
4198}
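// Illustrative note (not from the original source): riq_ is a sparse-table
// ("doubling") structure: riq_[k][i] summarizes the window of 2^k consecutive
// committed positions ending at i, built from two half-windows of layer k - 1.
// A query on [first_index, last_index] picks
// layer = MostSignificantBitPosition32(last_index - first_index) so that two
// possibly overlapping windows of size 2^layer cover the whole range; e.g.
// last_index - first_index == 5 gives layer == 2, window == 4, an F-window of
// [first_index, first_index + 4) and an L-window of (last_index - 4, last_index].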
4199
4200namespace {
4201
4202class DimensionFilter : public LocalSearchFilter {
4203 public:
4204 std::string DebugString() const override { return name_; }
4205 DimensionFilter(std::unique_ptr<DimensionChecker> checker,
4206 absl::string_view dimension_name)
4207 : checker_(std::move(checker)),
4208 name_(absl::StrCat("DimensionFilter(", dimension_name, ")")) {}
4209
4210 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
4211 return checker_->Check();
4212 }
4213
4214 void Synchronize(const Assignment*, const Assignment*) override {
4215 checker_->Commit();
4216 }
4217
4218 private:
4219 std::unique_ptr<DimensionChecker> checker_;
4220 const std::string name_;
4221};
4222
4223} // namespace
4224
4225 LocalSearchFilter* MakeDimensionFilter(
4226     Solver* solver, std::unique_ptr<DimensionChecker> checker,
4227 absl::string_view dimension_name) {
4228 DimensionFilter* filter =
4229 new DimensionFilter(std::move(checker), dimension_name);
4230 return solver->RevAlloc(filter);
4231}
4232
4233 LightVehicleBreaksChecker::LightVehicleBreaksChecker(
4234     PathState* path_state, std::vector<PathData> path_data)
4235 : path_state_(path_state), path_data_(std::move(path_data)) {}
4236
4237 void LightVehicleBreaksChecker::Relax() const {
4238   for (const int path : path_state_->ChangedPaths()) {
4239 path_data_[path].start_cumul.Relax();
4240 path_data_[path].end_cumul.Relax();
4241 path_data_[path].span.Relax();
4242 }
4243}
4244
4245 bool LightVehicleBreaksChecker::Check() const {
4246   for (const int path : path_state_->ChangedPaths()) {
4247 if (!path_data_[path].span.Exists()) continue;
4248 const PathData& data = path_data_[path];
4249 const int64_t total_transit = data.total_transit.Min();
4250 int64_t lb_span = data.span.Min();
4251 // Improve bounds on span/start max/end min using time windows: breaks that
4252 // must occur inside the path have their duration accumulated into
4253 // lb_span_tw, they also widen [start_max, end_min).
4254 int64_t lb_span_tw = total_transit;
4255 int64_t start_max = data.start_cumul.Max();
4256 int64_t end_min = data.end_cumul.Min();
4257 for (const auto& br : data.vehicle_breaks) {
4258 if (!br.is_performed_min) continue;
4259 if (br.start_max < end_min && start_max < br.end_min) {
4260 CapAddTo(br.duration_min, &lb_span_tw);
4261 start_max = std::min(start_max, br.start_max);
4262 end_min = std::max(end_min, br.end_min);
4263 }
4264 }
4265 lb_span = std::max({lb_span, lb_span_tw, CapSub(end_min, start_max)});
4266     // Compute num_feasible_breaks = number of breaks that may fit into the
4267     // route, and [break_start_min, break_end_max) = maximal coverage of breaks.
4268 int64_t break_start_min = kint64max;
4269 int64_t break_end_max = kint64min;
4270 int64_t start_min = data.start_cumul.Min();
4271 start_min = std::max(start_min, CapSub(end_min, data.span.Max()));
4272 int64_t end_max = data.end_cumul.Max();
4273 end_max = std::min(end_max, CapAdd(start_max, data.span.Max()));
4274 int num_feasible_breaks = 0;
4275 for (const auto& br : data.vehicle_breaks) {
4276 if (start_min <= br.start_max && br.end_min <= end_max) {
4277 break_start_min = std::min(break_start_min, br.start_min);
4278 break_end_max = std::max(break_end_max, br.end_max);
4279 ++num_feasible_breaks;
4280 }
4281 }
4282 // Improve span/start min/end max using interbreak limits: there must be
4283 // enough breaks inside the path, so that for each limit, the union of
4284 // [br.start - max_interbreak, br.end + max_interbreak) covers [start, end),
4285 // or [start, end) is shorter than max_interbreak.
4286 for (const auto& [max_interbreak, min_break_duration] :
4287 data.interbreak_limits) {
4288 // Minimal number of breaks depends on total transit:
4289 // 0 breaks for 0 <= total transit <= limit,
4290 // 1 break for limit + 1 <= total transit <= 2 * limit,
4291 // i breaks for i * limit + 1 <= total transit <= (i+1) * limit, ...
4292 if (max_interbreak == 0) {
4293 if (total_transit > 0) return false;
4294 continue;
4295 }
4296 int64_t min_num_breaks =
4297 std::max<int64_t>(0, (total_transit - 1) / max_interbreak);
4298 if (lb_span > max_interbreak) {
4299 min_num_breaks = std::max<int64_t>(min_num_breaks, 1);
4300 }
4301 if (min_num_breaks > num_feasible_breaks) return false;
4302 lb_span = std::max(
4303 lb_span,
4304 CapAdd(total_transit, CapProd(min_num_breaks, min_break_duration)));
4305 if (min_num_breaks > 0) {
4306 if (!data.start_cumul.SetMin(CapSub(break_start_min, max_interbreak))) {
4307 return false;
4308 }
4309 if (!data.end_cumul.SetMax(CapAdd(break_end_max, max_interbreak))) {
4310 return false;
4311 }
4312 }
4313 }
4314 if (!data.span.SetMin(lb_span)) return false;
4315 // Merge span lb information directly in start/end variables.
4316 start_max = std::min(start_max, CapSub(end_max, lb_span));
4317 if (!data.start_cumul.SetMax(start_max)) return false;
4318 end_min = std::max(end_min, CapAdd(start_min, lb_span));
4319 if (!data.end_cumul.SetMin(end_min)) return false;
4320 }
4321 return true;
4322}
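// A small worked example of the interbreak reasoning above, with hypothetical
// numbers: for max_interbreak = 10 and total_transit = 25,
// min_num_breaks = max(0, (25 - 1) / 10) = 2, so at least two feasible breaks
// must exist or the candidate is rejected; with min_break_duration = 3 the
// span lower bound is raised to at least 25 + 2 * 3 = 31, and the start/end
// cumuls are tightened to lie within max_interbreak of the breaks' coverage
// [break_start_min, break_end_max).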
4323
4324namespace {
4325
4326class LightVehicleBreaksFilter : public LocalSearchFilter {
4327 public:
4328 LightVehicleBreaksFilter(std::unique_ptr<LightVehicleBreaksChecker> checker,
4329 absl::string_view dimension_name)
4330 : checker_(std::move(checker)),
4331 name_(absl::StrCat("LightVehicleBreaksFilter(", dimension_name, ")")) {}
4332
4333 std::string DebugString() const override { return name_; }
4334
4335 void Relax(const Assignment*, const Assignment*) override {
4336 checker_->Relax();
4337 }
4338
4339 bool Accept(const Assignment*, const Assignment*, int64_t, int64_t) override {
4340 return checker_->Check();
4341 }
4342
4343 void Synchronize(const Assignment*, const Assignment*) override {
4344 checker_->Check();
4345 }
4346
4347 private:
4348 std::unique_ptr<LightVehicleBreaksChecker> checker_;
4349 const std::string name_;
4350};
4351
4352} // namespace
4353
4354LocalSearchFilter* MakeLightVehicleBreaksFilter(
4355 Solver* solver, std::unique_ptr<LightVehicleBreaksChecker> checker,
4356 absl::string_view dimension_name) {
4357 LightVehicleBreaksFilter* filter =
4358 new LightVehicleBreaksFilter(std::move(checker), dimension_name);
4359 return solver->RevAlloc(filter);
4360}
4361
4362void WeightedWaveletTree::Clear() {
4363 elements_.clear();
4364 tree_location_.clear();
4365 nodes_.clear();
4366 for (auto& layer : tree_layers_) layer.clear();
4367}
4368
4370 // New elements are elements_[i] for i in [begin_index, end_index).
4371 const int begin_index = tree_location_.size();
4372 const int end_index = elements_.size();
4373 DCHECK_LE(begin_index, end_index);
4374 if (begin_index >= end_index) return;
4376 // Gather all heights, sort them and remove duplicates; these make up the
4377 // list of pivot heights of the underlying tree, in inorder.
4377 // TODO(user): investigate whether balancing the tree using the
4378 // number of occurrences of each height would be beneficial.
4379 // TODO(user): use a heap-like encoding for the binary search tree:
4380 // children of i at 2*i and 2*i+1. Better cache line utilization.
4381 const int old_node_size = nodes_.size();
4382 for (int i = begin_index; i < end_index; ++i) {
4383 nodes_.push_back({.pivot_height = elements_[i].height, .pivot_index = -1});
4384 }
4385 std::sort(nodes_.begin() + old_node_size, nodes_.end());
4386 nodes_.erase(std::unique(nodes_.begin() + old_node_size, nodes_.end()),
4387 nodes_.end());
4388
4389 // Remember location of the tree representation for this range of elements.
4390 // tree_location_ may be smaller than elements_, extend it if needed.
4391 const int new_node_size = nodes_.size();
4392 tree_location_.resize(end_index, {.node_begin = old_node_size,
4393 .node_end = new_node_size,
4394 .sequence_first = begin_index});
4395
4396 // Add and extend layers if needed.
4397 // The number of layers needed is 1 + ceil(log2(number of distinct new heights)).
4398 const int num_layers =
4399 2 + MostSignificantBitPosition32(new_node_size - old_node_size - 1);
4400 if (tree_layers_.size() <= num_layers) tree_layers_.resize(num_layers);
4401 for (int l = 0; l < num_layers; ++l) {
4402 tree_layers_[l].resize(end_index,
4403 {.prefix_sum = 0, .left_index = -1, .is_left = 0});
4404 }
4405
4406 // Fill all relevant locations of the tree, and record tree navigation
4407 // information. This recursive function has at most num_layers call depth.
4408 const auto fill_subtree = [this](auto& fill_subtree, int layer,
4409 int node_begin, int node_end,
4410 int range_begin, int range_end) {
4411 DCHECK_LT(node_begin, node_end);
4412 DCHECK_LT(range_begin, range_end);
4413 // Precompute prefix sums of range [range_begin, range_end).
4414 int64_t sum = 0;
4415 for (int i = range_begin; i < range_end; ++i) {
4416 sum += elements_[i].weight;
4417 tree_layers_[layer][i].prefix_sum = sum;
4418 }
4419 if (node_begin + 1 == node_end) return;
4420 // Range has more than one height; partition it.
4421 // Record layer l -> l+1 sequence index mapping:
4422 // - if height < pivot, record where this element will be in layer l+1.
4423 // - if height >= pivot, record where the next element below the pivot will be in layer l+1.
4424 const int node_mid = node_begin + (node_end - node_begin) / 2;
4425 const int64_t pivot_height = nodes_[node_mid].pivot_height;
4426 int pivot_index = range_begin;
4427 for (int i = range_begin; i < range_end; ++i) {
4428 tree_layers_[layer][i].left_index = pivot_index;
4429 tree_layers_[layer][i].is_left = elements_[i].height < pivot_height;
4430 if (elements_[i].height < pivot_height) ++pivot_index;
4431 }
4432 nodes_[node_mid].pivot_index = pivot_index;
4433 // TODO(user): stable_partition allocates memory,
4434 // find a way to fill layers without this.
4435 std::stable_partition(
4436 elements_.begin() + range_begin, elements_.begin() + range_end,
4437 [pivot_height](const auto& el) { return el.height < pivot_height; });
4438
4439 fill_subtree(fill_subtree, layer + 1, node_begin, node_mid, range_begin,
4440 pivot_index);
4441 fill_subtree(fill_subtree, layer + 1, node_mid, node_end, pivot_index,
4442 range_end);
4443 };
4444 fill_subtree(fill_subtree, 0, old_node_size, new_node_size, begin_index,
4445 end_index);
4446}
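// Illustration of the layer-count formula above, with hypothetical sizes: for
// 5 distinct new pivot heights, num_layers = 2 + MostSignificantBitPosition32(4)
// = 2 + 2 = 4, matching 1 + ceil(log2(5)): one layer for the unpartitioned
// range plus one per level of the binary partition.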
4447
4448int64_t WeightedWaveletTree::RangeSumWithThreshold(int64_t threshold_height,
4449 int begin_index,
4450 int end_index) const {
4451 DCHECK_LE(begin_index, end_index); // Range can be empty, but not reversed.
4452 DCHECK_LE(end_index, tree_location_.size());
4453 DCHECK_EQ(tree_location_.size(), elements_.size()); // No pending elements.
4454 if (begin_index >= end_index) return 0;
4455 auto [node_begin, node_end, sequence_first_index] =
4456 tree_location_[begin_index];
4457 DCHECK_EQ(tree_location_[end_index - 1].sequence_first,
4458 sequence_first_index); // Range is included in a single sequence.
4459 ElementRange range{
4460 .range_first_index = begin_index,
4461 .range_last_index = end_index - 1,
4462 .range_first_is_node_first = begin_index == sequence_first_index};
4463 // Answer in O(1) for the common case where max(heights) < threshold.
4464 if (nodes_[node_end - 1].pivot_height < threshold_height) return 0;
4465
4466 int64_t sum = 0;
4467 int64_t min_height_of_current_node = nodes_[node_begin].pivot_height;
4468 for (int l = 0; !range.Empty(); ++l) {
4469 const ElementInfo* elements = tree_layers_[l].data();
4470 if (threshold_height <= min_height_of_current_node) {
4471 // Query or subquery threshold covers all elements of this node.
4472 // This makes the query O(1) when its threshold is <= min(heights).
4473 sum += range.Sum(elements);
4474 return sum;
4475 } else if (node_begin + 1 == node_end) {
4476 // This node is a leaf, its height is < threshold, stop descent here.
4477 return sum;
4478 }
4479
4480 const int node_mid = node_begin + (node_end - node_begin) / 2;
4481 const auto [pivot_height, pivot_index] = nodes_[node_mid];
4482 const ElementRange right = range.RightSubRange(elements, pivot_index);
4483 if (threshold_height < pivot_height) {
4484 // All elements of the right child have their height above the threshold,
4485 // so we can project the range to the right child and add the whole subrange.
4486 if (!right.Empty()) sum += right.Sum(tree_layers_[l + 1].data());
4487 // Go to the left child.
4488 range = range.LeftSubRange(elements);
4489 node_end = node_mid;
4490 } else {
4491 // Go to the right child.
4492 range = right;
4493 node_begin = node_mid;
4494 min_height_of_current_node = pivot_height;
4495 }
4496 }
4497 return sum;
4498}
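// Usage sketch of this query, using only the member functions that appear in
// this file (PushBack, MakeTreeFromNewElements, RangeSumWithThreshold);
// heights and weights are made up:
//   WeightedWaveletTree tree;
//   tree.PushBack(/*height=*/3, /*weight=*/10);   // element index 0
//   tree.PushBack(/*height=*/7, /*weight=*/1);    // element index 1
//   tree.PushBack(/*height=*/5, /*weight=*/100);  // element index 2
//   tree.MakeTreeFromNewElements();
//   // Sum of weights over indices [0, 3) whose height is >= 5: 1 + 100.
//   const int64_t sum = tree.RangeSumWithThreshold(
//       /*threshold_height=*/5, /*begin_index=*/0, /*end_index=*/3);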
4499
4500PathEnergyCostChecker::PathEnergyCostChecker(
4501 const PathState* path_state, std::vector<int64_t> force_start_min,
4502 std::vector<int64_t> force_end_min, std::vector<int> force_class,
4503 std::vector<const std::function<int64_t(int64_t)>*> force_per_class,
4504 std::vector<int> distance_class,
4505 std::vector<const std::function<int64_t(int64_t, int64_t)>*>
4506 distance_per_class,
4507 std::vector<EnergyCost> path_energy_cost,
4508 std::vector<bool> path_has_cost_when_empty)
4509 : path_state_(path_state),
4510 force_start_min_(std::move(force_start_min)),
4511 force_end_min_(std::move(force_end_min)),
4512 force_class_(std::move(force_class)),
4513 distance_class_(std::move(distance_class)),
4514 force_per_class_(std::move(force_per_class)),
4515 distance_per_class_(std::move(distance_per_class)),
4516 path_energy_cost_(std::move(path_energy_cost)),
4517 path_has_cost_when_empty_(std::move(path_has_cost_when_empty)),
4518 maximum_range_query_size_(4 * path_state_->NumNodes()),
4519 force_rmq_index_of_node_(path_state_->NumNodes()),
4520 threshold_query_index_of_node_(path_state_->NumNodes()) {
4521 const int num_nodes = path_state_->NumNodes();
4522 cached_force_.resize(num_nodes);
4523 cached_distance_.resize(num_nodes);
4524 FullCacheAndPrecompute();
4525 committed_total_cost_ = 0;
4526 committed_path_cost_.assign(path_state_->NumPaths(), 0);
4527 const int num_paths = path_state_->NumPaths();
4528 for (int path = 0; path < num_paths; ++path) {
4529 committed_path_cost_[path] = ComputePathCost(path);
4530 CapAddTo(committed_path_cost_[path], &committed_total_cost_);
4531 }
4532 accepted_total_cost_ = committed_total_cost_;
4533}
4534
4535bool PathEnergyCostChecker::Check() {
4536 if (path_state_->IsInvalid()) return true;
4537 accepted_total_cost_ = committed_total_cost_;
4538 for (const int path : path_state_->ChangedPaths()) {
4539 accepted_total_cost_ =
4540 CapSub(accepted_total_cost_, committed_path_cost_[path]);
4541 CapAddTo(ComputePathCost(path), &accepted_total_cost_);
4542 if (accepted_total_cost_ == kint64max) return false;
4543 }
4544 return true;
4545}
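// The acceptance test above is incremental: starting from the committed total,
// only the changed paths are re-priced (subtract the committed path cost, add
// the recomputed one); a sum saturated at kint64max causes immediate rejection.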
4546
4547void PathEnergyCostChecker::CacheAndPrecomputeRangeQueriesOfPath(int path) {
4548 // Cache force and distance evaluations,
4549 // precompute force RMQ, energy/distance threshold queries.
4550 const auto& force_evaluator = *force_per_class_[force_class_[path]];
4551 const auto& distance_evaluator = *distance_per_class_[distance_class_[path]];
4552 int force_index = force_rmq_.TableSize();
4553 int threshold_index = energy_query_.TreeSize();
4554 int64_t total_force = 0;
4555
4556 const int start_node = path_state_->Start(path);
4557 int prev_node = start_node;
4558
4559 for (const int node : path_state_->Nodes(path)) {
4560 if (prev_node != node) {
4561 const int64_t distance = distance_evaluator(prev_node, node);
4562 cached_distance_[prev_node] = distance;
4563 energy_query_.PushBack(total_force, total_force * distance);
4564 distance_query_.PushBack(total_force, distance);
4565 prev_node = node;
4566 }
4567 threshold_query_index_of_node_[node] = threshold_index++;
4568 force_rmq_.PushBack(total_force);
4569 force_rmq_index_of_node_[node] = force_index++;
4570 const int64_t force = force_evaluator(node);
4571 cached_force_[node] = force;
4572 total_force += force;
4573 }
4574 force_rmq_.MakeTableFromNewElements();
4575 energy_query_.MakeTreeFromNewElements();
4576 distance_query_.MakeTreeFromNewElements();
4577}
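// To summarize the caching above: every committed transition prev -> node
// contributes one element to energy_query_ (height = total force carried on
// the transition, weight = force * distance) and one to distance_query_ (same
// height, weight = distance), while force_rmq_ stores the prefix sum of forces
// on arrival at each node. ComputePathCost() can then answer "energy/distance
// accumulated while the carried force was at or above a threshold" on any
// committed chain with two range queries.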
4578
4579void PathEnergyCostChecker::IncrementalCacheAndPrecompute() {
4580 for (const int path : path_state_->ChangedPaths()) {
4581 CacheAndPrecomputeRangeQueriesOfPath(path);
4582 }
4583}
4584
4585void PathEnergyCostChecker::FullCacheAndPrecompute() {
4586 force_rmq_.Clear();
4587 // Rebuild all paths.
4588 const int num_paths = path_state_->NumPaths();
4589 for (int path = 0; path < num_paths; ++path) {
4590 CacheAndPrecomputeRangeQueriesOfPath(path);
4591 }
4592}
4593
4594void PathEnergyCostChecker::Commit() {
4595 int change_size = path_state_->ChangedPaths().size();
4596 for (const int path : path_state_->ChangedPaths()) {
4597 for (const auto chain : path_state_->Chains(path)) {
4598 change_size += chain.NumNodes();
4599 }
4600 committed_total_cost_ =
4601 CapSub(committed_total_cost_, committed_path_cost_[path]);
4602 committed_path_cost_[path] = ComputePathCost(path);
4603 CapAddTo(committed_path_cost_[path], &committed_total_cost_);
4604 }
4605
4606 const int current_layer_size = force_rmq_.TableSize();
4607 if (current_layer_size + change_size <= maximum_range_query_size_) {
4608 IncrementalCacheAndPrecompute();
4609 } else {
4610 FullCacheAndPrecompute();
4611 }
4612}
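// Note on the amortization above: the committed range-query structures are
// append-only, so small moves only re-cache their changed paths; once the
// concatenated force table would grow past maximum_range_query_size_
// (4 * number of nodes, see the constructor), the caches are rebuilt for all
// paths instead of being extended further.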
4613
4614int64_t PathEnergyCostChecker::ComputePathCost(int64_t path) const {
4615 const int path_force_class = force_class_[path];
4616 const auto& force_evaluator = *force_per_class_[path_force_class];
4617
4618 // Find minimal force at which to start.
4619 int64_t total_force = force_start_min_[path];
4620 int64_t min_force = total_force;
4621 int num_path_nodes = 0;
4622 int prev_node = path_state_->Start(path);
4623 for (const auto chain : path_state_->Chains(path)) {
4624 num_path_nodes += chain.NumNodes();
4625 // Add force needed to go from prev_node to chain.First() if needed.
4626 if (chain.First() != prev_node) {
4627 const int64_t force_to_node = force_evaluator(prev_node);
4628 CapAddTo(force_to_node, &total_force);
4629 min_force = std::min(min_force, total_force);
4630 prev_node = chain.First();
4631 }
4632
4633 // Add force needed to go from chain.First() to chain.Last().
4634 const int chain_path = path_state_->Path(chain.First());
4635 const int chain_force_class =
4636 chain_path < 0 ? -1 : force_class_[chain_path];
4637 const bool force_is_cached = chain_force_class == path_force_class;
4638 if (force_is_cached && chain.NumNodes() >= 2) {
4639 const int first_index = force_rmq_index_of_node_[chain.First()];
4640 const int last_index = force_rmq_index_of_node_[chain.Last()];
4641 // Get total force at first, last and lowest point of the chain.
4642 const int64_t first_total_force = force_rmq_.array()[first_index];
4643 const int64_t last_total_force = force_rmq_.array()[last_index];
4644 const int64_t min_total_force =
4645 force_rmq_.RangeMinimum(first_index, last_index);
4646 // Compute running minimum total force and total force at chain.Last().
4647 min_force = std::min(min_force,
4648 total_force - first_total_force + min_total_force);
4649 CapAddTo(last_total_force - first_total_force, &total_force);
4650 prev_node = chain.Last();
4651 } else {
4652 for (const int node : chain.WithoutFirstNode()) {
4653 const int64_t force = force_is_cached ? cached_force_[prev_node]
4654 : force_evaluator(prev_node);
4655 CapAddTo(force, &total_force);
4656 min_force = std::min(min_force, total_force);
4657 prev_node = node;
4658 }
4659 }
4660 }
4661 if (num_path_nodes == 2 && !path_has_cost_when_empty_[path]) return 0;
4662 // Force must be offset in order to be all of:
4663 // - >= force_start_min_[path] at start
4664 // - >= force_end_min_[path] at end
4665 // - >= 0 at all intermediate nodes
4666 // We set the accumulator to the minimal offset that allows this.
4667 total_force = std::max<int64_t>(
4668 {0, CapOpp(min_force), CapSub(force_end_min_[path], total_force)});
4669 CapAddTo(force_start_min_[path], &total_force);
4670
4671 // Compute energy, below and above threshold.
4672 const int path_distance_class = distance_class_[path];
4673 const auto& distance_evaluator = *distance_per_class_[path_distance_class];
4674 const EnergyCost& cost = path_energy_cost_[path];
4675 int64_t energy_below = 0;
4676 int64_t energy_above = 0;
4677 prev_node = path_state_->Start(path);
4678 for (const auto chain : path_state_->Chains(path)) {
4679 // Bring cost computation to first node of the chain.
4680 if (chain.First() != prev_node) {
4681 const int64_t distance = distance_evaluator(prev_node, chain.First());
4682 CapAddTo(force_evaluator(prev_node), &total_force);
4683 CapAddTo(CapProd(std::min(cost.threshold, total_force), distance),
4684 &energy_below);
4685 const int64_t force_above =
4686 std::max<int64_t>(0, CapSub(total_force, cost.threshold));
4687 CapAddTo(CapProd(force_above, distance), &energy_above);
4688 prev_node = chain.First();
4689 }
4690
4691 // Inside chain, try to reuse cached forces and distances instead of more
4692 // costly calls to evaluators.
4693 const int chain_path = path_state_->Path(chain.First());
4694 const int chain_force_class =
4695 chain_path < 0 ? -1 : force_class_[chain_path];
4696 const int chain_distance_class =
4697 chain_path < 0 ? -1 : distance_class_[chain_path];
4698 const bool force_is_cached = chain_force_class == path_force_class;
4699 const bool distance_is_cached = chain_distance_class == path_distance_class;
4700
4701 if (force_is_cached && distance_is_cached && chain.NumNodes() >= 2) {
4702 const int first_index = threshold_query_index_of_node_[chain.First()];
4703 const int last_index = threshold_query_index_of_node_[chain.Last()];
4704
4705 const int64_t zero_total_energy = energy_query_.RangeSumWithThreshold(
4706 kint64min, first_index, last_index);
4707 const int64_t total_distance = distance_query_.RangeSumWithThreshold(
4708 kint64min, first_index, last_index);
4709
4710 // In the following, zero_ values are those computed with the hypothesis
4711 // that the force at the start node is zero.
4712 // The total_force at chain.First() is in general not the same in the
4713 // candidate path and in the zero_ case. We can still query the energy and
4714 // distance totals incurred by transitions above the actual threshold
4715 // during the chain, by offsetting the queries to zero_threshold.
4716 const int64_t zero_total_force_first =
4717 force_rmq_.array()[force_rmq_index_of_node_[chain.First()]];
4718 const int64_t zero_threshold =
4719 CapSub(cost.threshold, CapSub(total_force, zero_total_force_first));
4720 // "High" transitions are those that occur with a force at or above the
4721 // threshold. "High" energy is the sum of energy values during high
4722 // transitions, same for "high" distance.
4723 const int64_t zero_high_energy = energy_query_.RangeSumWithThreshold(
4724 zero_threshold, first_index, last_index);
4725 const int64_t zero_high_distance = distance_query_.RangeSumWithThreshold(
4726 zero_threshold, first_index, last_index);
4727 // "Above" energy is the energy caused by total_force above the threshold.
4728 // Since "above" energy is only incurred during "high" transitions, it can
4729 // be computed from "high" energy knowing distance and threshold.
4730 const int64_t zero_energy_above =
4731 CapSub(zero_high_energy, CapProd(zero_high_distance, zero_threshold));
4732 // To compute the energy values of the candidate, the force dimension
4733 // must be offset back to the candidate's total force.
4734 // Only the "below" energy is changed by the offset, the zero_ energy
4735 // above the zero_ threshold was computed to be the same as the candidate
4736 // energy above the actual threshold.
4737 CapAddTo(zero_energy_above, &energy_above);
4738 CapAddTo(CapAdd(CapSub(zero_total_energy, zero_energy_above),
4739 CapProd(total_distance,
4740 CapSub(cost.threshold, zero_threshold))),
4741 &energy_below);
4742 // We reuse the partial sum of the force query to compute the sum of
4743 // forces incurred by the chain.
4744 const int64_t zero_total_force_last =
4745 force_rmq_.array()[force_rmq_index_of_node_[chain.Last()]];
4746 CapAddTo(CapSub(zero_total_force_last, zero_total_force_first),
4747 &total_force);
4748 prev_node = chain.Last();
4749 } else {
4750 for (const int node : chain.WithoutFirstNode()) {
4751 const int64_t force = force_is_cached ? cached_force_[prev_node]
4752 : force_evaluator(prev_node);
4753 const int64_t distance = distance_is_cached
4754 ? cached_distance_[prev_node]
4755 : distance_evaluator(prev_node, node);
4756 CapAddTo(force, &total_force);
4757 CapAddTo(CapProd(std::min(cost.threshold, total_force), distance),
4758 &energy_below);
4759 const int64_t force_above =
4760 std::max<int64_t>(0, CapSub(total_force, cost.threshold));
4761 CapAddTo(CapProd(force_above, distance), &energy_above);
4762 prev_node = node;
4763 }
4764 }
4765 }
4766
4767 return CapAdd(CapProd(energy_below, cost.cost_per_unit_below_threshold),
4768 CapProd(energy_above, cost.cost_per_unit_above_threshold));
4769}
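// Worked example of the force offsetting above, with hypothetical numbers:
// suppose force_start_min = 2, force_end_min = 5, and that accumulating forces
// from a start value of 2 dips to min_force = -4 and ends at total_force = 1.
// The chosen offset is max(0, -(-4), 5 - 1) = 4, so the energy pass starts its
// accumulator at 2 + 4 = 6: the running force then stays >= 0 everywhere and
// ends at 1 + 4 = 5 >= force_end_min. Each transition of length d carrying
// force f then contributes
//   min(f, threshold) * d           to energy_below, and
//   max(0, f - threshold) * d       to energy_above,
// and the path cost is energy_below * cost_per_unit_below_threshold +
// energy_above * cost_per_unit_above_threshold.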
4770
4771namespace {
4772
4773class PathEnergyCostFilter : public LocalSearchFilter {
4774 public:
4775 std::string DebugString() const override { return name_; }
4776 PathEnergyCostFilter(std::unique_ptr<PathEnergyCostChecker> checker,
4777 absl::string_view energy_name)
4778 : checker_(std::move(checker)),
4779 name_(absl::StrCat("PathEnergyCostFilter(", energy_name, ")")) {}
4780
4781 bool Accept(const Assignment*, const Assignment*, int64_t objective_min,
4782 int64_t objective_max) override {
4783 if (objective_max > kint64max / 2) return true;
4784 if (!checker_->Check()) return false;
4785 const int64_t cost = checker_->AcceptedCost();
4786 return objective_min <= cost && cost <= objective_max;
4787 }
4788
4789 void Synchronize(const Assignment*, const Assignment*) override {
4790 checker_->Commit();
4791 }
4792
4793 int64_t GetSynchronizedObjectiveValue() const override {
4794 return checker_->CommittedCost();
4795 }
4796 int64_t GetAcceptedObjectiveValue() const override {
4797 return checker_->AcceptedCost();
4798 }
4799
4800 private:
4801 std::unique_ptr<PathEnergyCostChecker> checker_;
4802 const std::string name_;
4803};
4804
4805} // namespace
4806
4807LocalSearchFilter* MakePathEnergyCostFilter(
4808 Solver* solver, std::unique_ptr<PathEnergyCostChecker> checker,
4809 absl::string_view dimension_name) {
4810 PathEnergyCostFilter* filter =
4811 new PathEnergyCostFilter(std::move(checker), dimension_name);
4812 return solver->RevAlloc(filter);
4813}
4814
4815// TODO(user): Implement same-vehicle filter. Could be merged with node
4816// precedence filter.
4817
4818} // namespace operations_research