Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
Loading...
Searching...
No Matches
arc_flow_builder.cc
Go to the documentation of this file.
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
#include "ortools/packing/arc_flow_builder.h"

#include <algorithm>
#include <cstdint>
#include <set>
#include <utility>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "ortools/base/logging.h"
#include "ortools/base/stl_util.h"
#include "ortools/graph/topologicalsorter.h"
31namespace operations_research {
32namespace packing {
33namespace {
34
35class ArcFlowBuilder {
36 public:
37 // Same arguments as BuildArcFlowGraph(): see the .h.
38 ArcFlowBuilder(const std::vector<int>& bin_dimensions,
39 absl::Span<const std::vector<int>> item_dimensions_by_type,
40 absl::Span<const int> demand_by_type);
41
42 // Builds the arc-flow graph.
43 ArcFlowGraph BuildVectorBinPackingGraph();
44
45 // For debugging purposes.tring(
46 // Returns the number of states explored in the dynamic programming phase.
47 int64_t NumDpStates() const;
48
49 private:
50 // All items data, regrouped for sorting purposes.
51 struct Item {
52 std::vector<int> dimensions;
53 int demand;
55
56 // Used to sort items by relative size.
57 double NormalizedSize(absl::Span<const int> bin_dimensions) const;
58 };
59
60 // State of the dynamic programming algorithm.
61 struct DpState {
64 std::vector<int> used_dimensions;
65 // DP State indices of the states that can be obtained by moving
66 // either "right" to (cur_item_index, cur_item_quantity++) or "up"
67 // to (cur_item_index++, cur_item_quantity=0). -1 if impossible.
70 };
71
72 // Add item iteratively to create all possible nodes in a forward pass.
73 void ForwardCreationPass(DpState* dp_state);
74 // Scan DP-nodes backward to relabels each nodes by increasing them as much
75 // as possible.
76 void BackwardCompressionPass(int state_index);
77 // Relabel nodes by decreasing them as much as possible.
78 void ForwardCompressionPass(const std::vector<int>& source_node);
79
80 // Can we fit one more item in the bin?
81 bool CanFitNewItem(absl::Span<const int> used_dimensions, int item) const;
82 // Create a new used_dimensions that is used_dimensions + item dimensions.
83 std::vector<int> AddItem(const std::vector<int>& used_dimensions,
84 int item) const;
85
86 // DpState helpers.
87 int LookupOrCreateDpState(int item, int quantity,
88 const std::vector<int>& used_dimensions);
89
90 const std::vector<int> bin_dimensions_;
91 std::vector<Item> items_;
92
93 typedef absl::flat_hash_map<std::vector<int>, int> VectorIntIntMap;
94 int GetOrCreateNode(const std::vector<int>& used_dimensions);
95
96 // We store all DP states in a dense vector, and remember their index
97 // in the dp_state_index_ map (we use a tri-dimensional indexing because
98 // it's faster for the hash part).
99 std::vector<DpState*> dp_states_;
100 std::vector<std::vector<VectorIntIntMap>> dp_state_index_;
101
102 // The ArcFlowGraph will have nodes which will correspond to "some"
103 // of the vector<int> representing the partial bin usages encountered during
104 // the algo. These two data structures map one to the other (note that nodes
105 // are dense integers).
106 absl::flat_hash_map<std::vector<int>, int> node_indices_;
107 std::vector<std::vector<int>> nodes_;
108
109 std::set<ArcFlowGraph::Arc> arcs_;
110};
111
112double ArcFlowBuilder::Item::NormalizedSize(
113 absl::Span<const int> bin_dimensions) const {
114 double size = 0.0;
115 for (int i = 0; i < bin_dimensions.size(); ++i) {
116 size += static_cast<double>(dimensions[i]) / bin_dimensions[i];
117 }
118 return size;
119}
120
121int64_t ArcFlowBuilder::NumDpStates() const {
122 int64_t res = 1; // We do not store the initial state.
123 for (const auto& it1 : dp_state_index_) {
124 for (const auto& it2 : it1) {
125 res += it2.size();
126 }
127 }
128 return res;
129}
130
131ArcFlowBuilder::ArcFlowBuilder(
132 const std::vector<int>& bin_dimensions,
133 absl::Span<const std::vector<int>> item_dimensions_by_type,
134 absl::Span<const int> demand_by_type)
135 : bin_dimensions_(bin_dimensions) {
136 // Checks dimensions.
137 for (int i = 0; i < bin_dimensions.size(); ++i) {
138 CHECK_GT(bin_dimensions[i], 0);
139 }
140
141 const int num_items = item_dimensions_by_type.size();
142 items_.resize(num_items);
143 for (int i = 0; i < num_items; ++i) {
144 items_[i].dimensions = item_dimensions_by_type[i];
145 items_[i].demand = demand_by_type[i];
146 items_[i].original_index = i;
147 }
148 std::sort(items_.begin(), items_.end(), [&](const Item& a, const Item& b) {
149 return a.NormalizedSize(bin_dimensions_) >
150 b.NormalizedSize(bin_dimensions_);
151 });
152}
153
154bool ArcFlowBuilder::CanFitNewItem(absl::Span<const int> used_dimensions,
155 int item) const {
156 for (int d = 0; d < bin_dimensions_.size(); ++d) {
157 if (used_dimensions[d] + items_[item].dimensions[d] > bin_dimensions_[d]) {
158 return false;
159 }
160 }
161 return true;
162}
163
164std::vector<int> ArcFlowBuilder::AddItem(
165 const std::vector<int>& used_dimensions, int item) const {
166 DCHECK(CanFitNewItem(used_dimensions, item));
167 std::vector<int> result = used_dimensions;
168 for (int d = 0; d < bin_dimensions_.size(); ++d) {
169 result[d] += items_[item].dimensions[d];
170 }
171 return result;
172}
173
174int ArcFlowBuilder::GetOrCreateNode(const std::vector<int>& used_dimensions) {
175 const auto& it = node_indices_.find(used_dimensions);
176 if (it != node_indices_.end()) {
177 return it->second;
178 }
179 const int index = node_indices_.size();
180 node_indices_[used_dimensions] = index;
181 nodes_.push_back(used_dimensions);
182 return index;
183}
184
185ArcFlowGraph ArcFlowBuilder::BuildVectorBinPackingGraph() {
186 // Initialize the DP states map.
187 dp_state_index_.resize(items_.size());
188 for (int i = 0; i < items_.size(); ++i) {
189 dp_state_index_[i].resize(items_[i].demand + 1);
190 }
191
192 // Explore all possible DP states (starting from the initial 'empty' state),
193 // and remember their ancestry.
194 std::vector<int> zero(bin_dimensions_.size(), 0);
195 dp_states_.push_back(new DpState({0, 0, zero, -1, -1}));
196 for (int i = 0; i < dp_states_.size(); ++i) {
197 ForwardCreationPass(dp_states_[i]);
198 }
199
200 // We can clear the dp_state_index map as it will not be used anymore.
201 // From now on, we will use the dp_states.used_dimensions to store the new
202 // labels in the backward pass.
203 const int64_t num_dp_states = NumDpStates();
204 dp_state_index_.clear();
205
206 // Backwards pass: "push" the bin dimensions as far as possible.
207 const int num_states = dp_states_.size();
208 std::vector<std::pair<int, int>> flat_deps;
209 for (int i = 0; i < dp_states_.size(); ++i) {
210 if (dp_states_[i]->up_child != -1) {
211 flat_deps.push_back(std::make_pair(dp_states_[i]->up_child, i));
212 }
213 if (dp_states_[i]->right_child != -1) {
214 flat_deps.push_back(std::make_pair(dp_states_[i]->right_child, i));
215 }
216 }
217 const std::vector<int> sorted_work =
219 for (const int w : sorted_work) {
220 BackwardCompressionPass(w);
221 }
222
223 // ForwardCreationPass again, push the bin dimensions as low as possible.
224 const std::vector<int> source_node = dp_states_[0]->used_dimensions;
225 // We can now delete the states stored in dp_states_.
226 gtl::STLDeleteElements(&dp_states_);
227 ForwardCompressionPass(source_node);
228
229 // We need to connect all nodes that corresponds to at least one item selected
230 // to the sink node.
231 const int sink_node_index = nodes_.size() - 1;
232 for (int node = 1; node < sink_node_index; ++node) {
233 arcs_.insert({node, sink_node_index, -1});
234 }
235
236 ArcFlowGraph result;
237 result.arcs.assign(arcs_.begin(), arcs_.end());
238 result.nodes.assign(nodes_.begin(), nodes_.end());
239 result.num_dp_states = num_dp_states;
240 return result;
241}
242
243int ArcFlowBuilder::LookupOrCreateDpState(
244 int item, int quantity, const std::vector<int>& used_dimensions) {
245 VectorIntIntMap& map = dp_state_index_[item][quantity];
246 const int index =
247 map.insert({used_dimensions, dp_states_.size()}).first->second;
248 if (index == dp_states_.size()) {
249 dp_states_.push_back(
250 new DpState({item, quantity, used_dimensions, -1, -1}));
251 }
252 return index;
253}
254
255void ArcFlowBuilder::ForwardCreationPass(DpState* dp_state) {
256 const int item = dp_state->cur_item_index;
257 const int quantity = dp_state->cur_item_quantity;
258 const std::vector<int>& used_dimensions = dp_state->used_dimensions;
259
260 // Explore path up.
261 if (item < items_.size() - 1) {
262 dp_state->up_child = LookupOrCreateDpState(item + 1, 0, used_dimensions);
263 } else {
264 dp_state->up_child = -1;
265 }
266
267 // Explore path right.
268 if (quantity < items_[item].demand && CanFitNewItem(used_dimensions, item)) {
269 const std::vector<int> added = AddItem(used_dimensions, item);
270 dp_state->right_child = LookupOrCreateDpState(item, quantity + 1, added);
271 } else {
272 dp_state->right_child = -1;
273 }
274}
275
// Relabels the state at 'state_index' in place: its used_dimensions becomes
// the largest label compatible with its already-relabeled children, and the
// corresponding graph arcs are created. Children must be processed before
// their parents (guaranteed by the topological order in the caller).
void ArcFlowBuilder::BackwardCompressionPass(int state_index) {
  // The goal of this function is to fill this.
  std::vector<int>& result = dp_states_[state_index]->used_dimensions;

  // Inherit our result from the result one step up.
  // With no "up" child we are past the last item type: start from the full
  // bin capacity.
  const int up_index = dp_states_[state_index]->up_child;
  const std::vector<int>& result_up =
      up_index == -1 ? bin_dimensions_ : dp_states_[up_index]->used_dimensions;
  result = result_up;

  // Adjust our result from the result one step right: in every dimension,
  // our label plus the item's size may not exceed the "right" child's label.
  const int right_index = dp_states_[state_index]->right_child;
  if (right_index == -1) return;  // We're done.
  const std::vector<int>& result_right =
      dp_states_[right_index]->used_dimensions;
  const Item& item = items_[dp_states_[state_index]->cur_item_index];
  for (int d = 0; d < bin_dimensions_.size(); ++d) {
    result[d] = std::min(result[d], result_right[d] - item.dimensions[d]);
  }

  // Insert the arc from the node to the "right" node.
  const int node = GetOrCreateNode(result);
  const int right_node = GetOrCreateNode(result_right);
  DCHECK_NE(node, right_node);
  arcs_.insert({node, right_node, item.original_index});
  // Also insert the 'dotted' arc from the node to the "up" node (if different).
  if (result != result_up) {
    const int up_node = GetOrCreateNode(result_up);
    arcs_.insert({node, up_node, -1});
  }
}
307
308// Reverse version of the backward pass.
309// Revisit states forward, and relabel nodes with the longest path in each
310// dimensions from the source. The only meaningfull difference is that we use
311// arcs and nodes, instead of dp_states.
312void ArcFlowBuilder::ForwardCompressionPass(
313 const std::vector<int>& source_node) {
314 const int num_nodes = node_indices_.size();
315 const int num_dims = bin_dimensions_.size();
316 std::set<ArcFlowGraph::Arc> new_arcs;
317 std::vector<std::vector<int>> new_nodes;
318 VectorIntIntMap new_node_indices;
319 std::vector<int> node_remap(num_nodes, -1);
320 // We need to revert the sorting of items as arcs store the original index.
321 std::vector<int> reverse_item_index_map(items_.size(), -1);
322 for (int i = 0; i < items_.size(); ++i) {
323 reverse_item_index_map[items_[i].original_index] = i;
324 }
325
326 std::vector<std::pair<int, int>> forward_deps;
327 std::vector<std::vector<ArcFlowGraph::Arc>> incoming_arcs(num_nodes);
328 for (const ArcFlowGraph::Arc& arc : arcs_) {
329 forward_deps.push_back(std::make_pair(arc.source, arc.destination));
330 incoming_arcs[arc.destination].push_back(arc);
331 }
332
333 const std::vector<int> sorted_work =
335
336 const int old_source_node = GetOrCreateNode(source_node);
337 const int old_sink_node = GetOrCreateNode(bin_dimensions_);
338 CHECK_EQ(sorted_work.front(), old_source_node);
339 CHECK_EQ(sorted_work.back(), old_sink_node);
340
341 // Process nodes in order and remap state to max(previous_state + item
342 // dimensions).
343 for (const int w : sorted_work) {
344 std::vector<int> new_used(num_dims, 0);
345 if (w == sorted_work.back()) { // Do not compress the sink node.
346 new_used = bin_dimensions_;
347 } else {
348 for (const ArcFlowGraph::Arc& arc : incoming_arcs[w]) {
349 const int item =
350 arc.item_index == -1 ? -1 : reverse_item_index_map[arc.item_index];
351 const int prev_node = node_remap[arc.source];
352 const std::vector<int>& prev = new_nodes[prev_node];
353 DCHECK_NE(prev_node, -1);
354 for (int d = 0; d < num_dims; ++d) {
355 if (item != -1) {
356 new_used[d] =
357 std::max(new_used[d], prev[d] + items_[item].dimensions[d]);
358 } else {
359 new_used[d] = std::max(new_used[d], prev[d]);
360 }
361 }
362 }
363 }
364 const auto& it = new_node_indices.find(new_used);
365 if (it != new_node_indices.end()) {
366 node_remap[w] = it->second;
367 } else {
368 const int new_index = new_nodes.size();
369 new_nodes.push_back(new_used);
370 new_node_indices[new_used] = new_index;
371 node_remap[w] = new_index;
372 }
373 }
374 // Remap arcs.
375 for (const ArcFlowGraph::Arc& arc : arcs_) {
376 CHECK_NE(node_remap[arc.source], -1);
377 CHECK_NE(node_remap[arc.destination], -1);
378 // Remove loss arcs between merged nodes.
379 if (arc.item_index == -1 &&
380 node_remap[arc.source] == node_remap[arc.destination])
381 continue;
382 new_arcs.insert(
383 {node_remap[arc.source], node_remap[arc.destination], arc.item_index});
384 }
385 VLOG(1) << "Reduced nodes from " << num_nodes << " to " << new_nodes.size();
386 VLOG(1) << "Reduced arcs from " << arcs_.size() << " to " << new_arcs.size();
387 nodes_ = new_nodes;
388 arcs_ = new_arcs;
389 CHECK_NE(node_remap[old_source_node], -1);
390 CHECK_EQ(0, node_remap[old_source_node]);
391 CHECK_NE(node_remap[old_sink_node], -1);
392 CHECK_EQ(nodes_.size() - 1, node_remap[old_sink_node]);
393}
394
395} // namespace
396
397bool ArcFlowGraph::Arc::operator<(const ArcFlowGraph::Arc& other) const {
398 if (source != other.source) return source < other.source;
399 if (destination != other.destination) return destination < other.destination;
400 return item_index < other.item_index;
401}
402
404 const std::vector<int>& bin_dimensions,
405 absl::Span<const std::vector<int>> item_dimensions_by_type,
406 absl::Span<const int> demand_by_type) {
407 ArcFlowBuilder afb(bin_dimensions, item_dimensions_by_type, demand_by_type);
408 return afb.BuildVectorBinPackingGraph();
409}
410
411} // namespace packing
412} // namespace operations_research
IntegerValue size
int demand
int up_child
int right_child
int original_index
int cur_item_index
std::vector< int > dimensions
int cur_item_quantity
std::vector< int > used_dimensions
int64_t b
Definition table.cc:45
int64_t a
Definition table.cc:44
int arc
int index
void STLDeleteElements(T *container)
Definition stl_util.h:372
ArcFlowGraph BuildArcFlowGraph(const std::vector< int > &bin_dimensions, absl::Span< const std::vector< int > > item_dimensions_by_type, absl::Span< const int > demand_by_type)
Main method.
In SWIG mode, we don't want anything besides these top-level includes.
std::vector< int > DenseIntStableTopologicalSortOrDie(int num_nodes, const std::vector< std::pair< int, int > > &arcs)
trees with all degrees equal w the current value of w
int64_t demand
Definition resource.cc:126