find_graph_symmetries.cc

// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/graph/find_graph_symmetries.h"

#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/flags/flag.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
40#include "ortools/graph/util.h"
41
ABSL_FLAG(bool, minimize_permutation_support_size, false,
          "Tweak the algorithm to try to minimize the support size"
          " of the generators produced. This may negatively impact"
          " performance, but works great at reducing the support size on"
          " the sat_holeXXX benchmarks.");

namespace operations_research {

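// Counts, for each node whose out-degree is at most "max_degree", the number
// of ordered neighbor pairs (neigh1, neigh2) such that the arc
// neigh1->neigh2 exists, i.e. the number of directed triangles through that
// node; other nodes keep a count of 0.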
std::vector<int> CountTriangles(const ::util::StaticGraph<int, int>& graph,
                                int max_degree) {
  std::vector<int> num_triangles(graph.num_nodes(), 0);
  absl::flat_hash_set<std::pair<int, int>> arcs;
  arcs.reserve(graph.num_arcs());
  for (int a = 0; a < graph.num_arcs(); ++a) {
    arcs.insert({graph.Tail(a), graph.Head(a)});
  }
  for (int node = 0; node < graph.num_nodes(); ++node) {
    if (graph.OutDegree(node) > max_degree) continue;
    int triangles = 0;
    for (int neigh1 : graph[node]) {
      for (int neigh2 : graph[node]) {
        if (arcs.contains({neigh1, neigh2})) ++triangles;
      }
    }
    num_triangles[node] = triangles;
  }
  return num_triangles;
}

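// Runs a breadth-first search from "source". On return, "visited" contains
// the reached nodes in BFS order, and num_within_radius[d] is the number of
// nodes at distance <= d from the source. The search stops expanding at the
// first distance level where at least "stop_after_num_nodes" nodes have been
// settled.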
void LocalBfs(const ::util::StaticGraph<int, int>& graph, int source,
              int stop_after_num_nodes, std::vector<int>* visited,
              std::vector<int>* num_within_radius,
              // For performance, the user provides us with an already-
              // allocated bitmask of size graph.num_nodes() with all values
              // set to "false", which we'll restore in the same state upon
              // return.
              std::vector<bool>* tmp_mask) {
  const int n = graph.num_nodes();
  visited->clear();
  num_within_radius->clear();
  num_within_radius->push_back(1);
  DCHECK_EQ(tmp_mask->size(), n);
  DCHECK(absl::c_find(*tmp_mask, true) == tmp_mask->end());
  visited->push_back(source);
  (*tmp_mask)[source] = true;
  int num_settled = 0;
  int next_distance_change = 1;
  while (num_settled < visited->size()) {
    const int from = (*visited)[num_settled++];
    for (const int child : graph[from]) {
      if ((*tmp_mask)[child]) continue;
      (*tmp_mask)[child] = true;
      visited->push_back(child);
    }
    if (num_settled == next_distance_change) {
      // We already know all the nodes at the next distance.
      num_within_radius->push_back(visited->size());
      if (num_settled >= stop_after_num_nodes) break;
      next_distance_change = visited->size();
    }
  }
  // Clean up 'tmp_mask' sparsely.
  for (const int node : *visited) (*tmp_mask)[node] = false;
  // If we explored the whole connected component, num_within_radius contains
  // a spurious entry: remove it.
  if (num_settled == visited->size()) {
    DCHECK_GE(num_within_radius->size(), 2);
    DCHECK_EQ(num_within_radius->back(),
              (*num_within_radius)[num_within_radius->size() - 2]);
    num_within_radius->pop_back();
  }
}

namespace {
// Some routines used below.
void SwapFrontAndBack(std::vector<int>* v) {
  DCHECK(!v->empty());
  std::swap((*v)[0], v->back());
}

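// Returns true iff the two partitions have the same number of parts, and all
// their parts of index >= part_index have pairwise equal sizes and parent
// parts (earlier parts are assumed to already be compatible).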
bool PartitionsAreCompatibleAfterPartIndex(const DynamicPartition& p1,
                                           const DynamicPartition& p2,
                                           int part_index) {
  const int num_parts = p1.NumParts();
  if (p2.NumParts() != num_parts) return false;
  for (int p = part_index; p < num_parts; ++p) {
    if (p1.SizeOfPart(p) != p2.SizeOfPart(p) ||
        p1.ParentOfPart(p) != p2.ParentOfPart(p)) {
      return false;
    }
  }
  return true;
}

137// Whether the "l1" list maps to "l2" under the permutation "permutation".
138// This method uses a transient bitmask on all the elements, which
139// should be entirely false before the call (and will be restored as such
140// after it).
141//
142// TODO(user): Make this method support multi-elements (i.e. an element may
143// be repeated in the list), and see if that's sufficient to make the whole
144// graph symmetry finder support multi-arcs.
145template <class List>
146bool ListMapsToList(const List& l1, const List& l2,
147 const DynamicPermutation& permutation,
148 std::vector<bool>* tmp_node_mask) {
149 int num_elements_delta = 0;
150 bool match = true;
151 for (const int mapped_x : l2) {
152 ++num_elements_delta;
153 (*tmp_node_mask)[mapped_x] = true;
154 }
155 for (const int x : l1) {
156 --num_elements_delta;
157 const int mapped_x = permutation.ImageOf(x);
158 if (!(*tmp_node_mask)[mapped_x]) {
159 match = false;
160 break;
161 }
162 (*tmp_node_mask)[mapped_x] = false;
163 }
164 if (num_elements_delta != 0) match = false;
165 if (!match) {
166 // We need to clean up tmp_node_mask.
167 for (const int x : l2) (*tmp_node_mask)[x] = false;
168 }
169 return match;
170}
171} // namespace
172
GraphSymmetryFinder::GraphSymmetryFinder(const Graph& graph,
                                         bool is_undirected)
    : graph_(graph),
      tmp_dynamic_permutation_(NumNodes()),
      tmp_node_mask_(NumNodes(), false),
      tmp_degree_(NumNodes(), 0),
      tmp_nodes_with_degree_(NumNodes() + 1) {
  // Set up an "unlimited" time limit by default.
  time_limit_ = &dummy_time_limit_;
  tmp_partition_.Reset(NumNodes());
  if (is_undirected) {
    DCHECK(GraphIsSymmetric(graph));
  } else {
    // Compute the reverse adjacency lists.
    // First pass: compute the total in-degree of all nodes and put it in
    // reverse_adj_list_index (shifted by two; see below for why).
    reverse_adj_list_index_.assign(graph.num_nodes() + /*shift*/ 2, 0);
    for (const int node : graph.AllNodes()) {
      for (const int arc : graph.OutgoingArcs(node)) {
        ++reverse_adj_list_index_[graph.Head(arc) + /*shift*/ 2];
      }
    }
    // Second pass: apply a cumulative sum over reverse_adj_list_index.
    // After that, reverse_adj_list_index contains:
    // [0, 0, in_degree(node0), in_degree(node0) + in_degree(node1), ...]
    std::partial_sum(reverse_adj_list_index_.begin() + /*shift*/ 2,
                     reverse_adj_list_index_.end(),
                     reverse_adj_list_index_.begin() + /*shift*/ 2);
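    // E.g. (illustrative): with 3 nodes of in-degrees [2, 0, 1], the array
    // goes from [0, 0, 2, 0, 1] to [0, 0, 2, 2, 3].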
    // Third pass: populate "flattened_reverse_adj_lists", using
    // reverse_adj_list_index[i] as a dynamic pointer to the yet-unpopulated
    // area of the reverse adjacency list of node #i.
    flattened_reverse_adj_lists_.assign(graph.num_arcs(), -1);
    for (const int node : graph.AllNodes()) {
      for (const int arc : graph.OutgoingArcs(node)) {
        flattened_reverse_adj_lists_[reverse_adj_list_index_[graph.Head(arc) +
                                                             /*shift*/ 1]++] =
            node;
      }
    }
    // The last pass shifted reverse_adj_list_index, so it's now as we want
    // it: [0, in_degree(node0), in_degree(node0) + in_degree(node1), ...]
    if (DEBUG_MODE) {
      DCHECK_EQ(graph.num_arcs(), reverse_adj_list_index_[graph.num_nodes()]);
      for (const int i : flattened_reverse_adj_lists_) DCHECK_NE(i, -1);
    }
  }
}

bool GraphSymmetryFinder::IsGraphAutomorphism(
    const DynamicPermutation& permutation) const {
  for (const int base : permutation.AllMappingsSrc()) {
    const int image = permutation.ImageOf(base);
    if (image == base) continue;
    if (!ListMapsToList(graph_[base], graph_[image], permutation,
                        &tmp_node_mask_)) {
      return false;
    }
  }
  if (!reverse_adj_list_index_.empty()) {
    // The graph was not symmetric: we must also check the incoming arcs
    // to displaced nodes.
    for (const int base : permutation.AllMappingsSrc()) {
      const int image = permutation.ImageOf(base);
      if (image == base) continue;
      if (!ListMapsToList(TailsOfIncomingArcsTo(base),
                          TailsOfIncomingArcsTo(image), permutation,
                          &tmp_node_mask_)) {
        return false;
      }
    }
  }
  return true;
}

namespace {
// Specialized subroutine, to avoid code duplication: see its call site
// and its self-explanatory code.
template <class T>
inline void IncrementCounterForNonSingletons(const T& nodes,
                                             const DynamicPartition& partition,
                                             std::vector<int>* node_count,
                                             std::vector<int>* nodes_seen,
                                             int64_t* num_operations) {
  *num_operations += nodes.end() - nodes.begin();
  for (const int node : nodes) {
    if (partition.ElementsInSamePartAs(node).size() == 1) continue;
    const int count = ++(*node_count)[node];
    if (count == 1) nodes_seen->push_back(node);
  }
}
}  // namespace

void GraphSymmetryFinder::RecursivelyRefinePartitionByAdjacency(
    int first_unrefined_part_index, DynamicPartition* partition) {
  // Rename, for readability of the code below.
  std::vector<int>& tmp_nodes_with_nonzero_degree = tmp_stack_;

  // This function is the main bottleneck of the whole algorithm. We count
  // the number of blocks executed in the inner-most loops in num_operations.
  // At the end we multiply it by a factor to obtain a deterministic
  // duration, which we add to the deterministic time counter.
  //
  // TODO(user): We are really imprecise in our counting, but it is fine. We
  // just need a way to enforce a deterministic limit on the computation
  // effort.
  int64_t num_operations = 0;

  // Assuming that the partition was refined based on the adjacency on
  // parts [0 .. first_unrefined_part_index) already, we simply need to
  // refine parts first_unrefined_part_index ... NumParts()-1, the latter
  // bound being a moving target:
  // When a part #p < first_unrefined_part_index gets modified, it's always
  // split in two: itself, and a new part #p'. Since #p was already refined
  // on, we only need to further refine on *one* of its two split parts.
  // And this will be done because p' > first_unrefined_part_index.
  //
  // Thus, the following loop really does the full recursive refinement as
  // advertised.
  std::vector<bool> adjacency_directions(1, /*outgoing*/ true);
  if (!reverse_adj_list_index_.empty()) {
    adjacency_directions.push_back(false);  // Also look at incoming arcs.
  }
  for (int part_index = first_unrefined_part_index;
       part_index < partition->NumParts();  // Moving target!
       ++part_index) {
    for (const bool outgoing_adjacency : adjacency_directions) {
      // Count the aggregated degree of all nodes, only looking at arcs that
      // come from/to the current part.
      if (outgoing_adjacency) {
        for (const int node : partition->ElementsInPart(part_index)) {
          IncrementCounterForNonSingletons(
              graph_[node], *partition, &tmp_degree_,
              &tmp_nodes_with_nonzero_degree, &num_operations);
        }
      } else {
        for (const int node : partition->ElementsInPart(part_index)) {
          IncrementCounterForNonSingletons(
              TailsOfIncomingArcsTo(node), *partition, &tmp_degree_,
              &tmp_nodes_with_nonzero_degree, &num_operations);
        }
      }
      // Group the nodes by (nonzero) degree. Remember the maximum degree.
      int max_degree = 0;
      num_operations += 3 + tmp_nodes_with_nonzero_degree.size();
      for (const int node : tmp_nodes_with_nonzero_degree) {
        const int degree = tmp_degree_[node];
        tmp_degree_[node] = 0;  // To clean up after us.
        max_degree = std::max(max_degree, degree);
        tmp_nodes_with_degree_[degree].push_back(node);
      }
      tmp_nodes_with_nonzero_degree.clear();  // To clean up after us.
      // For each degree, refine the partition by the set of nodes with that
      // degree.
      for (int degree = 1; degree <= max_degree; ++degree) {
        // We use a manually tuned factor 3 because Refine() does quite a bit
        // of operations for each node in its argument.
        num_operations += 1 + 3 * tmp_nodes_with_degree_[degree].size();
        partition->Refine(tmp_nodes_with_degree_[degree]);
        tmp_nodes_with_degree_[degree].clear();  // To clean up after us.
      }
    }
  }

  // The coefficient was manually tuned (only on a few instances) so that the
  // time is roughly correlated with seconds on a fast desktop computer from
  // 2020.
  time_limit_->AdvanceDeterministicTime(1e-8 *
                                        static_cast<double>(num_operations));
}

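// Refines "partition" by distinguishing "node" (i.e. making it a singleton),
// then recursively refining by adjacency. If "new_singletons" is non-null,
// it is filled with the singletons created by this call (including parent
// parts that became singletons by being split).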
void GraphSymmetryFinder::DistinguishNodeInPartition(
    int node, DynamicPartition* partition, std::vector<int>* new_singletons) {
  const int original_num_parts = partition->NumParts();
  partition->Refine(std::vector<int>(1, node));
  RecursivelyRefinePartitionByAdjacency(partition->PartOf(node), partition);

  // Explore the newly refined parts to gather all the new singletons.
  if (new_singletons != nullptr) {
    new_singletons->clear();
    for (int p = original_num_parts; p < partition->NumParts(); ++p) {
      const int parent = partition->ParentOfPart(p);
      // We may see the same singleton parent several times, so we guard
      // against duplicates with the tmp_node_mask_ boolean vector.
      if (!tmp_node_mask_[parent] && parent < original_num_parts &&
          partition->SizeOfPart(parent) == 1) {
        tmp_node_mask_[parent] = true;
        new_singletons->push_back(*partition->ElementsInPart(parent).begin());
      }
      if (partition->SizeOfPart(p) == 1) {
        new_singletons->push_back(*partition->ElementsInPart(p).begin());
      }
    }
    // Reset tmp_node_mask_.
    for (int p = original_num_parts; p < partition->NumParts(); ++p) {
      tmp_node_mask_[partition->ParentOfPart(p)] = false;
    }
  }
}

namespace {
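// Merges the node equivalence classes of "node_equivalence_classes" along
// each cycle of "perm": all elements of a cycle end up in the same class.
// If "sorted_representatives" is non-null, each representative that loses
// its representative status in a merge is removed from that list.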
void MergeNodeEquivalenceClassesAccordingToPermutation(
    const SparsePermutation& perm, MergingPartition* node_equivalence_classes,
    DenseDoublyLinkedList* sorted_representatives) {
  for (int c = 0; c < perm.NumCycles(); ++c) {
    // TODO(user): use the global element->image iterator when it exists.
    int prev = -1;
    for (const int e : perm.Cycle(c)) {
      if (prev >= 0) {
        const int removed_representative =
            node_equivalence_classes->MergePartsOf(prev, e);
        if (sorted_representatives != nullptr &&
            removed_representative != -1) {
          sorted_representatives->Remove(removed_representative);
        }
      }
      prev = e;
    }
  }
}

// Subroutine used by FindSymmetries(); see its call site. This finds and
// outputs (in "pruned_other_nodes") the list of all representatives (under
// "node_equivalence_classes") that are in the same part as
// "representative_node" in "partition", other than "representative_node"
// itself.
// "node_equivalence_classes" must be compatible with "partition", i.e. two
// nodes that are in the same equivalence class must also be in the same part.
//
// To do this in O(output size), we also need the
// "representatives_sorted_by_index_in_partition" data structure: the
// representatives of the nodes of the targeted part are contiguous in that
// linked list.
void GetAllOtherRepresentativesInSamePartAs(
    int representative_node, const DynamicPartition& partition,
    const DenseDoublyLinkedList& representatives_sorted_by_index_in_partition,
    MergingPartition* node_equivalence_classes,  // Only for debugging.
    std::vector<int>* pruned_other_nodes) {
  pruned_other_nodes->clear();
  const int part_index = partition.PartOf(representative_node);
  // Iterate on all contiguous representatives after the initial one...
  int repr = representative_node;
  while (true) {
    DCHECK_EQ(repr, node_equivalence_classes->GetRoot(repr));
    repr = representatives_sorted_by_index_in_partition.Prev(repr);
    if (repr < 0 || partition.PartOf(repr) != part_index) break;
    pruned_other_nodes->push_back(repr);
  }
  // ... and then on all contiguous representatives *before* it.
  repr = representative_node;
  while (true) {
    DCHECK_EQ(repr, node_equivalence_classes->GetRoot(repr));
    repr = representatives_sorted_by_index_in_partition.Next(repr);
    if (repr < 0 || partition.PartOf(repr) != part_index) break;
    pruned_other_nodes->push_back(repr);
  }

  // This code is a bit tricky, so we check that we're doing it right, by
  // comparing its output to the brute-force, O(Part size) version.
  // This also (partly) verifies that
  // "representatives_sorted_by_index_in_partition" is what it claims it is.
  if (DEBUG_MODE) {
    std::vector<int> expected_output;
    for (const int e : partition.ElementsInPart(part_index)) {
      if (node_equivalence_classes->GetRoot(e) != representative_node) {
        expected_output.push_back(e);
      }
    }
    node_equivalence_classes->KeepOnlyOneNodePerPart(&expected_output);
    for (int& x : expected_output) x = node_equivalence_classes->GetRoot(x);
    std::sort(expected_output.begin(), expected_output.end());
    std::vector<int> sorted_output = *pruned_other_nodes;
    std::sort(sorted_output.begin(), sorted_output.end());
    DCHECK_EQ(absl::StrJoin(expected_output, " "),
              absl::StrJoin(sorted_output, " "));
  }
}
}  // namespace

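// Example of use (a sketch, with hypothetical variable names; it assumes a
// symmetric ::util::StaticGraph "graph" that was already Build()-ed):
//
//   GraphSymmetryFinder finder(graph, /*is_undirected=*/true);
//   std::vector<int> equivalence_classes(graph.num_nodes(), 0);
//   std::vector<std::unique_ptr<SparsePermutation>> generators;
//   std::vector<int> orbit_size_factors;
//   const absl::Status status = finder.FindSymmetries(
//       &equivalence_classes, &generators, &orbit_size_factors);
//
// On success, "generators" generates the automorphism group of the graph,
// and the product of the entries of "orbit_size_factors" is its order.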
absl::Status GraphSymmetryFinder::FindSymmetries(
    std::vector<int>* node_equivalence_classes_io,
    std::vector<std::unique_ptr<SparsePermutation>>* generators,
    std::vector<int>* factorized_automorphism_group_size,
    TimeLimit* time_limit) {
  // Initialization.
  time_limit_ = time_limit == nullptr ? &dummy_time_limit_ : time_limit;
  IF_STATS_ENABLED(stats_.initialization_time.StartTimer());
  generators->clear();
  factorized_automorphism_group_size->clear();
  if (node_equivalence_classes_io->size() != NumNodes()) {
    return absl::Status(absl::StatusCode::kInvalidArgument,
                        "Invalid 'node_equivalence_classes_io'.");
  }
  DynamicPartition base_partition(*node_equivalence_classes_io);
  // Break all inherent asymmetries in the graph.
  {
    ScopedTimeDistributionUpdater u(&stats_.initialization_refine_time);
    RecursivelyRefinePartitionByAdjacency(/*first_unrefined_part_index=*/0,
                                          &base_partition);
  }
  if (time_limit_->LimitReached()) {
    return absl::Status(absl::StatusCode::kDeadlineExceeded,
                        "During the initial refinement.");
  }
  VLOG(4) << "Base partition: "
          << base_partition.DebugString(
                 /*sort_parts_lexicographically=*/false);

  MergingPartition node_equivalence_classes(NumNodes());
  std::vector<std::vector<int>> permutations_displacing_node(NumNodes());
  std::vector<int> potential_root_image_nodes;
  IF_STATS_ENABLED(stats_.initialization_time.StopTimerAndAddElapsedTime());

  // To find all permutations of the graph that satisfy the current partition,
  // we pick an element v that is not in a singleton part, and we split the
  // search in two phases:
  // 1) Find (the generators of) all permutations that keep v invariant.
  // 2) For each w in PartOf(v) such that w != v:
  //    find *one* permutation that maps v to w, if it exists.
  //    If it does exist, add it to the generators.
  //
  // Part 1) is recursive.
  //
  // Since we can't use true recursion (it would be too deep for the stack),
  // we implement it iteratively. To do that, we unroll 1): the "invariant
  // dive" is a single pass that successively refines the node base_partition
  // with elements from non-singleton parts (the 'invariant node'), until all
  // parts are singletons.
  // We remember which nodes we picked as invariants, and also the successive
  // partition sizes as we refine it, to allow us to backtrack.
  // Then we'll perform 2) in the reverse order, backtracking the stack from
  // 1) while using another dedicated stack for the search (see below).
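  // For example (illustrative, not from this file): on an undirected 4-cycle
  // 0-1-2-3, the initial refinement leaves the single part {0, 1, 2, 3}; the
  // dive picks node 0, refining to {0} {1, 3} {2}, then picks node 1,
  // reaching all singletons. The stack then holds the
  // (invariant node, prior part count) pairs (0, 1) and (1, 3).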
  IF_STATS_ENABLED(stats_.invariant_dive_time.StartTimer());
  struct InvariantDiveState {
    int invariant_node;
    int num_parts_before_refinement;

    InvariantDiveState(int node, int num_parts)
        : invariant_node(node), num_parts_before_refinement(num_parts) {}
  };
  std::vector<InvariantDiveState> invariant_dive_stack;
  // TODO(user): experiment with, and briefly describe the results of various
  // algorithms for picking the invariant node:
  // - random selection
  // - highest/lowest degree first
  // - enumerate by part index; or by part size
  // - etc.
  for (int invariant_node = 0; invariant_node < NumNodes();
       ++invariant_node) {
    if (base_partition.ElementsInSamePartAs(invariant_node).size() == 1) {
      continue;
    }
    invariant_dive_stack.push_back(
        InvariantDiveState(invariant_node, base_partition.NumParts()));
    DistinguishNodeInPartition(invariant_node, &base_partition, nullptr);
    VLOG(4) << "Invariant dive: invariant node = " << invariant_node
            << "; partition after: "
            << base_partition.DebugString(
                   /*sort_parts_lexicographically=*/false);
    if (time_limit_->LimitReached()) {
      return absl::Status(absl::StatusCode::kDeadlineExceeded,
                          "During the invariant dive.");
    }
  }
  DenseDoublyLinkedList representatives_sorted_by_index_in_partition(
      base_partition.ElementsInHierarchicalOrder());
  DynamicPartition image_partition = base_partition;
  IF_STATS_ENABLED(stats_.invariant_dive_time.StopTimerAndAddElapsedTime());
  // Now we've dived to the bottom: we're left with the identity permutation,
  // which we don't need as a generator. We move on to phase 2).

  IF_STATS_ENABLED(stats_.main_search_time.StartTimer());
  while (!invariant_dive_stack.empty()) {
    if (time_limit_->LimitReached()) break;
    // Backtrack the last step of 1) (the invariant dive).
    IF_STATS_ENABLED(stats_.invariant_unroll_time.StartTimer());
    const int root_node = invariant_dive_stack.back().invariant_node;
    const int base_num_parts =
        invariant_dive_stack.back().num_parts_before_refinement;
    invariant_dive_stack.pop_back();
    base_partition.UndoRefineUntilNumPartsEqual(base_num_parts);
    image_partition.UndoRefineUntilNumPartsEqual(base_num_parts);
    VLOG(4) << "Backtracking invariant dive: root node = " << root_node
            << "; partition: "
            << base_partition.DebugString(
                   /*sort_parts_lexicographically=*/false);

    // Now we'll try to map "root_node" to all image nodes that seem
    // compatible and that aren't "root_node" itself.
    //
    // Doing so, we're able to detect potential bad (or good) matches by
    // refining the 'base' partition with "root_node"; and refining the
    // 'image' partition (which represents the partition of image nodes,
    // i.e. the nodes after applying the currently implicit permutation)
    // with that candidate image node: if the two partitions don't match,
    // then the candidate image isn't compatible.
    // If the partitions do match, we might either find the underlying
    // permutation directly, or we might need to further try and map other
    // nodes to their image: this is a recursive search with backtracking.

    // The potential images of root_node are the nodes in its part. They can
    // be pruned by the already computed equivalence classes.
    // TODO(user): better elect the representative of each equivalence class
    // in order to reduce the permutation support down the line.
    // TODO(user): Don't build a list; but instead use direct, inline
    // iteration on the representatives in the while() loop below, to benefit
    // from the incremental merging of the equivalence classes.
    DCHECK_EQ(1, node_equivalence_classes.NumNodesInSamePartAs(root_node));
    GetAllOtherRepresentativesInSamePartAs(
        root_node, base_partition,
        representatives_sorted_by_index_in_partition,
        &node_equivalence_classes, &potential_root_image_nodes);
    DCHECK(!potential_root_image_nodes.empty());
    IF_STATS_ENABLED(
        stats_.invariant_unroll_time.StopTimerAndAddElapsedTime());

    // Try to map "root_node" to all of its potential images. For each image,
    // we only care about finding a single compatible permutation, if it
    // exists.
    while (!potential_root_image_nodes.empty()) {
      if (time_limit_->LimitReached()) break;
      VLOG(4) << "Potential (pruned) images of root node " << root_node
              << " left: [" << absl::StrJoin(potential_root_image_nodes, " ")
              << "].";
      const int root_image_node = potential_root_image_nodes.back();
      VLOG(4) << "Trying image of root node: " << root_image_node;

      std::unique_ptr<SparsePermutation> permutation =
          FindOneSuitablePermutation(root_node, root_image_node,
                                     &base_partition, &image_partition,
                                     *generators,
                                     permutations_displacing_node);

      if (permutation != nullptr) {
        ScopedTimeDistributionUpdater u(&stats_.permutation_output_time);
        // We found a permutation. We store it in the list of generators, and
        // further prune out the remaining 'root' image candidates, taking
        // into account the permutation we just found.
        MergeNodeEquivalenceClassesAccordingToPermutation(
            *permutation, &node_equivalence_classes,
            &representatives_sorted_by_index_in_partition);
        // HACK(user): to make sure that we keep root_image_node as the
        // representative of its part, we temporarily move it to the front
        // of the vector, then move it again to the back so that it gets
        // deleted by the pop_back() below.
        SwapFrontAndBack(&potential_root_image_nodes);
        node_equivalence_classes.KeepOnlyOneNodePerPart(
            &potential_root_image_nodes);
        SwapFrontAndBack(&potential_root_image_nodes);

        // Register it onto the permutations_displacing_node vector.
        const int permutation_index = static_cast<int>(generators->size());
        for (const int node : permutation->Support()) {
          permutations_displacing_node[node].push_back(permutation_index);
        }

        // Move the permutation to the generator list (this also transfers
        // ownership).
        generators->push_back(std::move(permutation));
      }

      potential_root_image_nodes.pop_back();
    }

    // We keep track of the size of the orbit of 'root_node' under the
    // current subgroup: this is one of the factors of the total group size.
    // TODO(user): better, more complete explanation.
    factorized_automorphism_group_size->push_back(
        node_equivalence_classes.NumNodesInSamePartAs(root_node));
  }
  node_equivalence_classes.FillEquivalenceClasses(node_equivalence_classes_io);
  IF_STATS_ENABLED(stats_.main_search_time.StopTimerAndAddElapsedTime());
  IF_STATS_ENABLED(stats_.SetPrintOrder(StatsGroup::SORT_BY_NAME));
  IF_STATS_ENABLED(LOG(INFO) << "Statistics: " << stats_.StatString());
  if (time_limit_->LimitReached()) {
    return absl::Status(absl::StatusCode::kDeadlineExceeded,
                        "Some automorphisms were found, but probably not all.");
  }
  return ::absl::OkStatus();
}

namespace {
// This method can be easily understood in the context of
// ConfirmFullMatchOrFindNextMappingDecision(): see its call sites.
// Knowing that we want to map some element of part #part_index of
// "base_partition" to part #part_index of "image_partition", pick the "best"
// such mapping, for the global search algorithm.
inline void GetBestMapping(const DynamicPartition& base_partition,
                           const DynamicPartition& image_partition,
                           int part_index, int* base_node, int* image_node) {
  // As of pending CL 66620435, we've loosely tried three variants of
  // GetBestMapping():
  // 1) Just take the first element of the base part, map it to the first
  //    element of the image part.
  // 2) Just take the first element of the base part, and map it to itself if
  //    possible, else map it to the first element of the image part.
  // 3) Scan all elements of the base part until we find one that can map to
  //    itself. If there isn't one, we just fall back to strategy 1).
  //
  // Variant 2) gives the best results on most benchmarks, in terms of speed,
  // but 3) yields much smaller supports for the sat_holeXXX benchmarks, as
  // long as it's combined with the other tweak enabled by
  // FLAGS_minimize_permutation_support_size.
  if (absl::GetFlag(FLAGS_minimize_permutation_support_size)) {
    // Variant 3).
    for (const int node : base_partition.ElementsInPart(part_index)) {
      if (image_partition.PartOf(node) == part_index) {
        *image_node = *base_node = node;
        return;
      }
    }
    *base_node = *base_partition.ElementsInPart(part_index).begin();
    *image_node = *image_partition.ElementsInPart(part_index).begin();
    return;
  }

  // Variant 2).
  *base_node = *base_partition.ElementsInPart(part_index).begin();
  if (image_partition.PartOf(*base_node) == part_index) {
    *image_node = *base_node;
  } else {
    *image_node = *image_partition.ElementsInPart(part_index).begin();
  }
}
}  // namespace

// TODO(user): refactor this method and its submethods into a dedicated class
// whose members will be ominously accessed by all the class methods; most
// notably the search state stack. This may improve readability.
std::unique_ptr<SparsePermutation>
GraphSymmetryFinder::FindOneSuitablePermutation(
    int root_node, int root_image_node, DynamicPartition* base_partition,
    DynamicPartition* image_partition,
    absl::Span<const std::unique_ptr<SparsePermutation>>
        generators_found_so_far,
    absl::Span<const std::vector<int>> permutations_displacing_node) {
  // DCHECKs() and statistics.
  ScopedTimeDistributionUpdater search_time_updater(&stats_.search_time);
  DCHECK_EQ("", tmp_dynamic_permutation_.DebugString());
  DCHECK_EQ(
      base_partition->DebugString(/*sort_parts_lexicographically=*/false),
      image_partition->DebugString(/*sort_parts_lexicographically=*/false));
  DCHECK(search_states_.empty());

  // These will be used during the search. See their usage.
  std::vector<int> base_singletons;
  std::vector<int> image_singletons;
  int next_base_node;
  int next_image_node;
  int min_potential_mismatching_part_index;
  std::vector<int> next_potential_image_nodes;

  // Initialize the search: we can already distinguish "root_node" in the
  // base partition. See the comment below.
  search_states_.emplace_back(
      /*base_node=*/root_node, /*first_image_node=*/-1,
      /*num_parts_before_trying_to_map_base_node=*/base_partition->NumParts(),
      /*min_potential_mismatching_part_index=*/base_partition->NumParts());
  // We inject the image node directly as the "remaining_pruned_image_nodes".
  search_states_.back().remaining_pruned_image_nodes.assign(
      1, root_image_node);
  {
    ScopedTimeDistributionUpdater u(&stats_.initial_search_refine_time);
    DistinguishNodeInPartition(root_node, base_partition, &base_singletons);
  }
  while (!search_states_.empty()) {
    if (time_limit_->LimitReached()) return nullptr;
    // When exploring a SearchState "ss", we're supposed to have:
    // - A base_partition that has already been refined on ss->base_node.
    //   (base_singletons is the list of singletons created on the base
    //   partition during that refinement).
    // - A non-empty list of potential image nodes (we'll try them in reverse
    //   order).
    // - An image partition that hasn't been refined yet.
    //
    // Also, one should note that the base partition (before its refinement
    // on base_node) was deemed compatible with the image partition as it is
    // now.
    const SearchState& ss = search_states_.back();
    const int image_node = ss.first_image_node >= 0
                               ? ss.first_image_node
                               : ss.remaining_pruned_image_nodes.back();

    // Statistics, DCHECKs.
    IF_STATS_ENABLED(stats_.search_depth.Add(search_states_.size()));
    DCHECK_EQ(ss.num_parts_before_trying_to_map_base_node,
              image_partition->NumParts());

    // Apply the decision: map base_node to image_node. Since base_partition
    // was already refined on base_node, we just need to refine
    // image_partition.
    {
      ScopedTimeDistributionUpdater u(&stats_.search_refine_time);
      DistinguishNodeInPartition(image_node, image_partition,
                                 &image_singletons);
    }
    VLOG(4) << ss.DebugString();
    VLOG(4) << base_partition->DebugString(
        /*sort_parts_lexicographically=*/false);
    VLOG(4) << image_partition->DebugString(
        /*sort_parts_lexicographically=*/false);

    // Run some diagnoses on the two partitions. There are many outcomes, so
    // it's a bit complicated:
    // 1) The partitions are incompatible
    //    - Because of a straightforward criterion (size mismatch).
    //    - Because they are both fully refined (i.e. singletons only), yet
    //      the permutation induced by them is not a graph automorphism.
    // 2) The partitions induce a permutation (all their non-singleton parts
    //    are identical), and this permutation is a graph automorphism.
    // 3) The partitions need further refinement:
    //    - Because some non-singleton parts aren't equal in the base and
    //      image partition
    //    - Or because they are a full match (i.e. may induce a permutation,
    //      like in 2)), but the induced permutation isn't a graph
    //      automorphism.
    bool compatible = true;
    {
      ScopedTimeDistributionUpdater u(&stats_.quick_compatibility_time);
      compatible = PartitionsAreCompatibleAfterPartIndex(
          *base_partition, *image_partition,
          ss.num_parts_before_trying_to_map_base_node);
      u.AlsoUpdate(compatible ? &stats_.quick_compatibility_success_time
                              : &stats_.quick_compatibility_fail_time);
    }
    bool partitions_are_full_match = false;
    if (compatible) {
      {
        ScopedTimeDistributionUpdater u(
            &stats_.dynamic_permutation_refinement_time);
        tmp_dynamic_permutation_.AddMappings(base_singletons,
                                             image_singletons);
      }
      ScopedTimeDistributionUpdater u(&stats_.map_election_std_time);
      min_potential_mismatching_part_index =
          ss.min_potential_mismatching_part_index;
      partitions_are_full_match = ConfirmFullMatchOrFindNextMappingDecision(
          *base_partition, *image_partition, tmp_dynamic_permutation_,
          &min_potential_mismatching_part_index, &next_base_node,
          &next_image_node);
      u.AlsoUpdate(partitions_are_full_match
                       ? &stats_.map_election_std_full_match_time
                       : &stats_.map_election_std_mapping_time);
    }
    if (compatible && partitions_are_full_match) {
      DCHECK_EQ(min_potential_mismatching_part_index,
                base_partition->NumParts());
      // We have a permutation candidate!
      // Note(user): we also deal with (extremely rare) false positives for
      // "partitions_are_full_match" here: in case they aren't a full match,
      // IsGraphAutomorphism() will catch that; and we'll simply deepen the
      // search.
      bool is_automorphism = true;
      {
        ScopedTimeDistributionUpdater u(&stats_.automorphism_test_time);
        is_automorphism = IsGraphAutomorphism(tmp_dynamic_permutation_);
        u.AlsoUpdate(is_automorphism ? &stats_.automorphism_test_success_time
                                     : &stats_.automorphism_test_fail_time);
      }
      if (is_automorphism) {
        ScopedTimeDistributionUpdater u(&stats_.search_finalize_time);
        // We found a valid permutation. We can return it, but first we
        // must restore the partitions to their original state.
        std::unique_ptr<SparsePermutation> sparse_permutation(
            tmp_dynamic_permutation_.CreateSparsePermutation());
        VLOG(4) << "Automorphism found: "
                << sparse_permutation->DebugString();
        const int base_num_parts =
            search_states_[0].num_parts_before_trying_to_map_base_node;
        base_partition->UndoRefineUntilNumPartsEqual(base_num_parts);
        image_partition->UndoRefineUntilNumPartsEqual(base_num_parts);
        tmp_dynamic_permutation_.Reset();
        search_states_.clear();

        search_time_updater.AlsoUpdate(&stats_.search_time_success);
        return sparse_permutation;
      }

      // The permutation isn't a valid automorphism. Either the partitions
      // were fully refined, and we deem them incompatible, or they weren't,
      // and we consider them as 'not a full match'.
      VLOG(4) << "Permutation candidate isn't a valid automorphism.";
      if (base_partition->NumParts() == NumNodes()) {
        // Fully refined: the partitions are incompatible.
        compatible = false;
        ScopedTimeDistributionUpdater u(&stats_.dynamic_permutation_undo_time);
        tmp_dynamic_permutation_.UndoLastMappings(&base_singletons);
      } else {
        ScopedTimeDistributionUpdater u(&stats_.map_reelection_time);
        // TODO(user): try to get the non-singleton part from
        // DynamicPermutation in O(1). On some graphs, like the symmetry
        // graph of the MIP problem lectsched-4-obj.mps.gz, this takes the
        // majority of the time!
        int non_singleton_part = 0;
        {
          ScopedTimeDistributionUpdater u(&stats_.non_singleton_search_time);
          while (base_partition->SizeOfPart(non_singleton_part) == 1) {
            ++non_singleton_part;
            DCHECK_LT(non_singleton_part, base_partition->NumParts());
          }
        }
        time_limit_->AdvanceDeterministicTime(
            1e-9 * static_cast<double>(non_singleton_part));

        // The partitions are compatible, but we'll deepen the search on some
        // non-singleton part. We can pick any base and image node in this
        // case.
        GetBestMapping(*base_partition, *image_partition, non_singleton_part,
                       &next_base_node, &next_image_node);
      }
    }

    // Now we've fully diagnosed our partitions, and have already dealt with
    // case 2). We're left to deal with 1) and 3).
    //
    // Case 1): partitions are incompatible.
    if (!compatible) {
      ScopedTimeDistributionUpdater u(&stats_.backtracking_time);
      // We invalidate the current image node, and prune the remaining image
      // nodes. We might be left with no other image nodes, which means that
      // we'll backtrack, i.e. pop our current SearchState and invalidate the
      // 'current' image node of the upper SearchState (which might lead to
      // us backtracking it, and so on).
      while (!search_states_.empty()) {
        SearchState* const last_ss = &search_states_.back();
        image_partition->UndoRefineUntilNumPartsEqual(
            last_ss->num_parts_before_trying_to_map_base_node);
        if (last_ss->first_image_node >= 0) {
          // Find out and prune the remaining potential image nodes: there is
          // no permutation that maps base_node -> image_node that is
          // compatible with the current partition, so there can't be a
          // permutation that maps base_node -> X either, for all X in the
          // orbit of 'image_node' under valid permutations compatible with
          // the current partition. Ditto for other potential image nodes.
          //
          // TODO(user): fix this: we should really be collecting all
          // permutations displacing any node in "image_part", for the
          // pruning to be really exhaustive. We could also consider
          // alternative ways, like incrementally maintaining the list of
          // permutations compatible with the partition so far.
          const int part = image_partition->PartOf(last_ss->first_image_node);
          last_ss->remaining_pruned_image_nodes.reserve(
              image_partition->SizeOfPart(part));
          last_ss->remaining_pruned_image_nodes.push_back(
              last_ss->first_image_node);
          for (const int e : image_partition->ElementsInPart(part)) {
            if (e != last_ss->first_image_node) {
              last_ss->remaining_pruned_image_nodes.push_back(e);
            }
          }
          {
            ScopedTimeDistributionUpdater u(&stats_.pruning_time);
            PruneOrbitsUnderPermutationsCompatibleWithPartition(
                *image_partition, generators_found_so_far,
                permutations_displacing_node[last_ss->first_image_node],
                &last_ss->remaining_pruned_image_nodes);
          }
          SwapFrontAndBack(&last_ss->remaining_pruned_image_nodes);
          DCHECK_EQ(last_ss->remaining_pruned_image_nodes.back(),
                    last_ss->first_image_node);
          last_ss->first_image_node = -1;
        }
        last_ss->remaining_pruned_image_nodes.pop_back();
        if (!last_ss->remaining_pruned_image_nodes.empty()) break;

        VLOG(4) << "Backtracking one level up.";
        base_partition->UndoRefineUntilNumPartsEqual(
            last_ss->num_parts_before_trying_to_map_base_node);
        // If this was the root search state (i.e. we fully backtracked and
        // will exit the search after that), we don't have mappings to undo.
        // We run UndoLastMappings() anyway, because it's a no-op in that
        // case.
        tmp_dynamic_permutation_.UndoLastMappings(&base_singletons);
        search_states_.pop_back();
      }
      // Continue the search.
      continue;
    }

    // Case 3): we deepen the search.
    // Since the search loop starts from an already-refined base_partition,
    // we must refine it on next_base_node here.
    VLOG(4) << "  Deepening the search.";
    search_states_.emplace_back(
        next_base_node, next_image_node,
        /*num_parts_before_trying_to_map_base_node*/ base_partition
            ->NumParts(),
        min_potential_mismatching_part_index);
    {
      ScopedTimeDistributionUpdater u(&stats_.search_refine_time);
      DistinguishNodeInPartition(next_base_node, base_partition,
                                 &base_singletons);
    }
  }
  // We exhausted the search; we didn't find any permutation.
  search_time_updater.AlsoUpdate(&stats_.search_time_fail);
  return nullptr;
}

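// Returns the tails of the arcs incoming to "node", as precomputed by the
// constructor for non-symmetric graphs.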
util::BeginEndWrapper<std::vector<int>::const_iterator>
GraphSymmetryFinder::TailsOfIncomingArcsTo(int node) const {
  return util::BeginEndWrapper<std::vector<int>::const_iterator>(
      flattened_reverse_adj_lists_.begin() + reverse_adj_list_index_[node],
      flattened_reverse_adj_lists_.begin() +
          reverse_adj_list_index_[node + 1]);
}

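// Prunes "nodes" down to (at most) one representative per orbit, where the
// orbits are those of the subgroup generated by the given permutations that
// are compatible with "partition" (i.e. whose cycles each stay within a
// single part).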
void GraphSymmetryFinder::PruneOrbitsUnderPermutationsCompatibleWithPartition(
    const DynamicPartition& partition,
    absl::Span<const std::unique_ptr<SparsePermutation>> permutations,
    absl::Span<const int> permutation_indices, std::vector<int>* nodes) {
  VLOG(4) << "  Pruning [" << absl::StrJoin(*nodes, ", ") << "]";
  // TODO(user): apply a smarter test to decide whether to do the pruning
  // or not: we can accurately estimate the cost of pruning (iterate through
  // all generators found so far) and its estimated benefit (the cost of
  // the search below the state that we're currently in, times the expected
  // number of pruned nodes). Sometimes it may be better to skip the
  // pruning.
  if (nodes->size() <= 1) return;

  // Iterate on all targeted permutations. If they are compatible, apply
  // them to tmp_partition_ which will contain the incrementally merged
  // equivalence classes.
  std::vector<int>& tmp_nodes_on_support =
      tmp_stack_;  // Rename, for readability.
  DCHECK(tmp_nodes_on_support.empty());
  // TODO(user): investigate further optimizations: maybe it's possible
  // to incrementally maintain the set of permutations that is compatible
  // with the current partition, instead of recomputing it here?
  for (const int p : permutation_indices) {
    const SparsePermutation& permutation = *permutations[p];
    // First, a quick compatibility check: each of the permutation's cycles
    // must be no larger than the part that contains it.
    bool compatible = true;
    for (int c = 0; c < permutation.NumCycles(); ++c) {
      const SparsePermutation::Iterator cycle = permutation.Cycle(c);
      if (cycle.size() >
          partition.SizeOfPart(partition.PartOf(*cycle.begin()))) {
        compatible = false;
        break;
      }
    }
    if (!compatible) continue;
    // Now the full compatibility check: each cycle of the permutation must
    // be fully included in an image part.
    for (int c = 0; c < permutation.NumCycles(); ++c) {
      int part = -1;
      for (const int node : permutation.Cycle(c)) {
        if (partition.PartOf(node) != part) {
          if (part >= 0) {
            compatible = false;
            break;
          }
          part = partition.PartOf(node);  // Initialization of 'part'.
        }
      }
    }
    if (!compatible) continue;
    // The permutation is fully compatible!
    // TODO(user): ignore cycles that are outside of image_part.
    MergeNodeEquivalenceClassesAccordingToPermutation(permutation,
                                                      &tmp_partition_,
                                                      nullptr);
    for (const int node : permutation.Support()) {
      if (!tmp_node_mask_[node]) {
        tmp_node_mask_[node] = true;
        tmp_nodes_on_support.push_back(node);
      }
    }
  }

  // Apply the pruning.
  tmp_partition_.KeepOnlyOneNodePerPart(nodes);

  // Reset the "tmp_" structures sparsely.
  for (const int node : tmp_nodes_on_support) {
    tmp_node_mask_[node] = false;
    tmp_partition_.ResetNode(node);
  }
  tmp_nodes_on_support.clear();
  VLOG(4) << "  Pruned: [" << absl::StrJoin(*nodes, ", ") << "]";
}

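// Returns true iff the base and image partitions are a full match, i.e. all
// their non-singleton parts have equal fingerprints, in which case they
// induce a permutation candidate. Otherwise returns false and outputs, in
// (*next_base_node, *next_image_node), the next mapping decision to try;
// *min_potential_mismatching_part_index_io caches the progress of the search
// for a mismatching part across calls.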
bool GraphSymmetryFinder::ConfirmFullMatchOrFindNextMappingDecision(
    const DynamicPartition& base_partition,
    const DynamicPartition& image_partition,
    const DynamicPermutation& current_permutation_candidate,
    int* min_potential_mismatching_part_index_io, int* next_base_node,
    int* next_image_node) const {
  *next_base_node = -1;
  *next_image_node = -1;

  // The following clause should be true most of the time, except in some
  // specific use cases.
  if (!absl::GetFlag(FLAGS_minimize_permutation_support_size)) {
    // First, we try to map the loose ends of the current permutation: these
    // loose ends can't be mapped to themselves, so we'll have to map them to
    // something anyway.
    for (const int loose_node : current_permutation_candidate.LooseEnds()) {
      DCHECK_GT(base_partition.ElementsInSamePartAs(loose_node).size(), 1);
      *next_base_node = loose_node;
      const int root = current_permutation_candidate.RootOf(loose_node);
      DCHECK_NE(root, loose_node);
      if (image_partition.PartOf(root) == base_partition.PartOf(loose_node)) {
        // We prioritize mapping a loose end to its own root (i.e. closing a
        // cycle) when possible, as here: we exit immediately.
        *next_image_node = root;
        return false;
      }
    }
    if (*next_base_node != -1) {
      // We found loose ends, but none that mapped to its own root. Just pick
      // any valid image.
      *next_image_node =
          *image_partition
               .ElementsInPart(base_partition.PartOf(*next_base_node))
               .begin();
      return false;
    }
  }

  // If there is no loose node (i.e. the current permutation only has closed
  // cycles), we fall back to picking any part that is different in the base
  // and image partitions; because we know that some mapping decision will
  // have to be made there.
  // SUBTLE: we use "min_potential_mismatching_part_index_io" to incrementally
  // keep running this search (for a mismatching part) from where we left off.
  // TODO(user): implement a simpler search for a mismatching part: it's
  // trivially possible if the base partition maintains a hash set of all
  // Fprints of its parts, and if the image partition uses that to maintain
  // the list of 'different' non-singleton parts.
  const int initial_min_potential_mismatching_part_index =
      *min_potential_mismatching_part_index_io;
  for (; *min_potential_mismatching_part_index_io < base_partition.NumParts();
       ++*min_potential_mismatching_part_index_io) {
    const int p = *min_potential_mismatching_part_index_io;
    if (base_partition.SizeOfPart(p) != 1 &&
        base_partition.FprintOfPart(p) != image_partition.FprintOfPart(p)) {
      GetBestMapping(base_partition, image_partition, p, next_base_node,
                     next_image_node);
      return false;
    }

    const int parent = base_partition.ParentOfPart(p);
    if (parent < initial_min_potential_mismatching_part_index &&
        base_partition.SizeOfPart(parent) != 1 &&
        base_partition.FprintOfPart(parent) !=
            image_partition.FprintOfPart(parent)) {
      GetBestMapping(base_partition, image_partition, parent, next_base_node,
                     next_image_node);
      return false;
    }
  }

  // We didn't find an unequal part. DCHECK that our "incremental" check was
  // actually correct and that all non-singleton parts are indeed equal.
  if (DEBUG_MODE) {
    for (int p = 0; p < base_partition.NumParts(); ++p) {
      if (base_partition.SizeOfPart(p) != 1) {
        CHECK_EQ(base_partition.FprintOfPart(p),
                 image_partition.FprintOfPart(p));
      }
    }
  }
  return true;
}

std::string GraphSymmetryFinder::SearchState::DebugString() const {
  return absl::StrFormat(
      "SearchState{ base_node=%d, first_image_node=%d,"
      " remaining_pruned_image_nodes=[%s],"
      " num_parts_before_trying_to_map_base_node=%d }",
      base_node, first_image_node,
      absl::StrJoin(remaining_pruned_image_nodes, " "),
      num_parts_before_trying_to_map_base_node);
}

}  // namespace operations_research