Google OR-Tools v9.12
find_graph_symmetries.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19#include <memory>
20#include <numeric>
21#include <string>
22#include <utility>
23#include <vector>
24
25#include "absl/algorithm/container.h"
26#include "absl/base/log_severity.h"
27#include "absl/container/flat_hash_map.h"
28#include "absl/container/flat_hash_set.h"
29#include "absl/flags/flag.h"
30#include "absl/log/check.h"
31#include "absl/numeric/int128.h"
32#include "absl/status/status.h"
33#include "absl/strings/str_format.h"
34#include "absl/strings/str_join.h"
35#include "absl/time/clock.h"
36#include "absl/time/time.h"
37#include "absl/types/span.h"
43#include "ortools/graph/graph.h"
45#include "ortools/graph/util.h"
46
47ABSL_FLAG(bool, minimize_permutation_support_size, false,
48 "Tweak the algorithm to try to minimize the support size"
49 " of the generators produced. This may negatively impact the"
50 " performance, but works great on the sat_holeXXX benchmarks"
51 " to reduce the support size.");
52
53namespace operations_research {
54
56
57std::vector<int> CountTriangles(const ::util::StaticGraph<int, int>& graph,
58 int max_degree) {
59 std::vector<int> num_triangles(graph.num_nodes(), 0);
60 absl::flat_hash_set<std::pair<int, int>> arcs;
61 arcs.reserve(graph.num_arcs());
62 for (int a = 0; a < graph.num_arcs(); ++a) {
63 arcs.insert({graph.Tail(a), graph.Head(a)});
64 }
65 for (int node = 0; node < graph.num_nodes(); ++node) {
66 if (graph.OutDegree(node) > max_degree) continue;
67 int triangles = 0;
68 for (int neigh1 : graph[node]) {
69 for (int neigh2 : graph[node]) {
70 if (arcs.contains({neigh1, neigh2})) ++triangles;
71 }
72 }
73 num_triangles[node] = triangles;
74 }
75 return num_triangles;
76}
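// Illustrative usage (not part of the original source; assumes the usual
// util::StaticGraph building API of AddArc()/Build()): counting triangles in
// a symmetric 3-cycle. Each node sees both ordered pairs of its two
// neighbors connected, so every count is 2.
//
//   const std::vector<std::pair<int, int>> edges = {{0, 1}, {1, 2}, {0, 2}};
//   ::util::StaticGraph<int, int> graph(/*num_nodes=*/3, /*arc_capacity=*/6);
//   for (const auto& [a, b] : edges) {
//     graph.AddArc(a, b);
//     graph.AddArc(b, a);
//   }
//   graph.Build();
//   const std::vector<int> num_triangles =
//       CountTriangles(graph, /*max_degree=*/8);
//   // num_triangles == {2, 2, 2}.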
77
78void LocalBfs(const ::util::StaticGraph<int, int>& graph, int source,
79 int stop_after_num_nodes, std::vector<int>* visited,
80 std::vector<int>* num_within_radius,
81 // For performance, the user provides us with an already-
82 // allocated bitmask of size graph.num_nodes() with all values set
83 // to "false", which we'll restore to the same state upon return.
84 std::vector<bool>* tmp_mask) {
85 const int n = graph.num_nodes();
86 visited->clear();
87 num_within_radius->clear();
88 num_within_radius->push_back(1);
89 DCHECK_EQ(tmp_mask->size(), n);
90 DCHECK(absl::c_find(*tmp_mask, true) == tmp_mask->end());
91 visited->push_back(source);
92 (*tmp_mask)[source] = true;
93 int num_settled = 0;
94 int next_distance_change = 1;
95 while (num_settled < visited->size()) {
96 const int from = (*visited)[num_settled++];
97 for (const int child : graph[from]) {
98 if ((*tmp_mask)[child]) continue;
99 (*tmp_mask)[child] = true;
100 visited->push_back(child);
101 }
102 if (num_settled == next_distance_change) {
103 // We already know all the nodes at the next distance.
104 num_within_radius->push_back(visited->size());
105 if (num_settled >= stop_after_num_nodes) break;
106 next_distance_change = visited->size();
107 }
108 }
109 // Clean up 'tmp_mask' sparsely.
110 for (const int node : *visited) (*tmp_mask)[node] = false;
111 // If we explored the whole connected component, num_within_radius contains
112 // a spurious entry: remove it.
113 if (num_settled == visited->size()) {
114 DCHECK_GE(num_within_radius->size(), 2);
115 DCHECK_EQ(num_within_radius->back(),
116 (*num_within_radius)[num_within_radius->size() - 2]);
117 num_within_radius->pop_back();
118 }
119}
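// Illustrative usage (not part of the original source): a local BFS from
// node 0 in the symmetric path 0 - 1 - 2 - 3, assuming 'graph' was built as
// a symmetric util::StaticGraph in the same way as the CountTriangles
// example above (arcs in both directions).
//
//   std::vector<int> visited;
//   std::vector<int> num_within_radius;
//   std::vector<bool> tmp_mask(graph.num_nodes(), false);  // Must be all false.
//   LocalBfs(graph, /*source=*/0, /*stop_after_num_nodes=*/100, &visited,
//            &num_within_radius, &tmp_mask);
//   // visited == {0, 1, 2, 3} (BFS order) and num_within_radius == {1, 2, 3, 4}:
//   // one node within distance 0, two within distance 1, and so on.
//   // 'tmp_mask' is restored to all-false upon return.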
120
121namespace {
122// Some routines used below.
123void SwapFrontAndBack(std::vector<int>* v) {
124 DCHECK(!v->empty());
125 std::swap((*v)[0], v->back());
126}
127
128bool PartitionsAreCompatibleAfterPartIndex(const DynamicPartition& p1,
129 const DynamicPartition& p2,
130 int part_index) {
131 const int num_parts = p1.NumParts();
132 if (p2.NumParts() != num_parts) return false;
133 for (int p = part_index; p < num_parts; ++p) {
134 if (p1.SizeOfPart(p) != p2.SizeOfPart(p) ||
135 p1.ParentOfPart(p) != p2.ParentOfPart(p)) {
136 return false;
137 }
138 }
139 return true;
140}
141
142// Whether the "l1" list maps to "l2" under the permutation "permutation".
143// This method uses a transient bitmask on all the elements, which
144// should be entirely false before the call (and will be restored as such
145// after it).
146//
147// TODO(user): Make this method support multi-elements (i.e. an element may
148// be repeated in the list), and see if that's sufficient to make the whole
149// graph symmetry finder support multi-arcs.
150template <class List>
151bool ListMapsToList(const List& l1, const List& l2,
152 const DynamicPermutation& permutation,
153 std::vector<bool>* tmp_node_mask) {
154 int num_elements_delta = 0;
155 bool match = true;
156 for (const int mapped_x : l2) {
157 ++num_elements_delta;
158 (*tmp_node_mask)[mapped_x] = true;
159 }
160 for (const int x : l1) {
161 --num_elements_delta;
162 const int mapped_x = permutation.ImageOf(x);
163 if (!(*tmp_node_mask)[mapped_x]) {
164 match = false;
165 break;
166 }
167 (*tmp_node_mask)[mapped_x] = false;
168 }
169 if (num_elements_delta != 0) match = false;
170 if (!match) {
171 // We need to clean up tmp_node_mask.
172 for (const int x : l2) (*tmp_node_mask)[x] = false;
173 }
174 return match;
175}
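// Illustrative example (not part of the original source): with a permutation
// whose ImageOf() maps 0 -> 2 and 1 -> 3, ListMapsToList({0, 1}, {3, 2}, ...)
// returns true, since the images {2, 3} of l1 cover l2 exactly (order is
// irrelevant). With multisets such as l1 = {0, 0} and l2 = {2, 2} it returns
// false even though the images match, which is the multi-arc limitation
// mentioned in the TODO above.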
176} // namespace
177
178GraphSymmetryFinder::GraphSymmetryFinder(const Graph& graph, bool is_undirected)
179 : graph_(graph),
180 tmp_dynamic_permutation_(NumNodes()),
181 tmp_node_mask_(NumNodes(), false),
182 tmp_degree_(NumNodes(), 0),
183 tmp_nodes_with_degree_(NumNodes() + 1) {
184 // Set up an "unlimited" time limit by default.
185 time_limit_ = &dummy_time_limit_;
186 tmp_partition_.Reset(NumNodes());
187 if (is_undirected) {
188 DCHECK(GraphIsSymmetric(graph));
189 } else {
190 // Compute the reverse adjacency lists.
191 // First pass: compute the total in-degree of all nodes and put it in
192 // reverse_adj_list_index (shifted by two; see below why).
193 reverse_adj_list_index_.assign(graph.num_nodes() + /*shift*/ 2, 0);
194 for (const int node : graph.AllNodes()) {
195 for (const int arc : graph.OutgoingArcs(node)) {
196 ++reverse_adj_list_index_[graph.Head(arc) + /*shift*/ 2];
197 }
198 }
199 // Second pass: apply a cumulative sum over reverse_adj_list_index.
200 // After that, reverse_adj_list contains:
201 // [0, 0, in_degree(node0), in_degree(node0) + in_degree(node1), ...]
202 std::partial_sum(reverse_adj_list_index_.begin() + /*shift*/ 2,
203 reverse_adj_list_index_.end(),
204 reverse_adj_list_index_.begin() + /*shift*/ 2);
205 // Third pass: populate "flattened_reverse_adj_lists", using
206 // reverse_adj_list_index[i] as a dynamic pointer to the yet-unpopulated
207 // area of the reverse adjacency list of node #i.
208 flattened_reverse_adj_lists_.assign(graph.num_arcs(), -1);
209 for (const int node : graph.AllNodes()) {
210 for (const int arc : graph.OutgoingArcs(node)) {
211 flattened_reverse_adj_lists_[reverse_adj_list_index_[graph.Head(arc) +
212 /*shift*/ 1]++] =
213 node;
214 }
215 }
216 // The last pass shifted reverse_adj_list_index, so it's now as we want it:
217 // [0, in_degree(node0), in_degree(node0) + in_degree(node1), ...]
218 if (DEBUG_MODE) {
219 DCHECK_EQ(graph.num_arcs(), reverse_adj_list_index_[graph.num_nodes()]);
220 for (const int i : flattened_reverse_adj_lists_) DCHECK_NE(i, -1);
221 }
222 }
223}
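// Worked example of the reverse-adjacency construction above (illustrative,
// not part of the original source). Take a 3-node graph with arcs 0->2, 1->2
// and 2->0, i.e. in-degrees {1, 0, 2}:
//   - after the first pass:   reverse_adj_list_index_ == [0, 0, 1, 0, 2]
//   - after the partial sum:  reverse_adj_list_index_ == [0, 0, 1, 1, 3]
//   - the third pass uses index[head + 1] as a write cursor, leaving
//     flattened_reverse_adj_lists_ == [2, 0, 1] and
//     reverse_adj_list_index_      == [0, 1, 1, 3, 3].
// TailsOfIncomingArcsTo(node) then reads the slice
// [index[node], index[node + 1]): {2} for node 0, {} for node 1, and {0, 1}
// for node 2.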
224
225bool GraphSymmetryFinder::IsGraphAutomorphism(
226 const DynamicPermutation& permutation) const {
227 for (const int base : permutation.AllMappingsSrc()) {
228 const int image = permutation.ImageOf(base);
229 if (image == base) continue;
230 if (!ListMapsToList(graph_[base], graph_[image], permutation,
231 &tmp_node_mask_)) {
232 return false;
233 }
234 }
235 if (!reverse_adj_list_index_.empty()) {
236 // The graph was not symmetric: we must also check the incoming arcs
237 // to displaced nodes.
238 for (const int base : permutation.AllMappingsSrc()) {
239 const int image = permutation.ImageOf(base);
240 if (image == base) continue;
241 if (!ListMapsToList(TailsOfIncomingArcsTo(base),
242 TailsOfIncomingArcsTo(image), permutation,
243 &tmp_node_mask_)) {
244 return false;
245 }
246 }
247 }
248 return true;
249}
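// Illustrative example (not part of the original source): on the symmetric
// 4-cycle 0 - 1 - 2 - 3 - 0, a permutation swapping 0 and 2 (and fixing 1
// and 3) is a graph automorphism: the neighbor sets {1, 3} of 0 and 2 map
// onto each other. A permutation swapping only 0 and 1 is not: the arc
// 0 -> 3 would have to map to 1 -> 3, which is absent from the cycle.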
250
251namespace {
252// Specialized subroutine, to avoid code duplication: see its call site
253// and its self-explanatory code.
254template <class T>
255inline void IncrementCounterForNonSingletons(const T& nodes,
256 const DynamicPartition& partition,
257 std::vector<int>* node_count,
258 std::vector<int>* nodes_seen,
259 int64_t* num_operations) {
260 *num_operations += nodes.end() - nodes.begin();
261 for (const int node : nodes) {
262 if (partition.ElementsInSamePartAs(node).size() == 1) continue;
263 const int count = ++(*node_count)[node];
264 if (count == 1) nodes_seen->push_back(node);
265 }
266}
267} // namespace
268
269void GraphSymmetryFinder::RecursivelyRefinePartitionByAdjacency(
270 int first_unrefined_part_index, DynamicPartition* partition) {
271 // Rename, for readability of the code below.
272 std::vector<int>& tmp_nodes_with_nonzero_degree = tmp_stack_;
273
274 // This function is the main bottleneck of the whole algorithm. We count the
275 // number of elementary operations in the inner-most loops in num_operations.
276 // At the end we multiply it by a factor to obtain a deterministic duration
277 // that we add to the deterministic time counter.
278 //
279 // TODO(user): We are really imprecise in our counting, but it is fine. We
280 // just need a way to enforce a deterministic limit on the computation effort.
281 int64_t num_operations = 0;
282
283 // Assuming that the partition was refined based on the adjacency on
284 // parts [0 .. first_unrefined_part_index) already, we simply need to
285 // refine parts first_unrefined_part_index ... NumParts()-1, the latter bound
286 // being a moving target:
287 // When a part #p < first_unrefined_part_index gets modified, it's always
288 // split in two: itself, and a new part #p'. Since #p was already refined
289 // on, we only need to further refine on *one* of its two split parts.
290 // And this will be done because p' > first_unrefined_part_index.
291 //
292 // Thus, the following loop really does the full recursive refinement as
293 // advertised.
294 std::vector<bool> adjacency_directions(1, /*outgoing*/ true);
295 if (!reverse_adj_list_index_.empty()) {
296 adjacency_directions.push_back(false); // Also look at incoming arcs.
297 }
298 for (int part_index = first_unrefined_part_index;
299 part_index < partition->NumParts(); // Moving target!
300 ++part_index) {
301 for (const bool outgoing_adjacency : adjacency_directions) {
302 // Count the aggregated degree of all nodes, only looking at arcs that
303 // come from/to the current part.
304 if (outgoing_adjacency) {
305 for (const int node : partition->ElementsInPart(part_index)) {
306 IncrementCounterForNonSingletons(
307 graph_[node], *partition, &tmp_degree_,
308 &tmp_nodes_with_nonzero_degree, &num_operations);
309 }
310 } else {
311 for (const int node : partition->ElementsInPart(part_index)) {
312 IncrementCounterForNonSingletons(
313 TailsOfIncomingArcsTo(node), *partition, &tmp_degree_,
314 &tmp_nodes_with_nonzero_degree, &num_operations);
315 }
316 }
317 // Group the nodes by (nonzero) degree. Remember the maximum degree.
318 int max_degree = 0;
319 num_operations += 3 + tmp_nodes_with_nonzero_degree.size();
320 for (const int node : tmp_nodes_with_nonzero_degree) {
321 const int degree = tmp_degree_[node];
322 tmp_degree_[node] = 0; // To clean up after us.
323 max_degree = std::max(max_degree, degree);
324 tmp_nodes_with_degree_[degree].push_back(node);
325 }
326 tmp_nodes_with_nonzero_degree.clear(); // To clean up after us.
327 // For each degree, refine the partition by the set of nodes with that
328 // degree.
329 for (int degree = 1; degree <= max_degree; ++degree) {
330 // We use a manually tuned factor 3 because Refine() does quite a bit of
331 // operations for each node in its argument.
332 num_operations += 1 + 3 * tmp_nodes_with_degree_[degree].size();
333 partition->Refine(tmp_nodes_with_degree_[degree]);
334 tmp_nodes_with_degree_[degree].clear(); // To clean up after us.
335 }
336 }
337 }
338
339 // The coefficient was manually tuned (only on a few instances) so that the
340 // time is roughly correlated with seconds on a fast desktop computer from
341 // 2020.
342 time_limit_->AdvanceDeterministicTime(1e-8 *
343 static_cast<double>(num_operations));
344}
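// Illustrative example (not part of the original source): on the symmetric
// path 0 - 1 - 2, starting from the single-part partition {0 1 2}, the
// aggregated degree towards that part is 1 for the endpoints and 2 for the
// middle node, so the refinement splits the partition into {0 2} and {1}.
// Further rounds leave it unchanged: the partition is now equitable.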
345
346void GraphSymmetryFinder::DistinguishNodeInPartition(
347 int node, DynamicPartition* partition, std::vector<int>* new_singletons) {
348 const int original_num_parts = partition->NumParts();
349 partition->Refine(std::vector<int>(1, node));
350 RecursivelyRefinePartitionByAdjacency(partition->PartOf(node), partition);
351
352 // Explore the newly refined parts to gather all the new singletons.
353 if (new_singletons != nullptr) {
354 new_singletons->clear();
355 for (int p = original_num_parts; p < partition->NumParts(); ++p) {
356 const int parent = partition->ParentOfPart(p);
357 // We may see the same singleton parent several times, so we guard them
358 // with the tmp_node_mask_ boolean vector.
359 if (!tmp_node_mask_[parent] && parent < original_num_parts &&
360 partition->SizeOfPart(parent) == 1) {
361 tmp_node_mask_[parent] = true;
362 new_singletons->push_back(*partition->ElementsInPart(parent).begin());
363 }
364 if (partition->SizeOfPart(p) == 1) {
365 new_singletons->push_back(*partition->ElementsInPart(p).begin());
366 }
367 }
368 // Reset tmp_node_mask_.
369 for (int p = original_num_parts; p < partition->NumParts(); ++p) {
370 tmp_node_mask_[partition->ParentOfPart(p)] = false;
371 }
372 }
373}
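// Illustrative example (not part of the original source): continuing the
// path example above with partition {0 2} | {1}, distinguishing node 0
// splits {0 2} into two singleton parts, and both 0 and the node it leaves
// behind (2) are reported in 'new_singletons'.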
374
375namespace {
376void MergeNodeEquivalenceClassesAccordingToPermutation(
377 const SparsePermutation& perm, MergingPartition* node_equivalence_classes,
378 DenseDoublyLinkedList* sorted_representatives) {
379 for (int c = 0; c < perm.NumCycles(); ++c) {
380 // TODO(user): use the global element->image iterator when it exists.
381 int prev = -1;
382 for (const int e : perm.Cycle(c)) {
383 if (prev >= 0) {
384 const int removed_representative =
385 node_equivalence_classes->MergePartsOf(prev, e);
386 if (sorted_representatives != nullptr && removed_representative != -1) {
387 sorted_representatives->Remove(removed_representative);
388 }
389 }
390 prev = e;
391 }
392 }
393}
394
395// Subroutine used by FindSymmetries(); see its call site. This finds and
396// outputs (in "pruned_other_nodes") the list of all representatives (under
397// "node_equivalence_classes") that are in the same part as
398// "representative_node" in "partition"; other than "representative_node"
399// itself.
400// "node_equivalence_classes" must be compatible with "partition", i.e. two
401// nodes that are in the same equivalence class must also be in the same part.
402//
403// To do this in O(output size), we also need the
404// "representatives_sorted_by_index_in_partition" data structure: the
405// representatives of the nodes of the targeted part are contiguous in that
406// linked list.
407void GetAllOtherRepresentativesInSamePartAs(
408 int representative_node, const DynamicPartition& partition,
409 const DenseDoublyLinkedList& representatives_sorted_by_index_in_partition,
410 MergingPartition* node_equivalence_classes, // Only for debugging.
411 std::vector<int>* pruned_other_nodes) {
412 pruned_other_nodes->clear();
413 const int part_index = partition.PartOf(representative_node);
414 // Iterate on all contiguous representatives after the initial one...
415 int repr = representative_node;
416 while (true) {
417 DCHECK_EQ(repr, node_equivalence_classes->GetRoot(repr));
418 repr = representatives_sorted_by_index_in_partition.Prev(repr);
419 if (repr < 0 || partition.PartOf(repr) != part_index) break;
420 pruned_other_nodes->push_back(repr);
421 }
422 // ... and then on all contiguous representatives *before* it.
423 repr = representative_node;
424 while (true) {
425 DCHECK_EQ(repr, node_equivalence_classes->GetRoot(repr));
426 repr = representatives_sorted_by_index_in_partition.Next(repr);
427 if (repr < 0 || partition.PartOf(repr) != part_index) break;
428 pruned_other_nodes->push_back(repr);
429 }
430
431 // This code is a bit tricky, so we check that we're doing it right, by
432 // comparing its output to the brute-force, O(Part size) version.
433 // This also (partly) verifies that
434 // "representatives_sorted_by_index_in_partition" is what it claims it is.
435 if (DEBUG_MODE) {
436 std::vector<int> expected_output;
437 for (const int e : partition.ElementsInPart(part_index)) {
438 if (node_equivalence_classes->GetRoot(e) != representative_node) {
439 expected_output.push_back(e);
440 }
441 }
442 node_equivalence_classes->KeepOnlyOneNodePerPart(&expected_output);
443 for (int& x : expected_output) x = node_equivalence_classes->GetRoot(x);
444 std::sort(expected_output.begin(), expected_output.end());
445 std::vector<int> sorted_output = *pruned_other_nodes;
446 std::sort(sorted_output.begin(), sorted_output.end());
447 DCHECK_EQ(absl::StrJoin(expected_output, " "),
448 absl::StrJoin(sorted_output, " "));
449 }
450}
451} // namespace
452
453absl::Status GraphSymmetryFinder::FindSymmetries(
454 std::vector<int>* node_equivalence_classes_io,
455 std::vector<std::unique_ptr<SparsePermutation>>* generators,
456 std::vector<int>* factorized_automorphism_group_size,
457 TimeLimit* time_limit) {
458 // Initialization.
459 time_limit_ = time_limit == nullptr ? &dummy_time_limit_ : time_limit;
460 IF_STATS_ENABLED(stats_.initialization_time.StartTimer());
461 generators->clear();
462 factorized_automorphism_group_size->clear();
463 if (node_equivalence_classes_io->size() != NumNodes()) {
464 return absl::Status(absl::StatusCode::kInvalidArgument,
465 "Invalid 'node_equivalence_classes_io'.");
466 }
467 DynamicPartition base_partition(*node_equivalence_classes_io);
468 // Break all inherent asymmetries in the graph.
469 {
470 ScopedTimeDistributionUpdater u(&stats_.initialization_refine_time);
471 RecursivelyRefinePartitionByAdjacency(/*first_unrefined_part_index=*/0,
472 &base_partition);
473 }
474 if (time_limit_->LimitReached()) {
475 return absl::Status(absl::StatusCode::kDeadlineExceeded,
476 "During the initial refinement.");
477 }
478 VLOG(4) << "Base partition: "
479 << base_partition.DebugString(/*sort_parts_lexicographically=*/false);
480
481 MergingPartition node_equivalence_classes(NumNodes());
482 std::vector<std::vector<int>> permutations_displacing_node(NumNodes());
483 std::vector<int> potential_root_image_nodes;
484 IF_STATS_ENABLED(stats_.initialization_time.StopTimerAndAddElapsedTime());
485
486 // To find all permutations of the Graph that satisfy the current partition,
487 // we pick an element v that is not in a singleton part, and we
488 // split the search in two phases:
489 // 1) Find (the generators of) all permutations that keep v invariant.
490 // 2) For each w in PartOf(v) such that w != v:
491 // find *one* permutation that maps v to w, if it exists.
492 // if it does exist, add it to the generators.
493 //
494 // The part 1) is recursive.
495 //
496 // Since we can't really use true recursion because it will be too deep for
497 // the stack, we implement it iteratively. To do that, we unroll 1):
498 // the "invariant dive" is a single pass that successively refines the node
499 // base_partition with elements from non-singleton parts (the 'invariant
500 // node'), until all parts are singletons.
501 // We remember which nodes we picked as invariants, and also the successive
502 // partition sizes as we refine it, to allow us to backtrack.
503 // Then we'll perform 2) in the reverse order, backtracking the stack from 1)
504 // while using another dedicated stack for the search (see below).
505 IF_STATS_ENABLED(stats_.invariant_dive_time.StartTimer());
506 struct InvariantDiveState {
507 int invariant_node;
508 int num_parts_before_refinement;
509
510 InvariantDiveState(int node, int num_parts)
511 : invariant_node(node), num_parts_before_refinement(num_parts) {}
512 };
513 std::vector<InvariantDiveState> invariant_dive_stack;
514 // TODO(user): experiment with, and briefly describe the results of various
515 // algorithms for picking the invariant node:
516 // - random selection
517 // - highest/lowest degree first
518 // - enumerate by part index; or by part size
519 // - etc.
520 for (int invariant_node = 0; invariant_node < NumNodes(); ++invariant_node) {
521 if (base_partition.ElementsInSamePartAs(invariant_node).size() == 1) {
522 continue;
523 }
524 invariant_dive_stack.push_back(
525 InvariantDiveState(invariant_node, base_partition.NumParts()));
526 DistinguishNodeInPartition(invariant_node, &base_partition, nullptr);
527 VLOG(4) << "Invariant dive: invariant node = " << invariant_node
528 << "; partition after: "
529 << base_partition.DebugString(
530 /*sort_parts_lexicographically=*/false);
531 if (time_limit_->LimitReached()) {
532 return absl::Status(absl::StatusCode::kDeadlineExceeded,
533 "During the invariant dive.");
534 }
535 }
536 DenseDoublyLinkedList representatives_sorted_by_index_in_partition(
537 base_partition.ElementsInHierarchicalOrder());
538 DynamicPartition image_partition = base_partition;
539 IF_STATS_ENABLED(stats_.invariant_dive_time.StopTimerAndAddElapsedTime());
540 // Now we've dived to the bottom: we're left with the identity permutation,
541 // which we don't need as a generator. We move on to phase 2).
542
543 IF_STATS_ENABLED(stats_.main_search_time.StartTimer());
544 while (!invariant_dive_stack.empty()) {
545 if (time_limit_->LimitReached()) break;
546 // Backtrack the last step of 1) (the invariant dive).
547 IF_STATS_ENABLED(stats_.invariant_unroll_time.StartTimer());
548 const int root_node = invariant_dive_stack.back().invariant_node;
549 const int base_num_parts =
550 invariant_dive_stack.back().num_parts_before_refinement;
551 invariant_dive_stack.pop_back();
552 base_partition.UndoRefineUntilNumPartsEqual(base_num_parts);
553 image_partition.UndoRefineUntilNumPartsEqual(base_num_parts);
554 VLOG(4) << "Backtracking invariant dive: root node = " << root_node
555 << "; partition: "
556 << base_partition.DebugString(
557 /*sort_parts_lexicographically=*/false);
558
559 // Now we'll try to map "root_node" to all image nodes that seem compatible
560 // and that aren't "root_node" itself.
561 //
562 // Doing so, we're able to detect potential bad (or good) matches by
563 // refining the 'base' partition with "root_node"; and refining the
564 // 'image' partition (which represents the partition of image nodes,
565 // i.e. the nodes after applying the currently implicit permutation)
566 // with that candidate image node: if the two partitions don't match, then
567 // the candidate image isn't compatible.
568 // If the partitions do match, we might either find the underlying
569 // permutation directly, or we might need to further try and map other
570 // nodes to their image: this is a recursive search with backtracking.
571
572 // The potential images of root_node are the nodes in its part. They can be
573 // pruned by the already computed equivalence classes.
574 // TODO(user): choose the representative of each equivalence class more
575 // carefully, in order to reduce the permutation support down the line.
576 // TODO(user): Don't build a list; but instead use direct, inline iteration
577 // on the representatives in the while() loop below, to benefit from the
578 // incremental merging of the equivalence classes.
579 DCHECK_EQ(1, node_equivalence_classes.NumNodesInSamePartAs(root_node));
580 GetAllOtherRepresentativesInSamePartAs(
581 root_node, base_partition, representatives_sorted_by_index_in_partition,
582 &node_equivalence_classes, &potential_root_image_nodes);
583 DCHECK(!potential_root_image_nodes.empty());
584 IF_STATS_ENABLED(stats_.invariant_unroll_time.StopTimerAndAddElapsedTime());
585
586 // Try to map "root_node" to all of its potential images. For each image,
587 // we only care about finding a single compatible permutation, if it exists.
588 while (!potential_root_image_nodes.empty()) {
589 if (time_limit_->LimitReached()) break;
590 VLOG(4) << "Potential (pruned) images of root node " << root_node
591 << " left: [" << absl::StrJoin(potential_root_image_nodes, " ")
592 << "].";
593 const int root_image_node = potential_root_image_nodes.back();
594 VLOG(4) << "Trying image of root node: " << root_image_node;
595
596 std::unique_ptr<SparsePermutation> permutation =
597 FindOneSuitablePermutation(root_node, root_image_node,
598 &base_partition, &image_partition,
599 *generators, permutations_displacing_node);
600
601 if (permutation != nullptr) {
602 ScopedTimeDistributionUpdater u(&stats_.permutation_output_time);
603 // We found a permutation. We store it in the list of generators, and
604 // further prune out the remaining 'root' image candidates, taking into
605 // account the permutation we just found.
606 MergeNodeEquivalenceClassesAccordingToPermutation(
607 *permutation, &node_equivalence_classes,
608 &representatives_sorted_by_index_in_partition);
609 // HACK(user): to make sure that we keep root_image_node as the
610 // representative of its part, we temporarily move it to the front
611 // of the vector, then move it again to the back so that it gets
612 // deleted by the pop_back() below.
613 SwapFrontAndBack(&potential_root_image_nodes);
614 node_equivalence_classes.KeepOnlyOneNodePerPart(
615 &potential_root_image_nodes);
616 SwapFrontAndBack(&potential_root_image_nodes);
617
618 // Register it onto the permutations_displacing_node vector.
619 const int permutation_index = static_cast<int>(generators->size());
620 for (const int node : permutation->Support()) {
621 permutations_displacing_node[node].push_back(permutation_index);
622 }
623
624 // Move the permutation to the generator list (this also transfers
625 // ownership).
626 generators->push_back(std::move(permutation));
627 }
628
629 potential_root_image_nodes.pop_back();
630 }
631
632 // We keep track of the size of the orbit of 'root_node' under the
633 // current subgroup: this is one of the factors of the total group size.
634 // TODO(user): better, more complete explanation.
635 factorized_automorphism_group_size->push_back(
636 node_equivalence_classes.NumNodesInSamePartAs(root_node));
637 }
638 node_equivalence_classes.FillEquivalenceClasses(node_equivalence_classes_io);
639 IF_STATS_ENABLED(stats_.main_search_time.StopTimerAndAddElapsedTime());
640 IF_STATS_ENABLED(stats_.SetPrintOrder(StatsGroup::SORT_BY_NAME));
641 IF_STATS_ENABLED(LOG(INFO) << "Statistics: " << stats_.StatString());
642 if (time_limit_->LimitReached()) {
643 return absl::Status(absl::StatusCode::kDeadlineExceeded,
644 "Some automorphisms were found, but probably not all.");
645 }
646 return ::absl::OkStatus();
647}
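// Illustrative usage of FindSymmetries() (not part of the original source;
// assumes GraphSymmetryFinder::Graph is util::StaticGraph<int, int>):
//
//   // Symmetric 4-cycle 0 - 1 - 2 - 3 - 0, with arcs in both directions.
//   ::util::StaticGraph<int, int> graph(/*num_nodes=*/4, /*arc_capacity=*/8);
//   for (const auto& [a, b] :
//        std::vector<std::pair<int, int>>{{0, 1}, {1, 2}, {2, 3}, {3, 0}}) {
//     graph.AddArc(a, b);
//     graph.AddArc(b, a);
//   }
//   graph.Build();
//   GraphSymmetryFinder finder(graph, /*is_undirected=*/true);
//   std::vector<int> equivalence_classes(4, 0);  // Initially all equivalent.
//   std::vector<std::unique_ptr<SparsePermutation>> generators;
//   std::vector<int> factorized_group_size;
//   const absl::Status status = finder.FindSymmetries(
//       &equivalence_classes, &generators, &factorized_group_size);
//   // On success, the product of the entries of 'factorized_group_size' is
//   // the size of the automorphism group: 8 for the 4-cycle (dihedral group D4).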
648
649namespace {
650// This method can be easily understood in the context of
651// ConfirmFullMatchOrFindNextMappingDecision(): see its call sites.
652// Knowing that we want to map some element of part #part_index of
653// "base_partition" to part #part_index of "image_partition", pick the "best"
654// such mapping, for the global search algorithm.
655inline void GetBestMapping(const DynamicPartition& base_partition,
656 const DynamicPartition& image_partition,
657 int part_index, int* base_node, int* image_node) {
658 // As of pending CL 66620435, we've loosely tried three variants of
659 // GetBestMapping():
660 // 1) Just take the first element of the base part, map it to the first
661 // element of the image part.
662 // 2) Just take the first element of the base part, and map it to itself if
663 // possible, else map it to the first element of the image part
664 // 3) Scan all elements of the base part until we find one that can map to
665 // itself. If there isn't one, we just fall back to strategy 1).
666 //
667 // Variant 2) gives the best results on most benchmarks, in terms of speed,
668 // but 3) yields much smaller supports for the sat_holeXXX benchmarks, as
669 // long as it's combined with the other tweak enabled by
670 // FLAGS_minimize_permutation_support_size.
671 if (absl::GetFlag(FLAGS_minimize_permutation_support_size)) {
672 // Variant 3).
673 for (const int node : base_partition.ElementsInPart(part_index)) {
674 if (image_partition.PartOf(node) == part_index) {
675 *image_node = *base_node = node;
676 return;
677 }
678 }
679 *base_node = *base_partition.ElementsInPart(part_index).begin();
680 *image_node = *image_partition.ElementsInPart(part_index).begin();
681 return;
682 }
683
684 // Variant 2).
685 *base_node = *base_partition.ElementsInPart(part_index).begin();
686 if (image_partition.PartOf(*base_node) == part_index) {
687 *image_node = *base_node;
688 } else {
689 *image_node = *image_partition.ElementsInPart(part_index).begin();
690 }
691}
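// Illustrative example (not part of the original source): with base part
// {3, 5} and image part {5, 7}, variant 2) maps 3 -> 5 (3 cannot map to
// itself, so the first image element is used), whereas variant 3) notices
// that 5 belongs to both parts and maps 5 -> 5, which keeps the support of
// the eventual permutation smaller.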
692} // namespace
693
694// TODO(user): refactor this method and its submethods into a dedicated class
695// whose members will be ominously accessed by all the class methods; most
696// notably the search state stack. This may improve readability.
697std::unique_ptr<SparsePermutation>
698GraphSymmetryFinder::FindOneSuitablePermutation(
699 int root_node, int root_image_node, DynamicPartition* base_partition,
700 DynamicPartition* image_partition,
701 absl::Span<const std::unique_ptr<SparsePermutation>>
702 generators_found_so_far,
703 absl::Span<const std::vector<int>> permutations_displacing_node) {
704 // DCHECKs() and statistics.
705 ScopedTimeDistributionUpdater search_time_updater(&stats_.search_time);
706 DCHECK_EQ("", tmp_dynamic_permutation_.DebugString());
707 DCHECK_EQ(base_partition->NumParts(), image_partition->NumParts());
708 if (DEBUG_MODE) {
709 for (int i = 0; i < base_partition->NumParts(); ++i) {
710 DCHECK_EQ(base_partition->FprintOfPart(i),
711 image_partition->FprintOfPart(i))
712 << base_partition->DebugString(/*sort_parts_lexicographically=*/false)
713 << " "
714 << image_partition->DebugString(
715 /*sort_parts_lexicographically=*/false);
716 }
717 }
718 DCHECK(search_states_.empty());
719
720 // These will be used during the search. See their usage.
721 std::vector<int> base_singletons;
722 std::vector<int> image_singletons;
723 int next_base_node;
724 int next_image_node;
725 int min_potential_mismatching_part_index;
726 std::vector<int> next_potential_image_nodes;
727
728 // Initialize the search: we can already distinguish "root_node" in the base
729 // partition. See the comment below.
730 search_states_.emplace_back(
731 /*base_node=*/root_node, /*first_image_node=*/-1,
732 /*num_parts_before_trying_to_map_base_node=*/base_partition->NumParts(),
733 /*min_potential_mismatching_part_index=*/base_partition->NumParts());
734 // We inject the image node directly as the "remaining_pruned_image_nodes".
735 search_states_.back().remaining_pruned_image_nodes.assign(1, root_image_node);
736 {
737 ScopedTimeDistributionUpdater u(&stats_.initial_search_refine_time);
738 DistinguishNodeInPartition(root_node, base_partition, &base_singletons);
739 }
740 while (!search_states_.empty()) {
741 if (time_limit_->LimitReached()) return nullptr;
742 // When exploring a SearchState "ss", we're supposed to have:
743 // - A base_partition that has already been refined on ss->base_node.
744 // (base_singleton is the list of singletons created on the base
745 // partition during that refinement).
746 // - A non-empty list of potential image nodes (we'll try them in reverse
747 // order).
748 // - An image partition that hasn't been refined yet.
749 //
750 // Also, one should note that the base partition (before its refinement on
751 // base_node) was deemed compatible with the image partition as it is now.
752 const SearchState& ss = search_states_.back();
753 const int image_node = ss.first_image_node >= 0
754 ? ss.first_image_node
755 : ss.remaining_pruned_image_nodes.back();
756
757 // Statistics, DCHECKs.
758 IF_STATS_ENABLED(stats_.search_depth.Add(search_states_.size()));
759 DCHECK_EQ(ss.num_parts_before_trying_to_map_base_node,
760 image_partition->NumParts());
761
762 // Apply the decision: map base_node to image_node. Since base_partition
763 // was already refined on base_node, we just need to refine image_partition.
764 {
765 ScopedTimeDistributionUpdater u(&stats_.search_refine_time);
766 DistinguishNodeInPartition(image_node, image_partition,
767 &image_singletons);
768 }
769 VLOG(4) << ss.DebugString();
770 VLOG(4) << base_partition->DebugString(
771 /*sort_parts_lexicographically=*/false);
772 VLOG(4) << image_partition->DebugString(
773 /*sort_parts_lexicographically=*/false);
774
775 // Run some diagnoses on the two partitions. There are many outcomes, so
776 // it's a bit complicated:
777 // 1) The partitions are incompatible
778 // - Because of a straightforward criterion (size mismatch).
779 // - Because they are both fully refined (i.e. singletons only), yet the
780 // permutation induced by them is not a graph automorphism.
781 // 2) The partitions induce a permutation (all their non-singleton parts are
782 // identical), and this permutation is a graph automorphism.
783 // 3) The partitions need further refinement:
784 // - Because some non-singleton parts aren't equal in the base and image
785 // partition
786 // - Or because they are a full match (i.e. may induce a permutation,
787 // like in 2)), but the induced permutation isn't a graph automorphism.
788 bool compatible = true;
789 {
790 ScopedTimeDistributionUpdater u(&stats_.quick_compatibility_time);
791 compatible = PartitionsAreCompatibleAfterPartIndex(
792 *base_partition, *image_partition,
793 ss.num_parts_before_trying_to_map_base_node);
794 u.AlsoUpdate(compatible ? &stats_.quick_compatibility_success_time
795 : &stats_.quick_compatibility_fail_time);
796 }
797 bool partitions_are_full_match = false;
798 if (compatible) {
799 {
800 ScopedTimeDistributionUpdater u(
801 &stats_.dynamic_permutation_refinement_time);
802 tmp_dynamic_permutation_.AddMappings(base_singletons, image_singletons);
803 }
804 ScopedTimeDistributionUpdater u(&stats_.map_election_std_time);
805 min_potential_mismatching_part_index =
806 ss.min_potential_mismatching_part_index;
807 partitions_are_full_match = ConfirmFullMatchOrFindNextMappingDecision(
808 *base_partition, *image_partition, tmp_dynamic_permutation_,
809 &min_potential_mismatching_part_index, &next_base_node,
810 &next_image_node);
811 u.AlsoUpdate(partitions_are_full_match
812 ? &stats_.map_election_std_full_match_time
813 : &stats_.map_election_std_mapping_time);
814 }
815 if (compatible && partitions_are_full_match) {
816 DCHECK_EQ(min_potential_mismatching_part_index,
817 base_partition->NumParts());
818 // We have a permutation candidate!
819 // Note(user): we also deal with (extremely rare) false positives for
820 // "partitions_are_full_match" here: in case they aren't a full match,
821 // IsGraphAutomorphism() will catch that; and we'll simply deepen the
822 // search.
823 bool is_automorphism = true;
824 {
825 ScopedTimeDistributionUpdater u(&stats_.automorphism_test_time);
826 is_automorphism = IsGraphAutomorphism(tmp_dynamic_permutation_);
827 u.AlsoUpdate(is_automorphism ? &stats_.automorphism_test_success_time
828 : &stats_.automorphism_test_fail_time);
829 }
830 if (is_automorphism) {
831 ScopedTimeDistributionUpdater u(&stats_.search_finalize_time);
832 // We found a valid permutation. We can return it, but first we
833 // must restore the partitions to their original state.
834 std::unique_ptr<SparsePermutation> sparse_permutation(
835 tmp_dynamic_permutation_.CreateSparsePermutation());
836 VLOG(4) << "Automorphism found: " << sparse_permutation->DebugString();
837 const int base_num_parts =
838 search_states_[0].num_parts_before_trying_to_map_base_node;
839 base_partition->UndoRefineUntilNumPartsEqual(base_num_parts);
840 image_partition->UndoRefineUntilNumPartsEqual(base_num_parts);
841 tmp_dynamic_permutation_.Reset();
842 search_states_.clear();
843
844 search_time_updater.AlsoUpdate(&stats_.search_time_success);
845 return sparse_permutation;
846 }
847
848 // The permutation isn't a valid automorphism. Either the partitions were
849 // fully refined, and we deem them incompatible, or they weren't, and we
850 // consider them as 'not a full match'.
851 VLOG(4) << "Permutation candidate isn't a valid automorphism.";
852 if (base_partition->NumParts() == NumNodes()) {
853 // Fully refined: the partitions are incompatible.
854 compatible = false;
855 ScopedTimeDistributionUpdater u(&stats_.dynamic_permutation_undo_time);
856 tmp_dynamic_permutation_.UndoLastMappings(&base_singletons);
857 } else {
858 ScopedTimeDistributionUpdater u(&stats_.map_reelection_time);
859 // TODO(user): try to get the non-singleton part from
860 // DynamicPermutation in O(1). On some graphs, like the symmetry graph of
861 // the MIP problem lectsched-4-obj.mps.gz, this takes the majority of the
862 // time!
863 int non_singleton_part = 0;
864 {
865 ScopedTimeDistributionUpdater u(&stats_.non_singleton_search_time);
866 while (base_partition->SizeOfPart(non_singleton_part) == 1) {
867 ++non_singleton_part;
868 DCHECK_LT(non_singleton_part, base_partition->NumParts());
869 }
870 }
871 time_limit_->AdvanceDeterministicTime(
872 1e-9 * static_cast<double>(non_singleton_part));
873
874 // The partitions are compatible, but we'll deepen the search on some
875 // non-singleton part. We can pick any base and image node in this case.
876 GetBestMapping(*base_partition, *image_partition, non_singleton_part,
877 &next_base_node, &next_image_node);
878 }
879 }
880
881 // Now we've fully diagnosed our partitions, and have already dealt with
882 // case 2). We're left to deal with 1) and 3).
883 //
884 // Case 1): partitions are incompatible.
885 if (!compatible) {
886 ScopedTimeDistributionUpdater u(&stats_.backtracking_time);
887 // We invalidate the current image node, and prune the remaining image
888 // nodes. We might be left with no other image nodes, which means that
889 // we'll backtrack, i.e. pop our current SearchState and invalidate the
890 // 'current' image node of the upper SearchState (which might lead to us
891 // backtracking it, and so on).
892 while (!search_states_.empty()) {
893 SearchState* const last_ss = &search_states_.back();
894 image_partition->UndoRefineUntilNumPartsEqual(
895 last_ss->num_parts_before_trying_to_map_base_node);
896 if (last_ss->first_image_node >= 0) {
897 // Find out and prune the remaining potential image nodes: there is
898 // no permutation that maps base_node -> image_node that is
899 // compatible with the current partition, so there can't be a
900 // permutation that maps base_node -> X either, for all X in the orbit
901 // of 'image_node' under valid permutations compatible with the
902 // current partition. Ditto for other potential image nodes.
903 //
904 // TODO(user): fix this: we should really be collecting all
905 // permutations displacing any node in "image_part", for the pruning
906 // to be really exhaustive. We could also consider alternative ways,
907 // like incrementally maintaining the list of permutations compatible
908 // with the partition so far.
909 const int part = image_partition->PartOf(last_ss->first_image_node);
910 last_ss->remaining_pruned_image_nodes.reserve(
911 image_partition->SizeOfPart(part));
912 last_ss->remaining_pruned_image_nodes.push_back(
913 last_ss->first_image_node);
914 for (const int e : image_partition->ElementsInPart(part)) {
915 if (e != last_ss->first_image_node) {
916 last_ss->remaining_pruned_image_nodes.push_back(e);
917 }
918 }
919 {
920 ScopedTimeDistributionUpdater u(&stats_.pruning_time);
921 PruneOrbitsUnderPermutationsCompatibleWithPartition(
922 *image_partition, generators_found_so_far,
923 permutations_displacing_node[last_ss->first_image_node],
924 &last_ss->remaining_pruned_image_nodes);
925 }
926 SwapFrontAndBack(&last_ss->remaining_pruned_image_nodes);
927 DCHECK_EQ(last_ss->remaining_pruned_image_nodes.back(),
928 last_ss->first_image_node);
929 last_ss->first_image_node = -1;
930 }
931 last_ss->remaining_pruned_image_nodes.pop_back();
932 if (!last_ss->remaining_pruned_image_nodes.empty()) break;
933
934 VLOG(4) << "Backtracking one level up.";
935 base_partition->UndoRefineUntilNumPartsEqual(
936 last_ss->num_parts_before_trying_to_map_base_node);
937 // If this was the root search state (i.e. we fully backtracked and
938 // will exit the search after that), we don't have mappings to undo.
939 // We run UndoLastMappings() anyway, because it's a no-op in that case.
940 tmp_dynamic_permutation_.UndoLastMappings(&base_singletons);
941 search_states_.pop_back();
942 }
943 // Continue the search.
944 continue;
945 }
946
947 // Case 3): we deepen the search.
948 // Since the search loop expects base_partition to already be refined on
949 // the new base node, we perform that refinement here.
950 VLOG(4) << " Deepening the search.";
951 search_states_.emplace_back(
952 next_base_node, next_image_node,
953 /*num_parts_before_trying_to_map_base_node*/ base_partition->NumParts(),
954 min_potential_mismatching_part_index);
955 {
956 ScopedTimeDistributionUpdater u(&stats_.search_refine_time);
957 DistinguishNodeInPartition(next_base_node, base_partition,
958 &base_singletons);
959 }
960 }
961 // We exhausted the search; we didn't find any permutation.
962 search_time_updater.AlsoUpdate(&stats_.search_time_fail);
963 return nullptr;
964}
965
966util::BeginEndWrapper<std::vector<int>::const_iterator>
967GraphSymmetryFinder::TailsOfIncomingArcsTo(int node) const {
968 return util::BeginEndWrapper<std::vector<int>::const_iterator>(
969 flattened_reverse_adj_lists_.begin() + reverse_adj_list_index_[node],
970 flattened_reverse_adj_lists_.begin() + reverse_adj_list_index_[node + 1]);
971}
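// Illustrative note (not part of the original source): with the 3-node
// example sketched after the constructor above (arcs 0->2, 1->2, 2->0),
// TailsOfIncomingArcsTo() returns {2} for node 0, an empty range for node 1,
// and {0, 1} for node 2.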
972
973void GraphSymmetryFinder::PruneOrbitsUnderPermutationsCompatibleWithPartition(
974 const DynamicPartition& partition,
975 absl::Span<const std::unique_ptr<SparsePermutation>> permutations,
976 absl::Span<const int> permutation_indices, std::vector<int>* nodes) {
977 VLOG(4) << " Pruning [" << absl::StrJoin(*nodes, ", ") << "]";
978 // TODO(user): apply a smarter test to decide whether to do the pruning
979 // or not: we can accurately estimate the cost of pruning (iterate through
980 // all generators found so far) and its estimated benefit (the cost of
981 // the search below the state that we're currently in, times the expected
982 // number of pruned nodes). Sometimes it may be better to skip the
983 // pruning.
984 if (nodes->size() <= 1) return;
985
986 // Iterate on all targeted permutations. If they are compatible, apply
987 // them to tmp_partition_ which will contain the incrementally merged
988 // equivalence classes.
989 std::vector<int>& tmp_nodes_on_support =
990 tmp_stack_; // Rename, for readability.
991 DCHECK(tmp_nodes_on_support.empty());
992 // TODO(user): investigate further optimizations: maybe it's possible
993 // to incrementally maintain the set of permutations that is compatible
994 // with the current partition, instead of recomputing it here?
995 for (const int p : permutation_indices) {
996 const SparsePermutation& permutation = *permutations[p];
997 // First, a quick compatibility check: the permutation's cycles must be
998 // smaller or equal to the size of the part that they are included in.
999 bool compatible = true;
1000 for (int c = 0; c < permutation.NumCycles(); ++c) {
1001 const SparsePermutation::Iterator cycle = permutation.Cycle(c);
1002 if (cycle.size() >
1003 partition.SizeOfPart(partition.PartOf(*cycle.begin()))) {
1004 compatible = false;
1005 break;
1006 }
1007 }
1008 if (!compatible) continue;
1009 // Now the full compatibility check: each cycle of the permutation must
1010 // be fully included in an image part.
1011 for (int c = 0; c < permutation.NumCycles(); ++c) {
1012 int part = -1;
1013 for (const int node : permutation.Cycle(c)) {
1014 if (partition.PartOf(node) != part) {
1015 if (part >= 0) {
1016 compatible = false;
1017 break;
1018 }
1019 part = partition.PartOf(node); // Initialization of 'part'.
1020 }
1021 }
1022 }
1023 if (!compatible) continue;
1024 // The permutation is fully compatible!
1025 // TODO(user): ignore cycles that are outside of image_part.
1026 MergeNodeEquivalenceClassesAccordingToPermutation(permutation,
1027 &tmp_partition_, nullptr);
1028 for (const int node : permutation.Support()) {
1029 if (!tmp_node_mask_[node]) {
1030 tmp_node_mask_[node] = true;
1031 tmp_nodes_on_support.push_back(node);
1032 }
1033 }
1034 }
1035
1036 // Apply the pruning.
1037 tmp_partition_.KeepOnlyOneNodePerPart(nodes);
1038
1039 // Reset the "tmp_" structures sparsely.
1040 for (const int node : tmp_nodes_on_support) {
1041 tmp_node_mask_[node] = false;
1042 tmp_partition_.ResetNode(node);
1043 }
1044 tmp_nodes_on_support.clear();
1045 VLOG(4) << " Pruned: [" << absl::StrJoin(*nodes, ", ") << "]";
1046}
1047
1048bool GraphSymmetryFinder::ConfirmFullMatchOrFindNextMappingDecision(
1049 const DynamicPartition& base_partition,
1050 const DynamicPartition& image_partition,
1051 const DynamicPermutation& current_permutation_candidate,
1052 int* min_potential_mismatching_part_index_io, int* next_base_node,
1053 int* next_image_node) const {
1054 *next_base_node = -1;
1055 *next_image_node = -1;
1056
1057 // The following clause should be true most of the time, except in some
1058 // specific use cases.
1059 if (!absl::GetFlag(FLAGS_minimize_permutation_support_size)) {
1060 // First, we try to map the loose ends of the current permutation: these
1061 // loose ends can't be mapped to themselves, so we'll have to map them to
1062 // something anyway.
1063 for (const int loose_node : current_permutation_candidate.LooseEnds()) {
1064 DCHECK_GT(base_partition.ElementsInSamePartAs(loose_node).size(), 1);
1065 *next_base_node = loose_node;
1066 const int root = current_permutation_candidate.RootOf(loose_node);
1067 DCHECK_NE(root, loose_node);
1068 if (image_partition.PartOf(root) == base_partition.PartOf(loose_node)) {
1069 // We prioritize mapping a loose end to its own root (i.e. close a
1070 // cycle), if possible, like here: we exit immediately.
1071 *next_image_node = root;
1072 return false;
1073 }
1074 }
1075 if (*next_base_node != -1) {
1076 // We found loose ends, but none that mapped to its own root. Just pick
1077 // any valid image.
1078 *next_image_node =
1079 *image_partition
1080 .ElementsInPart(base_partition.PartOf(*next_base_node))
1081 .begin();
1082 return false;
1083 }
1084 }
1085
1086 // If there is no loose node (i.e. the current permutation only has closed
1087 // cycles), we fall back to picking any part that is different in the base and
1088 // image partitions; because we know that some mapping decision will have to
1089 // be made there.
1090 // SUBTLE: we use "min_potential_mismatching_part_index_io" to incrementally
1091 // keep running this search (for a mismatching part) from where we left off.
1092 // TODO(user): implement a simpler search for a mismatching part: it's
1093 // trivially possible if the base partition maintains a hash set of all
1094 // Fprints of its parts, and if the image partition uses that to maintain the
1095 // list of 'different' non-singleton parts.
1096 const int initial_min_potential_mismatching_part_index =
1097 *min_potential_mismatching_part_index_io;
1098 for (; *min_potential_mismatching_part_index_io < base_partition.NumParts();
1099 ++*min_potential_mismatching_part_index_io) {
1100 const int p = *min_potential_mismatching_part_index_io;
1101 if (base_partition.SizeOfPart(p) != 1 &&
1102 base_partition.FprintOfPart(p) != image_partition.FprintOfPart(p)) {
1103 GetBestMapping(base_partition, image_partition, p, next_base_node,
1104 next_image_node);
1105 return false;
1106 }
1107
1108 const int parent = base_partition.ParentOfPart(p);
1109 if (parent < initial_min_potential_mismatching_part_index &&
1110 base_partition.SizeOfPart(parent) != 1 &&
1111 base_partition.FprintOfPart(parent) !=
1112 image_partition.FprintOfPart(parent)) {
1113 GetBestMapping(base_partition, image_partition, parent, next_base_node,
1114 next_image_node);
1115 return false;
1116 }
1117 }
1118
1119 // We didn't find an unequal part. DCHECK that our "incremental" check was
1120 // actually correct and that all non-singleton parts are indeed equal.
1121 if (DEBUG_MODE) {
1122 for (int p = 0; p < base_partition.NumParts(); ++p) {
1123 if (base_partition.SizeOfPart(p) != 1) {
1124 CHECK_EQ(base_partition.FprintOfPart(p),
1125 image_partition.FprintOfPart(p));
1126 }
1127 }
1128 }
1129 return true;
1130}
1131
1132std::string GraphSymmetryFinder::SearchState::DebugString() const {
1133 return absl::StrFormat(
1134 "SearchState{ base_node=%d, first_image_node=%d,"
1135 " remaining_pruned_image_nodes=[%s],"
1136 " num_parts_before_trying_to_map_base_node=%d }",
1137 base_node, first_image_node,
1138 absl::StrJoin(remaining_pruned_image_nodes, " "),
1139 num_parts_before_trying_to_map_base_node);
1140}
1141
1142} // namespace operations_research