OR-Tools 8.2
find_graph_symmetries.cc
// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/algorithms/find_graph_symmetries.h"

#include <algorithm>
#include <limits>
#include <numeric>

#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "ortools/algorithms/dense_doubly_linked_list.h"
#include "ortools/algorithms/dynamic_partition.h"
#include "ortools/algorithms/dynamic_permutation.h"
#include "ortools/algorithms/sparse_permutation.h"
#include "ortools/graph/util.h"

ABSL_FLAG(bool, minimize_permutation_support_size, false,
          "Tweak the algorithm to try and minimize the support size"
          " of the generators produced. This may negatively impact the"
          " performance, but works great on the sat_holeXXX benchmarks"
          " to reduce the support size.");

namespace operations_research {

using util::GraphIsSymmetric;

namespace {
// Some routines used below.
void SwapFrontAndBack(std::vector<int>* v) {
  DCHECK(!v->empty());
  std::swap((*v)[0], v->back());
}

bool PartitionsAreCompatibleAfterPartIndex(const DynamicPartition& p1,
                                           const DynamicPartition& p2,
                                           int part_index) {
  const int num_parts = p1.NumParts();
  if (p2.NumParts() != num_parts) return false;
  for (int p = part_index; p < num_parts; ++p) {
    if (p1.SizeOfPart(p) != p2.SizeOfPart(p) ||
        p1.ParentOfPart(p) != p2.ParentOfPart(p)) {
      return false;
    }
  }
  return true;
}
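
// Illustrative example (hypothetical partitions): if p1 = {0,1 | 2 | 3} and
// p2 = {1,3 | 2 | 0}, and both partitions obtained parts #1 and #2 by
// splitting them off part #0 (so part sizes and parents agree from index 1
// on), then PartitionsAreCompatibleAfterPartIndex(p1, p2, 1) returns true:
// the check only compares part sizes and parents, never the parts' contents.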

// Whether the "l1" list maps to "l2" under the permutation "permutation".
// This method uses a transient bitmask on all the elements, which
// should be entirely false before the call (and will be restored as such
// after it).
//
// TODO(user): Make this method support multi-elements (i.e. an element may
// be repeated in the list), and see if that's sufficient to make the whole
// graph symmetry finder support multi-arcs.
template <class List>
bool ListMapsToList(const List& l1, const List& l2,
                    const DynamicPermutation& permutation,
                    std::vector<bool>* tmp_node_mask) {
  int num_elements_delta = 0;
  bool match = true;
  for (const int mapped_x : l2) {
    ++num_elements_delta;
    (*tmp_node_mask)[mapped_x] = true;
  }
  for (const int x : l1) {
    --num_elements_delta;
    const int mapped_x = permutation.ImageOf(x);
    if (!(*tmp_node_mask)[mapped_x]) {
      match = false;
      break;
    }
    (*tmp_node_mask)[mapped_x] = false;
  }
  if (num_elements_delta != 0) match = false;
  if (!match) {
    // We need to clean up tmp_node_mask.
    for (const int x : l2) (*tmp_node_mask)[x] = false;
  }
  return match;
}
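
// Illustrative trace (hypothetical inputs): with l1 = [2, 5], l2 = [4, 7],
// and a permutation mapping 2->7 and 5->4, the first loop marks nodes 4 and
// 7 in the mask, and the second loop unmarks ImageOf(2) = 7 and
// ImageOf(5) = 4. The element-count delta ends at 0, so the lists match:
// the function returns true, and the mask is left all-false again, as the
// contract above requires.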
}  // namespace

GraphSymmetryFinder::GraphSymmetryFinder(const Graph& graph, bool is_undirected)
    : graph_(graph),
      tmp_dynamic_permutation_(NumNodes()),
      tmp_node_mask_(NumNodes(), false),
      tmp_degree_(NumNodes(), 0),
      tmp_nodes_with_degree_(NumNodes() + 1) {
  // Set up an "unlimited" time limit by default.
  time_limit_ = &dummy_time_limit_;
  tmp_partition_.Reset(NumNodes());
  if (is_undirected) {
    DCHECK(GraphIsSymmetric(graph));
  } else {
    // Compute the reverse adjacency lists.
    // First pass: compute the total in-degree of all nodes and put it in
    // reverse_adj_list_index (shifted by two; see below why).
    reverse_adj_list_index_.assign(graph.num_nodes() + /*shift*/ 2, 0);
    for (const int node : graph.AllNodes()) {
      for (const int arc : graph.OutgoingArcs(node)) {
        ++reverse_adj_list_index_[graph.Head(arc) + /*shift*/ 2];
      }
    }
    // Second pass: apply a cumulative sum over reverse_adj_list_index.
    // After that, reverse_adj_list_index contains:
    // [0, 0, in_degree(node0), in_degree(node0) + in_degree(node1), ...]
    std::partial_sum(reverse_adj_list_index_.begin() + /*shift*/ 2,
                     reverse_adj_list_index_.end(),
                     reverse_adj_list_index_.begin() + /*shift*/ 2);
    // Third pass: populate "flattened_reverse_adj_lists", using
    // reverse_adj_list_index[i] as a dynamic pointer to the yet-unpopulated
    // area of the reverse adjacency list of node #i.
    flattened_reverse_adj_lists_.assign(graph.num_arcs(), -1);
    for (const int node : graph.AllNodes()) {
      for (const int arc : graph.OutgoingArcs(node)) {
        flattened_reverse_adj_lists_[reverse_adj_list_index_[graph.Head(arc) +
                                                             /*shift*/ 1]++] =
            node;
      }
    }
    // The last pass shifted reverse_adj_list_index, so it's now as we want it:
    // [0, in_degree(node0), in_degree(node0) + in_degree(node1), ...]
    if (DEBUG_MODE) {
      DCHECK_EQ(graph.num_arcs(), reverse_adj_list_index_[graph.num_nodes()]);
      for (const int i : flattened_reverse_adj_lists_) DCHECK_NE(i, -1);
    }
  }
}
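
// Illustrative trace of the three passes above, on a hypothetical 3-node
// graph with arcs 0->2, 1->2 and 2->0 (so reverse_adj_list_index_ has
// 3 + 2 = 5 entries):
//   - after the first pass (in-degrees, shifted by 2): [0, 0, 1, 0, 2];
//   - after the cumulative sum:                        [0, 0, 1, 1, 3];
//   - the third pass fills flattened_reverse_adj_lists_ = [2, 0, 1] while
//     shifting each index one slot left, ending with    [0, 1, 1, 3, 3].
// TailsOfIncomingArcsTo(2) then reads the half-open slice
// [index[2], index[3]) = [1, 3) of the flattened array, i.e. tails {0, 1}.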

bool GraphSymmetryFinder::IsGraphAutomorphism(
    const DynamicPermutation& permutation) const {
  for (const int base : permutation.AllMappingsSrc()) {
    const int image = permutation.ImageOf(base);
    if (image == base) continue;
    if (!ListMapsToList(graph_[base], graph_[image], permutation,
                        &tmp_node_mask_)) {
      return false;
    }
  }
  if (!reverse_adj_list_index_.empty()) {
    // The graph was not symmetric: we must also check the incoming arcs
    // to displaced nodes.
    for (const int base : permutation.AllMappingsSrc()) {
      const int image = permutation.ImageOf(base);
      if (image == base) continue;
      if (!ListMapsToList(TailsOfIncomingArcsTo(base),
                          TailsOfIncomingArcsTo(image), permutation,
                          &tmp_node_mask_)) {
        return false;
      }
    }
  }
  return true;
}

namespace {
// Specialized subroutine, to avoid code duplication: see its call site
// and its self-explanatory code.
template <class T>
inline void IncrementCounterForNonSingletons(const T& nodes,
                                             const DynamicPartition& partition,
                                             std::vector<int>* node_count,
                                             std::vector<int>* nodes_seen,
                                             int64* num_operations) {
  *num_operations += nodes.end() - nodes.begin();
  for (const int node : nodes) {
    if (partition.ElementsInSamePartAs(node).size() == 1) continue;
    const int count = ++(*node_count)[node];
    if (count == 1) nodes_seen->push_back(node);
  }
}
}  // namespace

void GraphSymmetryFinder::RecursivelyRefinePartitionByAdjacency(
    int first_unrefined_part_index, DynamicPartition* partition) {
  // Rename, for readability of the code below.
  std::vector<int>& tmp_nodes_with_nonzero_degree = tmp_stack_;

  // This function is the main bottleneck of the whole algorithm. We count the
  // number of blocks in the inner-most loops in num_operations. At the end we
  // will multiply it by a factor to have some deterministic time that we will
  // append to the deterministic time counter.
  //
  // TODO(user): We are really imprecise in our counting, but it is fine. We
  // just need a way to enforce a deterministic limit on the computation effort.
  int64 num_operations = 0;

  // Assuming that the partition was refined based on the adjacency on
  // parts [0 .. first_unrefined_part_index) already, we simply need to
  // refine parts first_unrefined_part_index ... NumParts()-1, the latter bound
  // being a moving target:
  // When a part #p < first_unrefined_part_index gets modified, it's always
  // split in two: itself, and a new part #p'. Since #p was already refined
  // on, we only need to further refine on *one* of its two split parts.
  // And this will be done because p' > first_unrefined_part_index.
  //
  // Thus, the following loop really does the full recursive refinement as
  // advertised.
  std::vector<bool> adjacency_directions(1, /*outgoing*/ true);
  if (!reverse_adj_list_index_.empty()) {
    adjacency_directions.push_back(false);  // Also look at incoming arcs.
  }
  for (int part_index = first_unrefined_part_index;
       part_index < partition->NumParts();  // Moving target!
       ++part_index) {
    for (const bool outgoing_adjacency : adjacency_directions) {
      // Count the aggregated degree of all nodes, only looking at arcs that
      // come from/to the current part.
      if (outgoing_adjacency) {
        for (const int node : partition->ElementsInPart(part_index)) {
          IncrementCounterForNonSingletons(
              graph_[node], *partition, &tmp_degree_,
              &tmp_nodes_with_nonzero_degree, &num_operations);
        }
      } else {
        for (const int node : partition->ElementsInPart(part_index)) {
          IncrementCounterForNonSingletons(
              TailsOfIncomingArcsTo(node), *partition, &tmp_degree_,
              &tmp_nodes_with_nonzero_degree, &num_operations);
        }
      }
      // Group the nodes by (nonzero) degree. Remember the maximum degree.
      int max_degree = 0;
      num_operations += 3 + tmp_nodes_with_nonzero_degree.size();
      for (const int node : tmp_nodes_with_nonzero_degree) {
        const int degree = tmp_degree_[node];
        tmp_degree_[node] = 0;  // To clean up after us.
        max_degree = std::max(max_degree, degree);
        tmp_nodes_with_degree_[degree].push_back(node);
      }
      tmp_nodes_with_nonzero_degree.clear();  // To clean up after us.
      // For each degree, refine the partition by the set of nodes with that
      // degree.
      for (int degree = 1; degree <= max_degree; ++degree) {
        // We use a manually tuned factor 3 because Refine() does quite a bit
        // of operations for each node in its argument.
        num_operations += 1 + 3 * tmp_nodes_with_degree_[degree].size();
        partition->Refine(tmp_nodes_with_degree_[degree]);
        tmp_nodes_with_degree_[degree].clear();  // To clean up after us.
      }
    }
  }

  // The coefficient was manually tuned (only on a few instances) so that the
  // time is roughly correlated with seconds on a fast desktop computer from
  // 2020.
  time_limit_->AdvanceDeterministicTime(1e-8 *
                                        static_cast<double>(num_operations));
}
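
// Illustrative example (hypothetical): take the partition {0,1,2 | 3} of a
// graph with arcs 3->0 and 3->1, and first_unrefined_part_index = 1.
// Refining on part #1 = {3} gives nodes 0 and 1 an aggregated degree of 1,
// so Refine({0, 1}) splits part #0 into {0,1} and {2}; the loop then
// continues on the newly created parts (the moving target) until no further
// split occurs.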

void GraphSymmetryFinder::DistinguishNodeInPartition(
    int node, DynamicPartition* partition, std::vector<int>* new_singletons) {
  const int original_num_parts = partition->NumParts();
  partition->Refine(std::vector<int>(1, node));
  RecursivelyRefinePartitionByAdjacency(partition->PartOf(node), partition);

  // Explore the newly refined parts to gather all the new singletons.
  if (new_singletons != nullptr) {
    new_singletons->clear();
    for (int p = original_num_parts; p < partition->NumParts(); ++p) {
      const int parent = partition->ParentOfPart(p);
      // We may see the same singleton parent several times, so we guard them
      // with the tmp_node_mask_ boolean vector.
      if (!tmp_node_mask_[parent] && parent < original_num_parts &&
          partition->SizeOfPart(parent) == 1) {
        tmp_node_mask_[parent] = true;
        new_singletons->push_back(*partition->ElementsInPart(parent).begin());
      }
      if (partition->SizeOfPart(p) == 1) {
        new_singletons->push_back(*partition->ElementsInPart(p).begin());
      }
    }
    // Reset tmp_node_mask_.
    for (int p = original_num_parts; p < partition->NumParts(); ++p) {
      tmp_node_mask_[partition->ParentOfPart(p)] = false;
    }
  }
}
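
// Illustrative example (hypothetical): distinguishing node 0 in the
// one-part partition {0,1,2} first splits it into {1,2 | 0}, then refines by
// adjacency starting from the new part. If that refinement further splits
// {1,2} into {1 | 2}, then "new_singletons" receives 0, 1 and 2: both the
// newly created singleton parts and the old parts that shrank to size one.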

namespace {
void MergeNodeEquivalenceClassesAccordingToPermutation(
    const SparsePermutation& perm, MergingPartition* node_equivalence_classes,
    DenseDoublyLinkedList* sorted_representatives) {
  for (int c = 0; c < perm.NumCycles(); ++c) {
    // TODO(user): use the global element->image iterator when it exists.
    int prev = -1;
    for (const int e : perm.Cycle(c)) {
      if (prev >= 0) {
        const int removed_representative =
            node_equivalence_classes->MergePartsOf(prev, e);
        if (sorted_representatives != nullptr && removed_representative != -1) {
          sorted_representatives->Remove(removed_representative);
        }
      }
      prev = e;
    }
  }
}
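
// Illustrative example (hypothetical): for a permutation with the single
// cycle (2 4 6), the loop above merges the classes of 2 and 4, then of 4 and
// 6, so all three nodes end up in one equivalence class; each merge that
// actually joins two classes evicts the losing former representative from
// "sorted_representatives" (when MergePartsOf() reports one).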

// Subroutine used by FindSymmetries(); see its call site. This finds and
// outputs (in "pruned_other_nodes") the list of all representatives (under
// "node_equivalence_classes") that are in the same part as
// "representative_node" in "partition", other than "representative_node"
// itself.
// "node_equivalence_classes" must be compatible with "partition", i.e. two
// nodes that are in the same equivalence class must also be in the same part.
//
// To do this in O(output size), we also need the
// "representatives_sorted_by_index_in_partition" data structure: the
// representatives of the nodes of the targeted part are contiguous in that
// linked list.
void GetAllOtherRepresentativesInSamePartAs(
    int representative_node, const DynamicPartition& partition,
    const DenseDoublyLinkedList& representatives_sorted_by_index_in_partition,
    MergingPartition* node_equivalence_classes,  // Only for debugging.
    std::vector<int>* pruned_other_nodes) {
  pruned_other_nodes->clear();
  const int part_index = partition.PartOf(representative_node);
  // Iterate on all contiguous representatives after the initial one...
  int repr = representative_node;
  while (true) {
    DCHECK_EQ(repr, node_equivalence_classes->GetRoot(repr));
    repr = representatives_sorted_by_index_in_partition.Prev(repr);
    if (repr < 0 || partition.PartOf(repr) != part_index) break;
    pruned_other_nodes->push_back(repr);
  }
  // ... and then on all contiguous representatives *before* it.
  repr = representative_node;
  while (true) {
    DCHECK_EQ(repr, node_equivalence_classes->GetRoot(repr));
    repr = representatives_sorted_by_index_in_partition.Next(repr);
    if (repr < 0 || partition.PartOf(repr) != part_index) break;
    pruned_other_nodes->push_back(repr);
  }

  // This code is a bit tricky, so we check that we're doing it right, by
  // comparing its output to the brute-force, O(Part size) version.
  // This also (partly) verifies that
  // "representatives_sorted_by_index_in_partition" is what it claims it is.
  if (DEBUG_MODE) {
    std::vector<int> expected_output;
    for (const int e : partition.ElementsInPart(part_index)) {
      if (node_equivalence_classes->GetRoot(e) != representative_node) {
        expected_output.push_back(e);
      }
    }
    node_equivalence_classes->KeepOnlyOneNodePerPart(&expected_output);
    for (int& x : expected_output) x = node_equivalence_classes->GetRoot(x);
    std::sort(expected_output.begin(), expected_output.end());
    std::vector<int> sorted_output = *pruned_other_nodes;
    std::sort(sorted_output.begin(), sorted_output.end());
    DCHECK_EQ(absl::StrJoin(expected_output, " "),
              absl::StrJoin(sorted_output, " "));
  }
}
}  // namespace
375
377 std::vector<int>* node_equivalence_classes_io,
378 std::vector<std::unique_ptr<SparsePermutation>>* generators,
379 std::vector<int>* factorized_automorphism_group_size,
381 // Initialization.
382 time_limit_ = time_limit == nullptr ? &dummy_time_limit_ : time_limit;
383 IF_STATS_ENABLED(stats_.initialization_time.StartTimer());
384 generators->clear();
385 factorized_automorphism_group_size->clear();
386 if (node_equivalence_classes_io->size() != NumNodes()) {
387 return absl::Status(absl::StatusCode::kInvalidArgument,
388 "Invalid 'node_equivalence_classes_io'.");
389 }
390 DynamicPartition base_partition(*node_equivalence_classes_io);
391 // Break all inherent asymmetries in the graph.
392 {
393 ScopedTimeDistributionUpdater u(&stats_.initialization_refine_time);
394 RecursivelyRefinePartitionByAdjacency(/*first_unrefined_part_index=*/0,
395 &base_partition);
396 }
397 if (time_limit_->LimitReached()) {
398 return absl::Status(absl::StatusCode::kDeadlineExceeded,
399 "During the initial refinement.");
400 }
401 VLOG(4) << "Base partition: "
403
404 MergingPartition node_equivalence_classes(NumNodes());
405 std::vector<std::vector<int>> permutations_displacing_node(NumNodes());
406 std::vector<int> potential_root_image_nodes;
407 IF_STATS_ENABLED(stats_.initialization_time.StopTimerAndAddElapsedTime());

  // To find all permutations of the Graph that satisfy the current partition,
  // we pick an element v that is not in a singleton part, and we
  // split the search in two phases:
  // 1) Find (the generators of) all permutations that keep v invariant.
  // 2) For each w in PartOf(v) such that w != v:
  //    find *one* permutation that maps v to w, if it exists.
  //    if it does exist, add this to the generators.
  //
  // Phase 1) is recursive.
  //
  // Since we can't really use true recursion (it would be too deep for the
  // stack), we implement it iteratively. To do that, we unroll 1):
  // the "invariant dive" is a single pass that successively refines the node
  // base_partition with elements from non-singleton parts (the 'invariant
  // node'), until all parts are singletons.
  // We remember which nodes we picked as invariants, and also the successive
  // partition sizes as we refine it, to allow us to backtrack.
  // Then we'll perform 2) in the reverse order, backtracking the stack from 1)
  // while using another dedicated stack for the search (see below).
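
  // Illustrative walk-through (hypothetical): if the refined base partition
  // is {0,1 | 2,3}, the dive below may pick invariant node 0 (recording
  // (0, 2 parts) on the stack) and then node 2 (recording (2, 3 parts)), at
  // which point all parts are singletons. Phase 2) then unwinds the stack:
  // it first searches for a permutation mapping 2 to 3, then for one mapping
  // 0 to 1 (unless that candidate is pruned by the equivalence classes found
  // so far).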
  IF_STATS_ENABLED(stats_.invariant_dive_time.StartTimer());
  struct InvariantDiveState {
    int invariant_node;
    int num_parts_before_refinement;

    InvariantDiveState(int node, int num_parts)
        : invariant_node(node), num_parts_before_refinement(num_parts) {}
  };
  std::vector<InvariantDiveState> invariant_dive_stack;
  // TODO(user): experiment with, and briefly describe the results of various
  // algorithms for picking the invariant node:
  // - random selection
  // - highest/lowest degree first
  // - enumerate by part index; or by part size
  // - etc.
  for (int invariant_node = 0; invariant_node < NumNodes(); ++invariant_node) {
    if (base_partition.ElementsInSamePartAs(invariant_node).size() == 1) {
      continue;
    }
    invariant_dive_stack.push_back(
        InvariantDiveState(invariant_node, base_partition.NumParts()));
    DistinguishNodeInPartition(invariant_node, &base_partition, nullptr);
    VLOG(4) << "Invariant dive: invariant node = " << invariant_node
            << "; partition after: "
            << base_partition.DebugString(DynamicPartition::SORT_BY_PART);
    if (time_limit_->LimitReached()) {
      return absl::Status(absl::StatusCode::kDeadlineExceeded,
                          "During the invariant dive.");
    }
  }
  DenseDoublyLinkedList representatives_sorted_by_index_in_partition(
      base_partition.ElementsInHierarchicalOrder());
  DynamicPartition image_partition = base_partition;
  IF_STATS_ENABLED(stats_.invariant_dive_time.StopTimerAndAddElapsedTime());
  // Now we've dived to the bottom: we're left with the identity permutation,
  // which we don't need as a generator. We move on to phase 2).

  IF_STATS_ENABLED(stats_.main_search_time.StartTimer());
  while (!invariant_dive_stack.empty()) {
    if (time_limit_->LimitReached()) break;
    // Backtrack the last step of 1) (the invariant dive).
    IF_STATS_ENABLED(stats_.invariant_unroll_time.StartTimer());
    const int root_node = invariant_dive_stack.back().invariant_node;
    const int base_num_parts =
        invariant_dive_stack.back().num_parts_before_refinement;
    invariant_dive_stack.pop_back();
    base_partition.UndoRefineUntilNumPartsEqual(base_num_parts);
    image_partition.UndoRefineUntilNumPartsEqual(base_num_parts);
    VLOG(4) << "Backtracking invariant dive: root node = " << root_node
            << "; partition: "
            << base_partition.DebugString(DynamicPartition::SORT_BY_PART);

    // Now we'll try to map "root_node" to all image nodes that seem compatible
    // and that aren't "root_node" itself.
    //
    // Doing so, we're able to detect potential bad (or good) matches by
    // refining the 'base' partition with "root_node"; and refining the
    // 'image' partition (which represents the partition of image nodes,
    // i.e. the nodes after applying the currently implicit permutation)
    // with that candidate image node: if the two partitions don't match, then
    // the candidate image isn't compatible.
    // If the partitions do match, we might either find the underlying
    // permutation directly, or we might need to further try and map other
    // nodes to their image: this is a recursive search with backtracking.

    // The potential images of root_node are the nodes in its part. They can be
    // pruned by the already computed equivalence classes.
    // TODO(user): better elect the representative of each equivalence class,
    // in order to reduce the permutation support down the line.
    // TODO(user): Don't build a list; but instead use direct, inline iteration
    // on the representatives in the while() loop below, to benefit from the
    // incremental merging of the equivalence classes.
    DCHECK_EQ(1, node_equivalence_classes.NumNodesInSamePartAs(root_node));
    GetAllOtherRepresentativesInSamePartAs(
        root_node, base_partition, representatives_sorted_by_index_in_partition,
        &node_equivalence_classes, &potential_root_image_nodes);
    DCHECK(!potential_root_image_nodes.empty());
    IF_STATS_ENABLED(stats_.invariant_unroll_time.StopTimerAndAddElapsedTime());

    // Try to map "root_node" to all of its potential images. For each image,
    // we only care about finding a single compatible permutation, if it exists.
    while (!potential_root_image_nodes.empty()) {
      if (time_limit_->LimitReached()) break;
      VLOG(4) << "Potential (pruned) images of root node " << root_node
              << " left: [" << absl::StrJoin(potential_root_image_nodes, " ")
              << "].";
      const int root_image_node = potential_root_image_nodes.back();
      VLOG(4) << "Trying image of root node: " << root_image_node;

      std::unique_ptr<SparsePermutation> permutation =
          FindOneSuitablePermutation(root_node, root_image_node,
                                     &base_partition, &image_partition,
                                     *generators, permutations_displacing_node);

      if (permutation != nullptr) {
        ScopedTimeDistributionUpdater u(&stats_.permutation_output_time);
        // We found a permutation. We store it in the list of generators, and
        // further prune out the remaining 'root' image candidates, taking into
        // account the permutation we just found.
        MergeNodeEquivalenceClassesAccordingToPermutation(
            *permutation, &node_equivalence_classes,
            &representatives_sorted_by_index_in_partition);
        // HACK(user): to make sure that we keep root_image_node as the
        // representative of its part, we temporarily move it to the front
        // of the vector, then move it again to the back so that it gets
        // deleted by the pop_back() below.
        SwapFrontAndBack(&potential_root_image_nodes);
        node_equivalence_classes.KeepOnlyOneNodePerPart(
            &potential_root_image_nodes);
        SwapFrontAndBack(&potential_root_image_nodes);

        // Register it onto the permutations_displacing_node vector.
        const int permutation_index = static_cast<int>(generators->size());
        for (const int node : permutation->Support()) {
          permutations_displacing_node[node].push_back(permutation_index);
        }

        // Move the permutation to the generator list (this also transfers
        // ownership).
        generators->push_back(std::move(permutation));
      }

      potential_root_image_nodes.pop_back();
    }

    // We keep track of the size of the orbit of 'root_node' under the
    // current subgroup: this is one of the factors of the total group size.
    // TODO(user): better, more complete explanation.
    factorized_automorphism_group_size->push_back(
        node_equivalence_classes.NumNodesInSamePartAs(root_node));
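
    // Illustrative note: this is the orbit-stabilizer factorization. If the
    // successive factors pushed here are, say, [2, 3] (hypothetical values),
    // the automorphism group found has size 2 * 3 = 6; keeping the factors
    // avoids computing the (possibly huge) product explicitly.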
  }
  node_equivalence_classes.FillEquivalenceClasses(node_equivalence_classes_io);
  IF_STATS_ENABLED(stats_.main_search_time.StopTimerAndAddElapsedTime());
  IF_STATS_ENABLED(stats_.SetPrintOrder(StatsGroup::SORT_BY_NAME));
  IF_STATS_ENABLED(LOG(INFO) << "Statistics: " << stats_.StatString());
  if (time_limit_->LimitReached()) {
    return absl::Status(absl::StatusCode::kDeadlineExceeded,
                        "Some automorphisms were found, but probably not all.");
  }
  return ::absl::OkStatus();
}

namespace {
// This method can be easily understood in the context of
// ConfirmFullMatchOrFindNextMappingDecision(): see its call sites.
// Knowing that we want to map some element of part #part_index of
// "base_partition" to part #part_index of "image_partition", pick the "best"
// such mapping, for the global search algorithm.
inline void GetBestMapping(const DynamicPartition& base_partition,
                           const DynamicPartition& image_partition,
                           int part_index, int* base_node, int* image_node) {
  // As of pending CL 66620435, we've loosely tried three variants of
  // GetBestMapping():
  // 1) Just take the first element of the base part, map it to the first
  //    element of the image part.
  // 2) Just take the first element of the base part, and map it to itself if
  //    possible, else map it to the first element of the image part.
  // 3) Scan all elements of the base parts until we find one that can map to
  //    itself. If there isn't one, we just fall back to strategy 1).
  //
  // Variant 2) gives the best results on most benchmarks, in terms of speed,
  // but 3) yields much smaller supports for the sat_holeXXX benchmarks, as
  // long as it's combined with the other tweak enabled by
  // FLAGS_minimize_permutation_support_size.
  if (absl::GetFlag(FLAGS_minimize_permutation_support_size)) {
    // Variant 3).
    for (const int node : base_partition.ElementsInPart(part_index)) {
      if (image_partition.PartOf(node) == part_index) {
        *image_node = *base_node = node;
        return;
      }
    }
    *base_node = *base_partition.ElementsInPart(part_index).begin();
    *image_node = *image_partition.ElementsInPart(part_index).begin();
    return;
  }

  // Variant 2).
  *base_node = *base_partition.ElementsInPart(part_index).begin();
  if (image_partition.PartOf(*base_node) == part_index) {
    *image_node = *base_node;
  } else {
    *image_node = *image_partition.ElementsInPart(part_index).begin();
  }
}
}  // namespace

// TODO(user): refactor this method and its submethods into a dedicated class
// whose members will be ominously accessed by all the class methods; most
// notably the search state stack. This may improve readability.
std::unique_ptr<SparsePermutation>
GraphSymmetryFinder::FindOneSuitablePermutation(
    int root_node, int root_image_node, DynamicPartition* base_partition,
    DynamicPartition* image_partition,
    const std::vector<std::unique_ptr<SparsePermutation>>&
        generators_found_so_far,
    const std::vector<std::vector<int>>& permutations_displacing_node) {
  // DCHECKs() and statistics.
  ScopedTimeDistributionUpdater search_time_updater(&stats_.search_time);
  DCHECK_EQ("", tmp_dynamic_permutation_.DebugString());
  DCHECK_EQ(base_partition->DebugString(DynamicPartition::SORT_BY_PART),
            image_partition->DebugString(DynamicPartition::SORT_BY_PART));
  DCHECK(search_states_.empty());

  // These will be used during the search. See their usage.
  std::vector<int> base_singletons;
  std::vector<int> image_singletons;
  int next_base_node;
  int next_image_node;
  int min_potential_mismatching_part_index;
  std::vector<int> next_potential_image_nodes;

  // Initialize the search: we can already distinguish "root_node" in the base
  // partition. See the comment below.
  search_states_.emplace_back(
      /*base_node=*/root_node, /*first_image_node=*/-1,
      /*num_parts_before_trying_to_map_base_node=*/base_partition->NumParts(),
      /*min_potential_mismatching_part_index=*/base_partition->NumParts());
  // We inject the image node directly as the "remaining_pruned_image_nodes".
  search_states_.back().remaining_pruned_image_nodes.assign(1, root_image_node);
  {
    ScopedTimeDistributionUpdater u(&stats_.initial_search_refine_time);
    DistinguishNodeInPartition(root_node, base_partition, &base_singletons);
  }
  while (!search_states_.empty()) {
    if (time_limit_->LimitReached()) return nullptr;
    // When exploring a SearchState "ss", we're supposed to have:
    // - A base_partition that has already been refined on ss->base_node.
    //   (base_singletons is the list of singletons created on the base
    //   partition during that refinement).
    // - A non-empty list of potential image nodes (we'll try them in reverse
    //   order).
    // - An image partition that hasn't been refined yet.
    //
    // Also, one should note that the base partition (before its refinement on
    // base_node) was deemed compatible with the image partition as it is now.
    const SearchState& ss = search_states_.back();
    const int image_node = ss.first_image_node >= 0
                               ? ss.first_image_node
                               : ss.remaining_pruned_image_nodes.back();

    // Statistics, DCHECKs.
    IF_STATS_ENABLED(stats_.search_depth.Add(search_states_.size()));
    DCHECK_EQ(ss.num_parts_before_trying_to_map_base_node,
              image_partition->NumParts());

    // Apply the decision: map base_node to image_node. Since base_partition
    // was already refined on base_node, we just need to refine image_partition.
    {
      ScopedTimeDistributionUpdater u(&stats_.search_refine_time);
      DistinguishNodeInPartition(image_node, image_partition,
                                 &image_singletons);
    }
    VLOG(4) << ss.DebugString();
    VLOG(4) << base_partition->DebugString(DynamicPartition::SORT_BY_PART);
    VLOG(4) << image_partition->DebugString(DynamicPartition::SORT_BY_PART);

    // Run some diagnoses on the two partitions. There are many outcomes, so
    // it's a bit complicated:
    // 1) The partitions are incompatible
    //    - Because of a straightforward criterion (size mismatch).
    //    - Because they are both fully refined (i.e. singletons only), yet the
    //      permutation induced by them is not a graph automorphism.
    // 2) The partitions induce a permutation (all their non-singleton parts
    //    are identical), and this permutation is a graph automorphism.
    // 3) The partitions need further refinement:
    //    - Because some non-singleton parts aren't equal in the base and image
    //      partition
    //    - Or because they are a full match (i.e. may induce a permutation,
    //      like in 2)), but the induced permutation isn't a graph automorphism.
    bool compatible = true;
    {
      ScopedTimeDistributionUpdater u(&stats_.quick_compatibility_time);
      compatible = PartitionsAreCompatibleAfterPartIndex(
          *base_partition, *image_partition,
          ss.num_parts_before_trying_to_map_base_node);
      u.AlsoUpdate(compatible ? &stats_.quick_compatibility_success_time
                              : &stats_.quick_compatibility_fail_time);
    }
    bool partitions_are_full_match = false;
    if (compatible) {
      {
        ScopedTimeDistributionUpdater u(
            &stats_.dynamic_permutation_refinement_time);
        tmp_dynamic_permutation_.AddMappings(base_singletons, image_singletons);
      }
      ScopedTimeDistributionUpdater u(&stats_.map_election_std_time);
      min_potential_mismatching_part_index =
          ss.min_potential_mismatching_part_index;
      partitions_are_full_match = ConfirmFullMatchOrFindNextMappingDecision(
          *base_partition, *image_partition, tmp_dynamic_permutation_,
          &min_potential_mismatching_part_index, &next_base_node,
          &next_image_node);
      u.AlsoUpdate(partitions_are_full_match
                       ? &stats_.map_election_std_full_match_time
                       : &stats_.map_election_std_mapping_time);
    }
    if (compatible && partitions_are_full_match) {
      DCHECK_EQ(min_potential_mismatching_part_index,
                base_partition->NumParts());
      // We have a permutation candidate!
      // Note(user): we also deal with (extremely rare) false positives for
      // "partitions_are_full_match" here: in case they aren't a full match,
      // IsGraphAutomorphism() will catch that; and we'll simply deepen the
      // search.
      bool is_automorphism = true;
      {
        ScopedTimeDistributionUpdater u(&stats_.automorphism_test_time);
        is_automorphism = IsGraphAutomorphism(tmp_dynamic_permutation_);
        u.AlsoUpdate(is_automorphism ? &stats_.automorphism_test_success_time
                                     : &stats_.automorphism_test_fail_time);
      }
      if (is_automorphism) {
        ScopedTimeDistributionUpdater u(&stats_.search_finalize_time);
        // We found a valid permutation. We can return it, but first we
        // must restore the partitions to their original state.
        std::unique_ptr<SparsePermutation> sparse_permutation(
            tmp_dynamic_permutation_.CreateSparsePermutation());
        VLOG(4) << "Automorphism found: " << sparse_permutation->DebugString();
        const int base_num_parts =
            search_states_[0].num_parts_before_trying_to_map_base_node;
        base_partition->UndoRefineUntilNumPartsEqual(base_num_parts);
        image_partition->UndoRefineUntilNumPartsEqual(base_num_parts);
        tmp_dynamic_permutation_.Reset();
        search_states_.clear();

        search_time_updater.AlsoUpdate(&stats_.search_time_success);
        return sparse_permutation;
      }

      // The permutation isn't a valid automorphism. Either the partitions were
      // fully refined, and we deem them incompatible, or they weren't, and we
      // consider them as 'not a full match'.
      VLOG(4) << "Permutation candidate isn't a valid automorphism.";
      if (base_partition->NumParts() == NumNodes()) {
        // Fully refined: the partitions are incompatible.
        compatible = false;
        ScopedTimeDistributionUpdater u(&stats_.dynamic_permutation_undo_time);
        tmp_dynamic_permutation_.UndoLastMappings(&base_singletons);
      } else {
        ScopedTimeDistributionUpdater u(&stats_.map_reelection_time);
        // TODO(user, viger): try to get the non-singleton part from
        // DynamicPermutation in O(1). On some graphs, like the symmetry of the
        // MIP problem lectsched-4-obj.mps.gz, this takes the majority of the
        // time!
        int non_singleton_part = 0;
        {
          ScopedTimeDistributionUpdater u(&stats_.non_singleton_search_time);
          while (base_partition->SizeOfPart(non_singleton_part) == 1) {
            ++non_singleton_part;
            DCHECK_LT(non_singleton_part, base_partition->NumParts());
          }
        }
        time_limit_->AdvanceDeterministicTime(
            1e-9 * static_cast<double>(non_singleton_part));

        // The partitions are compatible, but we'll deepen the search on some
        // non-singleton part. We can pick any base and image node in this case.
        GetBestMapping(*base_partition, *image_partition, non_singleton_part,
                       &next_base_node, &next_image_node);
      }
    }

    // Now we've fully diagnosed our partitions, and have already dealt with
    // case 2). We're left to deal with 1) and 3).
    //
    // Case 1): partitions are incompatible.
    if (!compatible) {
      ScopedTimeDistributionUpdater u(&stats_.backtracking_time);
      // We invalidate the current image node, and prune the remaining image
      // nodes. We might be left with no other image nodes, which means that
      // we'll backtrack, i.e. pop our current SearchState and invalidate the
      // 'current' image node of the upper SearchState (which might lead to us
      // backtracking it, and so on).
      while (!search_states_.empty()) {
        SearchState* const last_ss = &search_states_.back();
        image_partition->UndoRefineUntilNumPartsEqual(
            last_ss->num_parts_before_trying_to_map_base_node);
        if (last_ss->first_image_node >= 0) {
          // Find out and prune the remaining potential image nodes: there is
          // no permutation that maps base_node -> image_node that is
          // compatible with the current partition, so there can't be a
          // permutation that maps base_node -> X either, for all X in the orbit
          // of 'image_node' under valid permutations compatible with the
          // current partition. Ditto for other potential image nodes.
          //
          // TODO(user): fix this: we should really be collecting all
          // permutations displacing any node in "image_part", for the pruning
          // to be really exhaustive. We could also consider alternative ways,
          // like incrementally maintaining the list of permutations compatible
          // with the partition so far.
          const int part = image_partition->PartOf(last_ss->first_image_node);
          last_ss->remaining_pruned_image_nodes.reserve(
              image_partition->SizeOfPart(part));
          last_ss->remaining_pruned_image_nodes.push_back(
              last_ss->first_image_node);
          for (const int e : image_partition->ElementsInPart(part)) {
            if (e != last_ss->first_image_node) {
              last_ss->remaining_pruned_image_nodes.push_back(e);
            }
          }
          {
            ScopedTimeDistributionUpdater u(&stats_.pruning_time);
            PruneOrbitsUnderPermutationsCompatibleWithPartition(
                *image_partition, generators_found_so_far,
                permutations_displacing_node[last_ss->first_image_node],
                &last_ss->remaining_pruned_image_nodes);
          }
          SwapFrontAndBack(&last_ss->remaining_pruned_image_nodes);
          DCHECK_EQ(last_ss->remaining_pruned_image_nodes.back(),
                    last_ss->first_image_node);
          last_ss->first_image_node = -1;
        }
        last_ss->remaining_pruned_image_nodes.pop_back();
        if (!last_ss->remaining_pruned_image_nodes.empty()) break;

        VLOG(4) << "Backtracking one level up.";
        base_partition->UndoRefineUntilNumPartsEqual(
            last_ss->num_parts_before_trying_to_map_base_node);
        // If this was the root search state (i.e. we fully backtracked and
        // will exit the search after that), we don't have mappings to undo.
        // We run UndoLastMappings() anyway, because it's a no-op in that case.
        tmp_dynamic_permutation_.UndoLastMappings(&base_singletons);
        search_states_.pop_back();
      }
      // Continue the search.
      continue;
    }

    // Case 3): we deepen the search.
    // Since the search loop starts from an already-refined base_partition,
    // we must refine it here.
    VLOG(4) << "  Deepening the search.";
    search_states_.emplace_back(
        next_base_node, next_image_node,
        /*num_parts_before_trying_to_map_base_node=*/base_partition->NumParts(),
        min_potential_mismatching_part_index);
    {
      ScopedTimeDistributionUpdater u(&stats_.search_refine_time);
      DistinguishNodeInPartition(next_base_node, base_partition,
                                 &base_singletons);
    }
  }
  // We exhausted the search; we didn't find any permutation.
  search_time_updater.AlsoUpdate(&stats_.search_time_fail);
  return nullptr;
}

util::BeginEndWrapper<std::vector<int>::const_iterator>
GraphSymmetryFinder::TailsOfIncomingArcsTo(int node) const {
  return util::BeginEndWrapper<std::vector<int>::const_iterator>(
      flattened_reverse_adj_lists_.begin() + reverse_adj_list_index_[node],
      flattened_reverse_adj_lists_.begin() + reverse_adj_list_index_[node + 1]);
}

void GraphSymmetryFinder::PruneOrbitsUnderPermutationsCompatibleWithPartition(
    const DynamicPartition& partition,
    const std::vector<std::unique_ptr<SparsePermutation>>& permutations,
    const std::vector<int>& permutation_indices, std::vector<int>* nodes) {
  VLOG(4) << "  Pruning [" << absl::StrJoin(*nodes, ", ") << "]";
  // TODO(user): apply a smarter test to decide whether to do the pruning
  // or not: we can accurately estimate the cost of pruning (iterate through
  // all generators found so far) and its estimated benefit (the cost of
  // the search below the state that we're currently in, times the expected
  // number of pruned nodes). Sometimes it may be better to skip the
  // pruning.
  if (nodes->size() <= 1) return;

  // Iterate on all targeted permutations. If they are compatible, apply
  // them to tmp_partition_, which will contain the incrementally merged
  // equivalence classes.
  std::vector<int>& tmp_nodes_on_support =
      tmp_stack_;  // Rename, for readability.
  DCHECK(tmp_nodes_on_support.empty());
  // TODO(user): investigate further optimizations: maybe it's possible
  // to incrementally maintain the set of permutations that is compatible
  // with the current partition, instead of recomputing it here?
  for (const int p : permutation_indices) {
    const SparsePermutation& permutation = *permutations[p];
    // First, a quick compatibility check: each of the permutation's cycles
    // must be no larger than the part that contains its first element.
    bool compatible = true;
    for (int c = 0; c < permutation.NumCycles(); ++c) {
      const SparsePermutation::Iterator cycle = permutation.Cycle(c);
      if (cycle.size() >
          partition.SizeOfPart(partition.PartOf(*cycle.begin()))) {
        compatible = false;
        break;
      }
    }
    if (!compatible) continue;
    // Now the full compatibility check: each cycle of the permutation must
    // be fully included in an image part.
    for (int c = 0; c < permutation.NumCycles(); ++c) {
      int part = -1;
      for (const int node : permutation.Cycle(c)) {
        if (partition.PartOf(node) != part) {
          if (part >= 0) {
            compatible = false;
            break;
          }
          part = partition.PartOf(node);  // Initialization of 'part'.
        }
      }
    }
    if (!compatible) continue;
    // The permutation is fully compatible!
    // TODO(user): ignore cycles that are outside of image_part.
    MergeNodeEquivalenceClassesAccordingToPermutation(permutation,
                                                      &tmp_partition_, nullptr);
    for (const int node : permutation.Support()) {
      if (!tmp_node_mask_[node]) {
        tmp_node_mask_[node] = true;
        tmp_nodes_on_support.push_back(node);
      }
    }
  }

  // Apply the pruning.
  tmp_partition_.KeepOnlyOneNodePerPart(nodes);

  // Reset the "tmp_" structures sparsely.
  for (const int node : tmp_nodes_on_support) {
    tmp_node_mask_[node] = false;
    tmp_partition_.ResetNode(node);
  }
  tmp_nodes_on_support.clear();
  VLOG(4) << "  Pruned: [" << absl::StrJoin(*nodes, ", ") << "]";
}
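
// Illustrative example (hypothetical): a permutation with the single cycle
// (1 5 9) passes the quick check above whenever the part containing node 1
// has size >= 3, but passes the full check only if nodes 1, 5 and 9 all lie
// in the same part. If it does, and "nodes" contained both 5 and 9, the
// pruning keeps only one of them, since a known compatible permutation maps
// one to the other.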

bool GraphSymmetryFinder::ConfirmFullMatchOrFindNextMappingDecision(
    const DynamicPartition& base_partition,
    const DynamicPartition& image_partition,
    const DynamicPermutation& current_permutation_candidate,
    int* min_potential_mismatching_part_index_io, int* next_base_node,
    int* next_image_node) const {
  *next_base_node = -1;
  *next_image_node = -1;

  // The following clause should be true most of the time, except in some
  // specific use cases.
  if (!absl::GetFlag(FLAGS_minimize_permutation_support_size)) {
    // First, we try to map the loose ends of the current permutation: these
    // loose ends can't be mapped to themselves, so we'll have to map them to
    // something anyway.
    for (const int loose_node : current_permutation_candidate.LooseEnds()) {
      DCHECK_GT(base_partition.ElementsInSamePartAs(loose_node).size(), 1);
      *next_base_node = loose_node;
      const int root = current_permutation_candidate.RootOf(loose_node);
      DCHECK_NE(root, loose_node);
      if (image_partition.PartOf(root) == base_partition.PartOf(loose_node)) {
        // We prioritize mapping a loose end to its own root (i.e. closing a
        // cycle), if possible, like here: we exit immediately.
        *next_image_node = root;
        return false;
      }
    }
    if (*next_base_node != -1) {
      // We found loose ends, but none that mapped to its own root. Just pick
      // any valid image.
      *next_image_node =
          *image_partition
               .ElementsInPart(base_partition.PartOf(*next_base_node))
               .begin();
      return false;
    }
  }

  // If there is no loose node (i.e. the current permutation only has closed
  // cycles), we fall back to picking any part that is different in the base
  // and image partitions, because we know that some mapping decision will
  // have to be made there.
  // SUBTLE: we use "min_potential_mismatching_part_index_io" to incrementally
  // keep running this search (for a mismatching part) from where we left off.
  // TODO(user): implement a simpler search for a mismatching part: it's
  // trivially possible if the base partition maintains a hash set of all
  // Fprints of its parts, and if the image partition uses that to maintain the
  // list of 'different' non-singleton parts.
  const int initial_min_potential_mismatching_part_index =
      *min_potential_mismatching_part_index_io;
  for (; *min_potential_mismatching_part_index_io < base_partition.NumParts();
       ++*min_potential_mismatching_part_index_io) {
    const int p = *min_potential_mismatching_part_index_io;
    if (base_partition.SizeOfPart(p) != 1 &&
        base_partition.FprintOfPart(p) != image_partition.FprintOfPart(p)) {
      GetBestMapping(base_partition, image_partition, p, next_base_node,
                     next_image_node);
      return false;
    }

    const int parent = base_partition.ParentOfPart(p);
    if (parent < initial_min_potential_mismatching_part_index &&
        base_partition.SizeOfPart(parent) != 1 &&
        base_partition.FprintOfPart(parent) !=
            image_partition.FprintOfPart(parent)) {
      GetBestMapping(base_partition, image_partition, parent, next_base_node,
                     next_image_node);
      return false;
    }
  }

  // We didn't find an unequal part. DCHECK that our "incremental" check was
  // actually correct and that all non-singleton parts are indeed equal.
  if (DEBUG_MODE) {
    for (int p = 0; p < base_partition.NumParts(); ++p) {
      if (base_partition.SizeOfPart(p) != 1) {
        CHECK_EQ(base_partition.FprintOfPart(p),
                 image_partition.FprintOfPart(p));
      }
    }
  }
  return true;
}

std::string GraphSymmetryFinder::SearchState::DebugString() const {
  return absl::StrFormat(
      "SearchState{ base_node=%d, first_image_node=%d,"
      " remaining_pruned_image_nodes=[%s],"
      " num_parts_before_trying_to_map_base_node=%d }",
      base_node, first_image_node,
      absl::StrJoin(remaining_pruned_image_nodes, " "),
      num_parts_before_trying_to_map_base_node);
}

}  // namespace operations_research