Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
synchronization.cc
1// Copyright 2010-2025 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/synchronization.h"
15
16#include <sys/types.h>
17
18#include <algorithm>
19#include <atomic>
20#include <cctype>
21#include <cmath>
22#include <cstddef>
23#include <cstdint>
24#include <ctime>
25#include <deque>
26#include <functional>
27#include <limits>
28#include <memory>
29#include <string>
30#include <utility>
31#include <vector>
32
33#include "absl/hash/hash.h"
34#include "absl/time/time.h"
36#include "ortools/base/timer.h"
37#if !defined(__PORTABLE_PLATFORM__)
40#endif // __PORTABLE_PLATFORM__
41#include "absl/algorithm/container.h"
42#include "absl/container/btree_map.h"
43#include "absl/container/flat_hash_map.h"
44#include "absl/container/flat_hash_set.h"
45#include "absl/flags/flag.h"
46#include "absl/log/check.h"
47#include "absl/strings/str_cat.h"
48#include "absl/strings/str_format.h"
49#include "absl/strings/string_view.h"
50#include "absl/synchronization/mutex.h"
51#include "absl/types/span.h"
53#include "ortools/sat/cp_model.pb.h"
56#include "ortools/sat/model.h"
57#include "ortools/sat/sat_parameters.pb.h"
59#include "ortools/sat/util.h"
60#include "ortools/util/bitset.h"
64
65ABSL_FLAG(bool, cp_model_dump_solutions, false,
66 "DEBUG ONLY. If true, all the intermediate solution will be dumped "
67 "under '\"FLAGS_cp_model_dump_prefix\" + \"solution_xxx.pb.txt\"'.");
68ABSL_FLAG(bool, cp_model_dump_tightened_models, false,
69 "DEBUG ONLY. If true, dump tightened models incoporating all bounds "
70 "changes under '\"FLAGS_cp_model_dump_prefix\" + "
71 "\"tight_model_xxx.pb.txt\"'.");
72
73namespace operations_research {
74namespace sat {
75
76void SharedLPSolutionRepository::NewLPSolution(
77 std::vector<double> lp_solution) {
78 if (lp_solution.empty()) return;
79
80 // Add this solution to the pool.
81 auto solution =
82 std::make_shared<SharedSolutionRepository<double>::Solution>();
83 solution->variable_values = std::move(lp_solution);
84
85 // We always prefer to keep the solution from the last synchronize batch.
86 {
87 absl::MutexLock mutex_lock(&mutex_);
88 solution->rank = -num_synchronization_;
89 ++num_added_;
90 new_solutions_.push_back(solution);
91 }
92}
93
94void SharedIncompleteSolutionManager::AddSolution(
95 const std::vector<double>& lp_solution) {
96 absl::MutexLock mutex_lock(&mutex_);
97 ++num_added_;
98 solutions_.push_back(lp_solution);
99 if (solutions_.size() > 100) solutions_.pop_front();
100}
101
102bool SharedIncompleteSolutionManager::HasSolution() const {
103 absl::MutexLock mutex_lock(&mutex_);
104 return !solutions_.empty();
105}
106
107std::vector<double> SharedIncompleteSolutionManager::PopLast() {
108 absl::MutexLock mutex_lock(&mutex_);
109 if (solutions_.empty()) return {};
110
111 ++num_queried_;
112 std::vector<double> solution = std::move(solutions_.back());
113 solutions_.pop_back();
114 return solution;
115}
116
117SharedResponseManager::SharedResponseManager(Model* model)
118 : parameters_(*model->GetOrCreate<SatParameters>()),
119 wall_timer_(*model->GetOrCreate<WallTimer>()),
120 shared_time_limit_(model->GetOrCreate<ModelSharedTimeLimit>()),
121 solutions_(parameters_.solution_pool_size(), "feasible solutions"),
122 logger_(model->GetOrCreate<SolverLogger>()) {
123 bounds_logging_id_ = logger_->GetNewThrottledId();
124}
125
126namespace {
127
128std::string ProgressMessage(absl::string_view event_or_solution_count,
129 double time_in_seconds, double obj_best,
130 double obj_lb, double obj_ub,
131 absl::string_view solution_info) {
132 const std::string obj_next =
133 obj_lb <= obj_ub ? absl::StrFormat("next:[%.9g,%.9g]", obj_lb, obj_ub)
134 : "next:[]";
135 return absl::StrFormat("#%-5s %6.2fs best:%-5.9g %-15s %s",
136 event_or_solution_count, time_in_seconds, obj_best,
137 obj_next, solution_info);
138}
139
140std::string SatProgressMessage(const std::string& event_or_solution_count,
141 double time_in_seconds,
142 const std::string& solution_info) {
143 return absl::StrFormat("#%-5s %6.2fs %s", event_or_solution_count,
144 time_in_seconds, solution_info);
145}
146
147} // namespace
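
// Illustrative note (not part of the original file): with the formats above, a
// solver log line looks roughly like
//   #5       2.35s best:12    next:[10,11]    quick_restart
// for ProgressMessage("5", 2.35, 12, 10, 11, "quick_restart"), and like
//   #Done    3.10s max_lp
// for SatProgressMessage("Done", 3.10, "max_lp"); the exact spacing comes from
// the absl::StrFormat width specifiers.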
148
149void SharedResponseManager::FillSolveStatsInResponse(
150 Model* model, CpSolverResponse* response) {
151 if (model == nullptr) return;
152 absl::MutexLock mutex_lock(&mutex_);
153 for (const auto& set_stats : statistics_postprocessors_) {
154 set_stats(model, response);
155 }
156}
157
158void SharedResponseManager::LogMessage(absl::string_view prefix,
159 absl::string_view message) {
160 absl::MutexLock mutex_lock(&mutex_);
161 SOLVER_LOG(logger_, absl::StrFormat("#%-5s %6.2fs %s", prefix,
162 wall_timer_.Get(), message));
163}
164
165void SharedResponseManager::LogMessageWithThrottling(
166 const std::string& prefix, const std::string& message) {
167 absl::MutexLock mutex_lock(&mutex_);
168
169 int id;
170 auto it = throttling_ids_.find(prefix);
171 if (it == throttling_ids_.end()) {
172 id = throttling_ids_[prefix] = logger_->GetNewThrottledId();
173 } else {
174 id = it->second;
175 }
176 logger_->ThrottledLog(id, absl::StrFormat("#%-5s %6.2fs %s", prefix,
177 wall_timer_.Get(), message));
178}
179
180bool SharedResponseManager::LoggingIsEnabled() const {
181 absl::MutexLock mutex_lock(&mutex_);
182
183 return logger_->LoggingIsEnabled();
184}
185
186void SharedResponseManager::InitializeObjective(const CpModelProto& cp_model) {
187 if (cp_model.has_objective()) {
188 objective_or_null_ = &cp_model.objective();
189 const Domain domain = ReadDomainFromProto(cp_model.objective());
190 if (!domain.IsEmpty()) {
191 UpdateInnerObjectiveBounds("initial_domain", IntegerValue(domain.Min()),
192 IntegerValue(domain.Max()));
193 }
194 } else {
195 objective_or_null_ = nullptr;
196 }
197}
198
199void SharedResponseManager::SetSynchronizationMode(bool always_synchronize) {
200 absl::MutexLock mutex_lock(&mutex_);
201 always_synchronize_ = always_synchronize;
202}
203
204void SharedResponseManager::SetUpdateGapIntegralOnEachChange(bool set) {
205 absl::MutexLock mutex_lock(&mutex_);
206 update_integral_on_each_change_ = set;
207}
208
209void SharedResponseManager::UpdateGapIntegral() {
210 absl::MutexLock mutex_lock(&mutex_);
211 UpdateGapIntegralInternal();
212}
213
214void SharedResponseManager::UpdateGapIntegralInternal() {
215 if (objective_or_null_ == nullptr) return;
216
217 const double current_time = shared_time_limit_->GetElapsedDeterministicTime();
218 const double time_delta = current_time - last_gap_integral_time_stamp_;
219
220 // We use the log of the absolute objective gap.
221 //
222 // Using the log should count no solution as just log(2*64) = 18, and
223 // otherwise just compare order of magnitude which seems nice. Also, It is
224 // more easy to compare the primal integral with the total time.
225 const CpObjectiveProto& obj = *objective_or_null_;
226 const double factor =
227 obj.scaling_factor() != 0.0 ? std::abs(obj.scaling_factor()) : 1.0;
228 const double bounds_delta = std::log(1 + factor * last_absolute_gap_);
229 gap_integral_ += time_delta * bounds_delta;
230
231 // Update with new value.
232 last_gap_integral_time_stamp_ = current_time;
233 last_absolute_gap_ =
234 std::max(0.0, static_cast<double>(inner_objective_upper_bound_) -
235 static_cast<double>(inner_objective_lower_bound_));
236}
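
// Worked example (not from the original file, values made up): if the scaled
// absolute gap stayed at 100 for 2.0 deterministic seconds with a scaling
// factor of 1, the integral above grows by 2.0 * log(1 + 100) ~= 9.2, while a
// fully closed gap contributes log(1) = 0 no matter how much time passes.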
237
238void SharedResponseManager::SetGapLimitsFromParameters(
239 const SatParameters& parameters) {
240 absl::MutexLock mutex_lock(&mutex_);
241 if (objective_or_null_ == nullptr) return;
242 absolute_gap_limit_ = parameters.absolute_gap_limit();
243 relative_gap_limit_ = parameters.relative_gap_limit();
244}
245
246void SharedResponseManager::TestGapLimitsIfNeeded() {
247 // This is called on each internal limit change, so it is a good place to
248 // update the integral. Note that this is not called at the end of the search
249 // though.
250 if (update_integral_on_each_change_) UpdateGapIntegralInternal();
251
252 // Abort if there is no limit set, if the gap is not defined, or if we already
253 // proved optimality or infeasibility.
254 if (absolute_gap_limit_ == 0 && relative_gap_limit_ == 0) return;
255 if (best_solution_objective_value_ >= kMaxIntegerValue) return;
256 if (inner_objective_lower_bound_ <= kMinIntegerValue) return;
257 if (inner_objective_lower_bound_ > inner_objective_upper_bound_) return;
258
259 const CpObjectiveProto& obj = *objective_or_null_;
260 const double user_best =
261 ScaleObjectiveValue(obj, best_solution_objective_value_);
262 const double user_bound =
263 ScaleObjectiveValue(obj, inner_objective_lower_bound_);
264 const double gap = std::abs(user_best - user_bound);
265 if (gap <= absolute_gap_limit_) {
266 SOLVER_LOG(logger_, "Absolute gap limit of ", absolute_gap_limit_,
267 " reached.");
268 UpdateBestStatus(CpSolverStatus::OPTIMAL);
269
270 // Note(user): Some code paths in single-thread mode assume that the problem
271 // can only be solved when they have proven infeasibility and do not check
272 // the ProblemIsSolved() method. So we force a stop here.
273 if (always_synchronize_) shared_time_limit_->Stop();
274 }
275 if (gap / std::max(1.0, std::abs(user_best)) < relative_gap_limit_) {
276 SOLVER_LOG(logger_, "Relative gap limit of ", relative_gap_limit_,
277 " reached.");
278 UpdateBestStatus(CpSolverStatus::OPTIMAL);
279
280 // Same as above.
281 if (always_synchronize_) shared_time_limit_->Stop();
282 }
283}
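
// Worked example (not from the original file, values made up): with a best
// solution of user objective 100, a proven bound of 98 and
// relative_gap_limit = 0.05, the absolute gap is 2 and the relative gap is
// 2 / max(1, 100) = 0.02 < 0.05, so the status is upgraded to OPTIMAL (and, in
// always-synchronize mode, the search is stopped).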
284
285void SharedResponseManager::UpdateInnerObjectiveBounds(
286 const std::string& update_info, IntegerValue lb, IntegerValue ub) {
287 absl::MutexLock mutex_lock(&mutex_);
288 CHECK(objective_or_null_ != nullptr);
289
290 // The problem is already solved!
291 //
292 // TODO(user): A thread might not be notified right away that the new bounds
293 // that it is pushing make the problem infeasible. Fix that. For now we just
294 // abort early here to avoid logging the "#Done" message multiple times.
295 if (inner_objective_lower_bound_ > inner_objective_upper_bound_) {
296 return;
297 }
298
299 const bool ub_change = ub < inner_objective_upper_bound_;
300 const bool lb_change = lb > inner_objective_lower_bound_;
301 if (!lb_change && !ub_change) return;
302
303 if (lb_change) {
304 // When the improving problem is infeasible, it is possible to report an
305 // arbitrarily high inner_objective_lower_bound_. We make sure it never
306 // crosses the current best solution, so that we always report a globally
307 // valid lower bound.
308 DCHECK_LE(inner_objective_upper_bound_, best_solution_objective_value_);
309 inner_objective_lower_bound_ =
310 std::min(best_solution_objective_value_, lb.value());
311 }
312 if (ub_change) {
313 inner_objective_upper_bound_ = ub.value();
314 }
315
316 if (always_synchronize_) {
317 synchronized_inner_objective_lower_bound_ =
318 IntegerValue(inner_objective_lower_bound_);
319 synchronized_inner_objective_upper_bound_ =
320 IntegerValue(inner_objective_upper_bound_);
321 }
322
323 if (inner_objective_lower_bound_ > inner_objective_upper_bound_) {
324 if (best_status_ == CpSolverStatus::FEASIBLE ||
325 best_status_ == CpSolverStatus::OPTIMAL) {
326 UpdateBestStatus(CpSolverStatus::OPTIMAL);
327 } else {
328 UpdateBestStatus(CpSolverStatus::INFEASIBLE);
329 }
330 if (update_integral_on_each_change_) UpdateGapIntegralInternal();
331 SOLVER_LOG(logger_,
332 SatProgressMessage("Done", wall_timer_.Get(), update_info));
333 return;
334 }
335 if (logger_->LoggingIsEnabled() || !best_bound_callbacks_.empty()) {
336 const CpObjectiveProto& obj = *objective_or_null_;
337 const double best =
338 ScaleObjectiveValue(obj, best_solution_objective_value_);
339 double new_lb = ScaleObjectiveValue(obj, inner_objective_lower_bound_);
340 if (lb_change) {
341 for (const auto& callback_entry : best_bound_callbacks_) {
342 callback_entry.second(new_lb);
343 }
344 }
345 if (logger_->LoggingIsEnabled()) {
346 double new_ub = ScaleObjectiveValue(obj, inner_objective_upper_bound_);
347 if (obj.scaling_factor() < 0) {
348 std::swap(new_lb, new_ub);
349 }
350 RegisterObjectiveBoundImprovement(update_info);
351 logger_->ThrottledLog(bounds_logging_id_,
352 ProgressMessage("Bound", wall_timer_.Get(), best,
353 new_lb, new_ub, update_info));
354 }
355 }
356 TestGapLimitsIfNeeded();
357}
358
359// Invariant: the status always starts at UNKNOWN and can only evolve as follows:
360// UNKNOWN -> FEASIBLE -> OPTIMAL
361// UNKNOWN -> INFEASIBLE
362void SharedResponseManager::NotifyThatImprovingProblemIsInfeasible(
363 const std::string& worker_info) {
364 absl::MutexLock mutex_lock(&mutex_);
365 if (best_status_ == CpSolverStatus::FEASIBLE ||
366 best_status_ == CpSolverStatus::OPTIMAL) {
367 // We also use this status to indicate that we enumerated all solutions to
368 // a feasible problem.
369 UpdateBestStatus(CpSolverStatus::OPTIMAL);
370
371 // We just proved that the best solution cannot be improved upon, so we
372 // have a new lower bound.
373 inner_objective_lower_bound_ = best_solution_objective_value_;
374 if (update_integral_on_each_change_) UpdateGapIntegralInternal();
375 } else {
376 CHECK_EQ(num_solutions_, 0);
377 UpdateBestStatus(CpSolverStatus::INFEASIBLE);
378 }
379 SOLVER_LOG(logger_,
380 SatProgressMessage("Done", wall_timer_.Get(), worker_info));
381}
382
383void SharedResponseManager::AddUnsatCore(const std::vector<int>& core) {
384 absl::MutexLock mutex_lock(&mutex_);
385 unsat_cores_ = core;
386}
387
388IntegerValue SharedResponseManager::GetInnerObjectiveLowerBound() {
389 absl::MutexLock mutex_lock(&mutex_);
390 return synchronized_inner_objective_lower_bound_;
391}
392
393IntegerValue SharedResponseManager::GetInnerObjectiveUpperBound() {
394 absl::MutexLock mutex_lock(&mutex_);
395 return synchronized_inner_objective_upper_bound_;
396}
397
398void SharedResponseManager::Synchronize() {
399 absl::MutexLock mutex_lock(&mutex_);
400 synchronized_inner_objective_lower_bound_ =
401 IntegerValue(inner_objective_lower_bound_);
402 synchronized_inner_objective_upper_bound_ =
403 IntegerValue(inner_objective_upper_bound_);
404 synchronized_best_status_ = best_status_;
405 if (solutions_.NumSolutions() > 0) {
406 first_solution_solvers_should_stop_ = true;
407 }
408 logger_->FlushPendingThrottledLogs();
409}
410
411IntegerValue SharedResponseManager::BestSolutionInnerObjectiveValue() {
412 absl::MutexLock mutex_lock(&mutex_);
413 return IntegerValue(best_solution_objective_value_);
414}
415
416double SharedResponseManager::GapIntegral() const {
417 absl::MutexLock mutex_lock(&mutex_);
418 return gap_integral_;
419}
420
421void SharedResponseManager::AddSolutionPostprocessor(
422 std::function<void(std::vector<int64_t>*)> postprocessor) {
423 absl::MutexLock mutex_lock(&mutex_);
424 solution_postprocessors_.push_back(postprocessor);
425}
426
427void SharedResponseManager::AddResponsePostprocessor(
428 std::function<void(CpSolverResponse*)> postprocessor) {
429 absl::MutexLock mutex_lock(&mutex_);
430 postprocessors_.push_back(postprocessor);
431}
432
433void SharedResponseManager::AddFinalResponsePostprocessor(
434 std::function<void(CpSolverResponse*)> postprocessor) {
435 absl::MutexLock mutex_lock(&mutex_);
436 final_postprocessors_.push_back(postprocessor);
437}
438
439void SharedResponseManager::AddStatisticsPostprocessor(
440 std::function<void(Model*, CpSolverResponse*)> postprocessor) {
441 absl::MutexLock mutex_lock(&mutex_);
442 statistics_postprocessors_.push_back(postprocessor);
443}
444
445int SharedResponseManager::AddSolutionCallback(
446 std::function<void(const CpSolverResponse&)> callback) {
447 absl::MutexLock mutex_lock(&mutex_);
448 const int id = next_callback_id_++;
449 callbacks_.emplace_back(id, std::move(callback));
450 return id;
451}
452
453void SharedResponseManager::UnregisterCallback(int callback_id) {
454 absl::MutexLock mutex_lock(&mutex_);
455 for (int i = 0; i < callbacks_.size(); ++i) {
456 if (callbacks_[i].first == callback_id) {
457 callbacks_.erase(callbacks_.begin() + i);
458 return;
459 }
460 }
461 LOG(DFATAL) << "Callback id " << callback_id << " not registered.";
462}
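
// Illustrative usage sketch, not code from this file: registering a solution
// callback and removing it once it is no longer needed. The function name and
// lambda body are made up for the example.
namespace {
void ExampleSolutionCallbackUsage(SharedResponseManager* shared_response) {
  const int callback_id = shared_response->AddSolutionCallback(
      [](const CpSolverResponse& response) {
        LOG(INFO) << "New solution, objective=" << response.objective_value();
      });
  // ... run the search ...
  shared_response->UnregisterCallback(callback_id);
}
}  // namespace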
463
464int SharedResponseManager::AddLogCallback(
465 std::function<std::string(const CpSolverResponse&)> callback) {
466 absl::MutexLock mutex_lock(&mutex_);
467 const int id = next_search_log_callback_id_++;
468 search_log_callbacks_.emplace_back(id, std::move(callback));
469 return id;
470}
471
472void SharedResponseManager::UnregisterLogCallback(int callback_id) {
473 absl::MutexLock mutex_lock(&mutex_);
474 for (int i = 0; i < search_log_callbacks_.size(); ++i) {
475 if (search_log_callbacks_[i].first == callback_id) {
476 search_log_callbacks_.erase(search_log_callbacks_.begin() + i);
477 return;
478 }
479 }
480 LOG(DFATAL) << "Callback id " << callback_id << " not registered.";
481}
482
483int SharedResponseManager::AddBestBoundCallback(
484 std::function<void(double)> callback) {
485 absl::MutexLock mutex_lock(&mutex_);
486 const int id = next_best_bound_callback_id_++;
487 best_bound_callbacks_.emplace_back(id, std::move(callback));
488 return id;
489}
490
491void SharedResponseManager::UnregisterBestBoundCallback(int callback_id) {
492 absl::MutexLock mutex_lock(&mutex_);
493 for (int i = 0; i < best_bound_callbacks_.size(); ++i) {
494 if (best_bound_callbacks_[i].first == callback_id) {
495 best_bound_callbacks_.erase(best_bound_callbacks_.begin() + i);
496 return;
497 }
498 }
499 LOG(DFATAL) << "Callback id " << callback_id << " not registered.";
500}
501
502CpSolverResponse SharedResponseManager::GetResponseInternal(
503 absl::Span<const int64_t> variable_values,
504 const std::string& solution_info) {
505 CpSolverResponse result;
506 result.set_status(best_status_);
507 if (!unsat_cores_.empty()) {
508 DCHECK_EQ(best_status_, CpSolverStatus::INFEASIBLE);
509 result.mutable_sufficient_assumptions_for_infeasibility()->Assign(
510 unsat_cores_.begin(), unsat_cores_.end());
511 }
512 FillObjectiveValuesInResponse(&result);
513 result.set_solution_info(solution_info);
514
515 // Tricky: We copy the solution now for the case where MergeFrom() below
516 // overrides it!
517 //
518 // TODO(user): Fix. This is messy; we should really just override stats, not
519 // important things like the solution or status, with the MergeFrom() below.
520 if (best_status_ == CpSolverStatus::FEASIBLE ||
521 best_status_ == CpSolverStatus::OPTIMAL) {
522 result.mutable_solution()->Assign(variable_values.begin(),
523 variable_values.end());
524 }
525
526 // Note that we allow subsolver_responses_ to override the fields set above.
527 // That is, the status, solution_info and objective values...
528 if (!subsolver_responses_.empty()) {
529 result.MergeFrom(subsolver_responses_.front());
530 }
531
532 if (result.status() == CpSolverStatus::FEASIBLE ||
533 result.status() == CpSolverStatus::OPTIMAL) {
534 // We need to copy the solution before we postsolve it.
535 std::vector<int64_t> solution(result.solution().begin(),
536 result.solution().end());
537 for (int i = solution_postprocessors_.size(); --i >= 0;) {
538 solution_postprocessors_[i](&solution);
539 }
540 result.mutable_solution()->Assign(solution.begin(), solution.end());
541 }
542
543 // Apply response postprocessor to set things like timing information.
544 for (int i = postprocessors_.size(); --i >= 0;) {
545 postprocessors_[i](&result);
546 }
547 return result;
548}
549
550CpSolverResponse SharedResponseManager::GetResponse() {
551 absl::MutexLock mutex_lock(&mutex_);
552 CpSolverResponse result;
553 if (solutions_.NumSolutions() == 0) {
554 result = GetResponseInternal({}, "");
555 } else {
556 std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution>
557 solution = solutions_.GetSolution(0);
558 result = GetResponseInternal(solution->variable_values, solution->info);
559 }
560 // If this is true, we postsolve and copy all of our solutions.
561 if (parameters_.fill_additional_solutions_in_response()) {
562 std::vector<int64_t> temp;
563 for (int i = 0; i < solutions_.NumSolutions(); ++i) {
564 std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution>
565 solution = solutions_.GetSolution(i);
566 temp = solution->variable_values;
567 for (int i = solution_postprocessors_.size(); --i >= 0;) {
568 solution_postprocessors_[i](&temp);
569 }
570 result.add_additional_solutions()->mutable_values()->Assign(temp.begin(),
571 temp.end());
572 }
573 }
574
575 // Final postprocessors will print out the final log. They must be called
576 // last.
577 for (int i = final_postprocessors_.size(); --i >= 0;) {
578 final_postprocessors_[i](&result);
579 }
580
581 return result;
582}
583
584void SharedResponseManager::AppendResponseToBeMerged(
585 const CpSolverResponse& response) {
586 absl::MutexLock mutex_lock(&mutex_);
587 return subsolver_responses_.push_back(response);
588}
589
590void SharedResponseManager::FillObjectiveValuesInResponse(
591 CpSolverResponse* response) const {
592 if (objective_or_null_ == nullptr) return;
593 const CpObjectiveProto& obj = *objective_or_null_;
594
595 if (best_status_ == CpSolverStatus::INFEASIBLE) {
596 response->clear_objective_value();
597 response->clear_best_objective_bound();
598 response->clear_inner_objective_lower_bound();
599 return;
600 }
601
602 // Set the objective value.
603 // If we don't have any solution, we use our inner bound.
604 if (best_status_ == CpSolverStatus::UNKNOWN) {
605 response->set_objective_value(
606 ScaleObjectiveValue(obj, inner_objective_upper_bound_));
607 } else {
608 response->set_objective_value(
609 ScaleObjectiveValue(obj, best_solution_objective_value_));
610 }
611
612 // Update the best bound in the response.
613 response->set_inner_objective_lower_bound(
614 ScaleInnerObjectiveValue(obj, inner_objective_lower_bound_));
615 response->set_best_objective_bound(
616 ScaleObjectiveValue(obj, inner_objective_lower_bound_));
617
618 // Update the primal integral.
619 response->set_gap_integral(gap_integral_);
620}
621
622std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution>
623SharedResponseManager::NewSolution(absl::Span<const int64_t> solution_values,
624 const std::string& solution_info,
625 Model* model) {
626 absl::MutexLock mutex_lock(&mutex_);
627 std::shared_ptr<const SharedSolutionRepository<int64_t>::Solution> ret;
628
629 // For SAT problems, we add the solution to the solution pool for retrieval
630 // later.
631 if (objective_or_null_ == nullptr) {
632 SharedSolutionRepository<int64_t>::Solution solution;
633 solution.variable_values.assign(solution_values.begin(),
634 solution_values.end());
635 solution.info = solution_info;
636 ret = solutions_.Add(solution);
637 } else {
638 const int64_t objective_value =
639 ComputeInnerObjective(*objective_or_null_, solution_values);
640
641 // Add this solution to the pool, even if it is not improving.
642 SharedSolutionRepository<int64_t>::Solution solution;
643 solution.variable_values.assign(solution_values.begin(),
644 solution_values.end());
645 solution.rank = objective_value;
646 solution.info = solution_info;
647 ret = solutions_.Add(solution);
648
649 // Ignore any non-strictly improving solution.
650 if (objective_value > inner_objective_upper_bound_) return ret;
651
652 // Our inner_objective_lower_bound_ should be a globally valid bound, until
653 // the problem becomes infeasible (i.e. lb > ub), in which case the bound
654 // is no longer globally valid. Here, because we have a strictly improving
655 // solution, we shouldn't be in the infeasible setting yet.
656 DCHECK_GE(objective_value, inner_objective_lower_bound_);
657
658 DCHECK_LT(objective_value, best_solution_objective_value_);
659 best_solution_objective_value_ = objective_value;
660
661 // Update the new bound.
662 inner_objective_upper_bound_ = objective_value - 1;
663 }
664
665 // In single-thread mode, no one is synchronizing the solution manager, so we
666 // should do it from here.
667 if (always_synchronize_) {
668 solutions_.Synchronize();
669 first_solution_solvers_should_stop_ = true;
670 }
671
672 // Note that the objective will be filled by
673 // FillObjectiveValuesInResponse().
674 if (objective_or_null_ == nullptr && !parameters_.enumerate_all_solutions()) {
675 UpdateBestStatus(CpSolverStatus::OPTIMAL);
676 } else {
677 UpdateBestStatus(CpSolverStatus::FEASIBLE);
678 }
679
680 // Mark the model as OPTIMAL if the inner bounds have crossed.
681 if (objective_or_null_ != nullptr &&
682 inner_objective_lower_bound_ > inner_objective_upper_bound_) {
683 UpdateBestStatus(CpSolverStatus::OPTIMAL);
684 }
685
686 // Logging.
687 ++num_solutions_;
688
689 // Compute the post-solved response once.
690 CpSolverResponse tmp_postsolved_response;
691 if ((!search_log_callbacks_.empty() && logger_->LoggingIsEnabled()) ||
692 !callbacks_.empty()) {
693 tmp_postsolved_response =
694 GetResponseInternal(solution_values, solution_info);
695
696 // Same as FillSolveStatsInResponse() but since we already hold the mutex...
697 if (model != nullptr && !statistics_postprocessors_.empty()) {
698 for (const auto& set_stats : statistics_postprocessors_) {
699 set_stats(model, &tmp_postsolved_response);
700 }
701 }
702 }
703
704 if (logger_->LoggingIsEnabled()) {
705 std::string solution_message = solution_info;
706 if (tmp_postsolved_response.num_booleans() > 0) {
707 absl::StrAppend(&solution_message, " (fixed_bools=",
708 tmp_postsolved_response.num_fixed_booleans(), "/",
709 tmp_postsolved_response.num_booleans(), ")");
710 }
711
712 if (!search_log_callbacks_.empty()) {
713 for (const auto& pair : search_log_callbacks_) {
714 absl::StrAppend(&solution_message, " ",
715 pair.second(tmp_postsolved_response));
716 }
717 }
718
719 if (objective_or_null_ != nullptr) {
720 const CpObjectiveProto& obj = *objective_or_null_;
721 const double best =
722 ScaleObjectiveValue(obj, best_solution_objective_value_);
723 double lb = ScaleObjectiveValue(obj, inner_objective_lower_bound_);
724 double ub = ScaleObjectiveValue(obj, inner_objective_upper_bound_);
725 if (obj.scaling_factor() < 0) {
726 std::swap(lb, ub);
727 }
728 RegisterSolutionFound(solution_message, num_solutions_);
729 SOLVER_LOG(logger_, ProgressMessage(absl::StrCat(num_solutions_),
730 wall_timer_.Get(), best, lb, ub,
731 solution_message));
732 } else {
733 SOLVER_LOG(logger_,
734 SatProgressMessage(absl::StrCat(num_solutions_),
735 wall_timer_.Get(), solution_message));
736 }
737 }
738
739 // Call callbacks.
740 // Note that we cannot call functions that try to get the mutex_ here.
741 TestGapLimitsIfNeeded();
742 for (const auto& pair : callbacks_) {
743 pair.second(tmp_postsolved_response);
744 }
745
746#if !defined(__PORTABLE_PLATFORM__)
747 // We protect solution dumping with log_updates as LNS subsolvers share
748 // another solution manager, and we do not want to dump those.
749 if (logger_->LoggingIsEnabled() &&
750 absl::GetFlag(FLAGS_cp_model_dump_solutions)) {
751 const std::string file =
752 absl::StrCat(dump_prefix_, "solution_", num_solutions_, ".pb.txt");
753 LOG(INFO) << "Dumping solution to '" << file << "'.";
754
755 // Note that here we only want to dump the non-post-solved solution.
756 // This is only used for debugging.
757 CpSolverResponse response;
758 response.mutable_solution()->Assign(solution_values.begin(),
759 solution_values.end());
760 CHECK_OK(file::SetTextProto(file, response, file::Defaults()));
761 }
762#endif // __PORTABLE_PLATFORM__
763
764 return ret;
765}
766
767bool SharedResponseManager::ProblemIsSolved() const {
768 absl::MutexLock mutex_lock(&mutex_);
769 return synchronized_best_status_ == CpSolverStatus::OPTIMAL ||
770 synchronized_best_status_ == CpSolverStatus::INFEASIBLE;
771}
772
773void SharedResponseManager::UpdateBestStatus(const CpSolverStatus& status) {
774 best_status_ = status;
775 if (always_synchronize_) {
776 synchronized_best_status_ = status;
777 }
778}
779
780std::string ExtractSubSolverName(const std::string& improvement_info) {
781 if (improvement_info.empty()) return "";
782
783 // We assume the subsolver name is always first.
784 for (int i = 0; i < improvement_info.size(); ++i) {
785 if (!std::isalnum(improvement_info[i]) && improvement_info[i] != '_') {
786 return improvement_info.substr(0, i);
787 }
788 }
789
790 return improvement_info;
791}
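
// Examples (not from the original file): the subsolver name is the leading
// [A-Za-z0-9_] prefix of the improvement info, so
//   ExtractSubSolverName("quick_restart (fixed_bools=3/10)") == "quick_restart"
//   ExtractSubSolverName("no_lp") == "no_lp"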
792
793void SharedResponseManager::RegisterSolutionFound(
794 const std::string& improvement_info, int solution_rank) {
795 if (improvement_info.empty()) return;
796 const std::string subsolver_name = ExtractSubSolverName(improvement_info);
797 primal_improvements_count_[subsolver_name]++;
798 primal_improvements_min_rank_.insert({subsolver_name, solution_rank});
799 primal_improvements_max_rank_[subsolver_name] = solution_rank;
800}
801
802void SharedResponseManager::RegisterObjectiveBoundImprovement(
803 const std::string& improvement_info) {
804 if (improvement_info.empty() || improvement_info == "initial domain") return;
805 dual_improvements_count_[ExtractSubSolverName(improvement_info)]++;
806}
807
808void SharedResponseManager::DisplayImprovementStatistics() {
809 absl::MutexLock mutex_lock(&mutex_);
810 if (!primal_improvements_count_.empty()) {
811 std::vector<std::vector<std::string>> table;
812 table.push_back(
813 {absl::StrCat("Solutions (", num_solutions_, ")"), "Num", "Rank"});
814 for (const auto& entry : primal_improvements_count_) {
815 const int min_rank = primal_improvements_min_rank_[entry.first];
816 const int max_rank = primal_improvements_max_rank_[entry.first];
817 table.push_back({FormatName(entry.first), FormatCounter(entry.second),
818 absl::StrCat("[", min_rank, ",", max_rank, "]")});
819 }
820 SOLVER_LOG(logger_, FormatTable(table));
821 }
822 if (!dual_improvements_count_.empty()) {
823 std::vector<std::vector<std::string>> table;
824 table.push_back({"Objective bounds", "Num"});
825 for (const auto& entry : dual_improvements_count_) {
826 table.push_back({FormatName(entry.first), FormatCounter(entry.second)});
827 }
828 SOLVER_LOG(logger_, FormatTable(table));
829 }
830}
831
832SharedBoundsManager::SharedBoundsManager(const CpModelProto& model_proto)
833 : num_variables_(model_proto.variables_size()),
834 model_proto_(model_proto),
835 lower_bounds_(num_variables_, std::numeric_limits<int64_t>::min()),
836 upper_bounds_(num_variables_, std::numeric_limits<int64_t>::max()),
837 synchronized_lower_bounds_(num_variables_,
838 std::numeric_limits<int64_t>::min()),
839 synchronized_upper_bounds_(num_variables_,
840 std::numeric_limits<int64_t>::max()) {
841 changed_variables_since_last_synchronize_.ClearAndResize(num_variables_);
842 for (int i = 0; i < num_variables_; ++i) {
843 lower_bounds_[i] = model_proto.variables(i).domain(0);
844 const int domain_size = model_proto.variables(i).domain_size();
845 upper_bounds_[i] = model_proto.variables(i).domain(domain_size - 1);
846 synchronized_lower_bounds_[i] = lower_bounds_[i];
847 synchronized_upper_bounds_[i] = upper_bounds_[i];
848 }
849
850 // Fill symmetry data.
851 if (model_proto.has_symmetry()) {
852 const int num_vars = model_proto.variables().size();
853 std::vector<std::unique_ptr<SparsePermutation>> generators;
854 for (const SparsePermutationProto& perm :
855 model_proto.symmetry().permutations()) {
856 generators.emplace_back(CreateSparsePermutationFromProto(num_vars, perm));
857 }
858 if (generators.empty()) return;
859
860 // Get orbits in term of IntegerVariable.
861 var_to_orbit_index_ = GetOrbits(num_vars, generators);
862
863 // Fill orbits_.
864 std::vector<int> keys;
865 std::vector<int> values;
866 for (int var = 0; var < num_vars; ++var) {
867 const int orbit_index = var_to_orbit_index_[var];
868 if (orbit_index == -1) continue;
869 keys.push_back(orbit_index);
870 values.push_back(var);
871 }
872 if (keys.empty()) return;
873
874 has_symmetry_ = true;
875 orbits_.ResetFromFlatMapping(keys, values);
876
877 // Fill representative.
878 var_to_representative_.resize(num_vars);
879 for (int var = 0; var < num_vars; ++var) {
880 const int orbit_index = var_to_orbit_index_[var];
881 if (orbit_index == -1) {
882 var_to_representative_[var] = var;
883 } else {
884 var_to_representative_[var] = orbits_[orbit_index][0];
885 }
886 }
887 }
888}
889
890void SharedBoundsManager::ReportPotentialNewBounds(
891 const std::string& worker_name, absl::Span<const int> variables,
892 absl::Span<const int64_t> new_lower_bounds,
893 absl::Span<const int64_t> new_upper_bounds) {
894 CHECK_EQ(variables.size(), new_lower_bounds.size());
895 CHECK_EQ(variables.size(), new_upper_bounds.size());
896 int num_improvements = 0;
897 int num_symmetric_improvements = 0;
898
899 absl::MutexLock mutex_lock(&mutex_);
900 for (int i = 0; i < variables.size(); ++i) {
901 int var = variables[i];
902 if (var >= num_variables_) continue;
903
904 // In the presence of symmetry we only update the representative.
905 if (has_symmetry_) {
906 var = var_to_representative_[var];
907 }
908 const int64_t old_lb = lower_bounds_[var];
909 const int64_t old_ub = upper_bounds_[var];
910 const int64_t new_lb = new_lower_bounds[i];
911 const int64_t new_ub = new_upper_bounds[i];
912 const bool changed_lb = new_lb > old_lb;
913 const bool changed_ub = new_ub < old_ub;
914 if (!changed_lb && !changed_ub) continue;
915
916 VLOG(3) << worker_name << " var=" << var << " [" << old_lb << "," << old_ub
917 << "] -> [" << new_lb << "," << new_ub << "]";
918
919 if (changed_lb) {
920 if (DEBUG_MODE && !debug_solution_.empty()) {
921 CHECK_LE(new_lb, debug_solution_[var]) << worker_name << " var=" << var;
922 }
923 lower_bounds_[var] = new_lb;
924 }
925 if (changed_ub) {
926 if (DEBUG_MODE && !debug_solution_.empty()) {
927 CHECK_GE(new_ub, debug_solution_[var]) << worker_name << " var=" << var;
928 }
929 upper_bounds_[var] = new_ub;
930 }
931 changed_variables_since_last_synchronize_.Set(var);
932 num_improvements++;
933
934 if (has_symmetry_ && variables[i] != var) {
935 // We count -1 so that num_improvements + num_symmetric_improvements
936 // corresponds to the number of actual bound improvements.
937 num_symmetric_improvements +=
938 orbits_[var_to_orbit_index_[var]].size() - 1;
939 }
940 }
941 if (num_improvements > 0) {
942 total_num_improvements_ += num_improvements;
943 VLOG(3) << total_num_improvements_ << "/" << num_variables_;
944 bounds_exported_[worker_name].num_exported += num_improvements;
945 bounds_exported_[worker_name].num_symmetric += num_symmetric_improvements;
946 if (absl::GetFlag(FLAGS_cp_model_dump_tightened_models)) {
947 CpModelProto tight_model = model_proto_;
948 for (int i = 0; i < num_variables_; ++i) {
949 IntegerVariableProto* var_proto = tight_model.mutable_variables(i);
950
951 int rep = i;
952 if (has_symmetry_) rep = var_to_representative_[i];
953 const Domain domain = ReadDomainFromProto(*var_proto)
954 .IntersectionWith(Domain(lower_bounds_[rep],
955 upper_bounds_[rep]));
956 FillDomainInProto(domain, var_proto);
957 }
958 const std::string filename = absl::StrCat(dump_prefix_, "tighened_model_",
959 export_counter_, ".pb.txt");
960 LOG(INFO) << "Dumping tightened model proto to '" << filename << "'.";
961 export_counter_++;
962 CHECK(WriteModelProtoToFile(tight_model, filename));
963 }
964 }
965}
966
967// TODO(user): Because we look at the non-synchronized and up-to-date bounds,
968// this breaks determinism if two solutions for the same subpart come at the
969// same time.
970void SharedBoundsManager::FixVariablesFromPartialSolution(
971 absl::Span<const int64_t> solution,
972 absl::Span<const int> variables_to_fix) {
973 // This function shouldn't be called if we have symmetry.
974 CHECK(!has_symmetry_);
975 absl::MutexLock mutex_lock(&mutex_);
976
977 // Abort if incompatible. Note that we only check the positions that we are
978 // about to fix. This should be enough. Otherwise we might never accept any
979 // solution because the base LNS solution was not the same in some of the
980 // variables that we fixed here.
981 for (const int var : variables_to_fix) {
982 const int64_t value = solution[var];
983 if (value < lower_bounds_[var] || value > upper_bounds_[var]) {
984 VLOG(1) << "Incompatibility in FixVariablesFromPartialSolution() "
985 << "var: " << var << " value: " << value << " bounds: ["
986 << lower_bounds_[var] << "," << upper_bounds_[var] << "]";
987 return;
988 }
989 }
990
991 // Fix the variables.
992 for (const int var : variables_to_fix) {
993 const int64_t old_lb = lower_bounds_[var];
994 const int64_t old_ub = upper_bounds_[var];
995 const bool changed_lb = solution[var] > old_lb;
996 const bool changed_ub = solution[var] < old_ub;
997 if (!changed_lb && !changed_ub) continue;
998
999 lower_bounds_[var] = solution[var];
1000 upper_bounds_[var] = solution[var];
1001 changed_variables_since_last_synchronize_.Set(var);
1002
1003 // This is problematic as we might find a different partial solution.
1004 // To allow for further investigation, we currently fix it to the debug
1005 // solution instead.
1006 if (DEBUG_MODE && !debug_solution_.empty()) {
1007 if (solution[var] != debug_solution_[var]) {
1008 LOG(INFO) << "Fixing to a different solution for var=" << var
1009 << " debug=" << debug_solution_[var]
1010 << " partial=" << solution[var];
1011 lower_bounds_[var] = debug_solution_[var];
1012 upper_bounds_[var] = debug_solution_[var];
1013 }
1014 }
1015 }
1016}
1017
1018void SharedBoundsManager::Synchronize() {
1019 absl::MutexLock mutex_lock(&mutex_);
1020 for (const int var :
1021 changed_variables_since_last_synchronize_.PositionsSetAtLeastOnce()) {
1022 DCHECK(!has_symmetry_ || var_to_representative_[var] == var);
1023 synchronized_lower_bounds_[var] = lower_bounds_[var];
1024 synchronized_upper_bounds_[var] = upper_bounds_[var];
1025 for (int j = 0; j < id_to_changed_variables_.size(); ++j) {
1026 id_to_changed_variables_[j].Set(var);
1027 }
1028 }
1029 changed_variables_since_last_synchronize_.ClearAll();
1030}
1031
1032int SharedBoundsManager::RegisterNewId() {
1033 absl::MutexLock mutex_lock(&mutex_);
1034 const int id = id_to_changed_variables_.size();
1035 id_to_changed_variables_.resize(id + 1);
1036 id_to_changed_variables_[id].ClearAndResize(num_variables_);
1037 for (int var = 0; var < num_variables_; ++var) {
1038 const int64_t lb = model_proto_.variables(var).domain(0);
1039 const int domain_size = model_proto_.variables(var).domain_size();
1040 const int64_t ub = model_proto_.variables(var).domain(domain_size - 1);
1041 if (lb != synchronized_lower_bounds_[var] ||
1042 ub != synchronized_upper_bounds_[var]) {
1043 DCHECK(!has_symmetry_ || var_to_representative_[var] == var);
1044 id_to_changed_variables_[id].Set(var);
1045 }
1046 }
1047 return id;
1048}
1049
1050void SharedBoundsManager::GetChangedBounds(
1051 int id, std::vector<int>* variables, std::vector<int64_t>* new_lower_bounds,
1052 std::vector<int64_t>* new_upper_bounds) {
1053 variables->clear();
1054 new_lower_bounds->clear();
1055 new_upper_bounds->clear();
1056
1057 {
1058 absl::MutexLock mutex_lock(&mutex_);
1059 for (const int var :
1060 id_to_changed_variables_[id].PositionsSetAtLeastOnce()) {
1061 DCHECK(!has_symmetry_ || var_to_representative_[var] == var);
1062 variables->push_back(var);
1063 }
1064 id_to_changed_variables_[id].ClearAll();
1065
1066 // We need to report the bounds in a deterministic order as it is difficult
1067 // to guarantee that nothing depends on the order in which the new bounds are
1068 // processed.
1069 absl::c_sort(*variables);
1070 for (const int var : *variables) {
1071 new_lower_bounds->push_back(synchronized_lower_bounds_[var]);
1072 new_upper_bounds->push_back(synchronized_upper_bounds_[var]);
1073 }
1074 }
1075
1076 // Now that the mutex is released, we can add all symmetric versions, if any.
1077 // Note that alternatively we could do that on the client side, but the
1078 // complexity would be the same; we would just save some memory that is
1079 // usually reused.
1080 if (has_symmetry_) {
1081 const int old_size = variables->size();
1082 for (int i = 0; i < old_size; ++i) {
1083 const int var = (*variables)[i];
1084 const int orbit_index = var_to_orbit_index_[var];
1085 if (orbit_index == -1) continue;
1086
1087 const int64_t lb = (*new_lower_bounds)[i];
1088 const int64_t ub = (*new_upper_bounds)[i];
1089 const auto orbit = orbits_[orbit_index];
1090 CHECK_EQ(var, orbit[0]);
1091 for (const int other : orbit.subspan(1)) {
1092 variables->push_back(other);
1093 new_lower_bounds->push_back(lb);
1094 new_upper_bounds->push_back(ub);
1095 }
1096 }
1097 }
1098}
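
// Illustrative usage sketch, not code from this file: a typical worker
// registers once, then alternates between exporting bounds it proved and
// importing bounds proved by others. The worker name, variable index and
// bounds below are made up for the example.
namespace {
void ExampleBoundsExchange(SharedBoundsManager* shared_bounds) {
  const int id = shared_bounds->RegisterNewId();

  // Export: this worker proved that variable 3 lies in [2, 7].
  shared_bounds->ReportPotentialNewBounds("example_worker", /*variables=*/{3},
                                          /*new_lower_bounds=*/{2},
                                          /*new_upper_bounds=*/{7});

  // Synchronize() (usually called by the coordinating thread) makes exported
  // bounds visible to everyone.
  shared_bounds->Synchronize();

  // Import: retrieve everything that changed since this worker's last call.
  std::vector<int> variables;
  std::vector<int64_t> new_lower_bounds, new_upper_bounds;
  shared_bounds->GetChangedBounds(id, &variables, &new_lower_bounds,
                                  &new_upper_bounds);
}
}  // namespace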
1099
1100void SharedBoundsManager::UpdateDomains(std::vector<Domain>* domains) {
1101 absl::MutexLock mutex_lock(&mutex_);
1102 CHECK_EQ(domains->size(), synchronized_lower_bounds_.size());
1103 for (int var = 0; var < domains->size(); ++var) {
1104 (*domains)[var] = (*domains)[var].IntersectionWith(Domain(
1105 synchronized_lower_bounds_[var], synchronized_upper_bounds_[var]));
1106 }
1107}
1108
1109void SharedBoundsManager::LogStatistics(SolverLogger* logger) {
1110 absl::MutexLock mutex_lock(&mutex_);
1111 if (!bounds_exported_.empty()) {
1112 std::vector<std::vector<std::string>> table;
1113 table.push_back({"Improving bounds shared", "Num", "Sym"});
1114 for (const auto& entry : bounds_exported_) {
1115 table.push_back({FormatName(entry.first),
1116 FormatCounter(entry.second.num_exported),
1117 FormatCounter(entry.second.num_symmetric)});
1118 }
1119 SOLVER_LOG(logger, FormatTable(table));
1120 }
1121}
1122
1123int SharedBoundsManager::NumBoundsExported(absl::string_view worker_name) {
1124 absl::MutexLock mutex_lock(&mutex_);
1125 const auto it = bounds_exported_.find(worker_name);
1126 if (it == bounds_exported_.end()) return 0;
1127 return it->second.num_exported;
1128}
1129
1130UniqueClauseStream::UniqueClauseStream() {
1131 for (auto& buffer : clauses_by_size_) {
1132 buffer.reserve(kMaxBufferedLiterals);
1133 }
1134}
1135
1136bool UniqueClauseStream::Add(absl::Span<const int> clause) {
1137 absl::MutexLock mutex_lock(&mutex_);
1138 if (clause.size() > kMaxClauseSize || clause.size() <= 2) return false;
1139 // This is just a safety check, the caller should have called CanAccept().
1140 if (NumLiteralsOfSize(clause.size()) + clause.size() > kMaxBufferedLiterals) {
1141 return false;
1142 }
1143 if (BlockClause(clause)) {
1144 std::vector<int>* buffer = MutableBufferForSize(clause.size());
1145 buffer->insert(buffer->end(), clause.begin(), clause.end());
1146 return true;
1147 }
1148 return false;
1149}
1150
1151bool UniqueClauseStream::BlockClause(absl::Span<const int> clause) {
1152 if (clause.size() > kMaxClauseSize) return false;
1153 if (clause.size() <= 2) return false;
1154 return fingerprints_.emplace(HashClause(clause)).second;
1155}
1156
1157bool UniqueClauseStream::Delete(absl::Span<const int> clause) {
1158 const size_t fingerprint = HashClause(clause);
1159 absl::MutexLock mutex_lock(&mutex_);
1160 // Note a clause with this hash may be buffered, but not yet exported.
1161 return fingerprints_.erase(fingerprint) == 1;
1162}
1163
1167 int to_fill = kMaxLiteralsPerBatch;
1168 absl::MutexLock mutex_lock(&mutex_);
1169 for (int size = kMinClauseSize; size <= kMaxClauseSize; ++size) {
1170 CHECK_EQ(NumLiteralsOfSize(size) % size, 0);
1171 while (to_fill >= size && NumLiteralsOfSize(size) > 0) {
1172 absl::Span<const int> clause = NextClause(size);
1173 if (fingerprints_.contains(HashClause(clause))) {
1174 buffer.Add(NextClause(size));
1175 to_fill -= size;
1176 }
1177 PopClause(size);
1178 }
1179 }
1180 return buffer;
1181}
1182
1183int UniqueClauseStream::FillUpstreamBuffer(UniqueClauseStream& upstream,
1184 int size,
1185 int max_clauses_to_export) {
1186 int num_exported_clauses = 0;
1187 absl::MutexLock mutex_lock(&mutex_);
1188 while (NumLiteralsOfSize(size) > 0 &&
1189 num_exported_clauses < max_clauses_to_export) {
1190 absl::Span<const int> clause = NextClause(size);
1191 // Don't emit deleted clauses.
1192 if (fingerprints_.contains(HashClause(clause)) && upstream.Add(clause)) {
1193 ++num_exported_clauses;
1194 }
1195 PopClause(size);
1196 }
1197 return num_exported_clauses;
1198}
1199
1200int UniqueClauseStream::NumBufferedLiterals() const {
1201 absl::MutexLock mutex_lock(&mutex_);
1202 int result = 0;
1203 for (const auto& buffer : clauses_by_size_) {
1204 result += buffer.size();
1205 }
1206 return result;
1207}
1208
1209bool UniqueClauseStream::CanAccept(int size, int lbd) const {
1210 if (size <= 2 || size > kMaxClauseSize) return false;
1211 absl::MutexLock mutex_lock(&mutex_);
1212 if (lbd > lbd_threshold_) return false;
1213 int num_literals_up_to_size = 0;
1214 for (int i = kMinClauseSize; i <= size; ++i) {
1215 num_literals_up_to_size += NumLiteralsOfSize(i);
1216 }
1217 return num_literals_up_to_size + size <= kMaxBufferedLiterals;
1218}
1219
1220void UniqueClauseStream::RemoveWorstClauses() {
1221 absl::MutexLock mutex_lock(&mutex_);
1222 int literals_to_remove = 0;
1223 for (const auto& buffer : clauses_by_size_) {
1224 literals_to_remove += buffer.size();
1225 }
1226 literals_to_remove -= kMaxBufferedLiterals;
1227 for (int size = kMaxClauseSize; size >= kMinClauseSize; --size) {
1228 while (NumLiteralsOfSize(size) > 0) {
1229 // Stop if removing one more clause of the current size would
1230 // leave the buffer under full. Otherwise we might remove a shorter
1231 // clause later!
1232 if (literals_to_remove < size) return;
1233 fingerprints_.erase(HashClause(NextClause(size)));
1234 PopClause(size);
1235 literals_to_remove -= size;
1236 }
1237 }
1238}
1239
1240void UniqueClauseStream::set_lbd_threshold(int lbd) {
1241 absl::MutexLock mutex_lock(&mutex_);
1242 lbd_threshold_ = lbd;
1243}
1244
1245size_t UniqueClauseStream::HashClause(absl::Span<const int> clause,
1246 size_t hash_seed) {
1247 size_t hash = absl::HashOf(hash_seed, clause.size());
1248 for (int i = 0; i < clause.size(); ++i) {
1249 hash ^= absl::HashOf(clause[i], hash_seed);
1250 }
1251 return hash;
1252}
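
// Property sketch (not from the original file): because per-literal hashes are
// combined with XOR, the fingerprint ignores literal order but still depends
// on the clause size, e.g.
//   HashClause({1, -2, 3}) == HashClause({3, 1, -2})
//   HashClause({1, -2}) != HashClause({1, -2, 3})   (with high probability)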
1253
1254absl::Span<const int> UniqueClauseStream::NextClause(int size) const {
1255 absl::Span<const int> buffer = BufferForSize(size);
1256 return buffer.subspan(buffer.size() - size, size);
1257}
1258
1259void UniqueClauseStream::PopClause(int size) {
1260 std::vector<int>* buffer = MutableBufferForSize(size);
1261 buffer->erase(buffer->end() - size, buffer->end());
1262}
1263
1264int UniqueClauseStream::NumClausesOfSize(int size) const {
1265 return NumLiteralsOfSize(size) / size;
1266}
1267
1268int UniqueClauseStream::NumLiteralsOfSize(int size) const {
1269 return BufferForSize(size).size();
1270}
1271
1272SharedClausesManager::SharedClausesManager(bool always_synchronize,
1273 absl::Duration share_frequency)
1274 : always_synchronize_(always_synchronize),
1275 share_frequency_(share_frequency) {}
1276
1278 absl::MutexLock mutex_lock(&mutex_);
1279 const int id = id_to_last_processed_binary_clause_.size();
1280 id_to_last_processed_binary_clause_.resize(id + 1, 0);
1281 id_to_last_returned_batch_.resize(id + 1, 0);
1282 id_to_last_finished_batch_.resize(id + 1, 0);
1283 id_to_clauses_exported_.resize(id + 1, 0);
1284 id_to_clause_stream_.emplace_back();
1285 return id;
1286}
1287
1288void SharedClausesManager::SetWorkerNameForId(int id,
1289 absl::string_view worker_name) {
1290 absl::MutexLock mutex_lock(&mutex_);
1291 id_to_worker_name_[id] = worker_name;
1292}
1293
1294void SharedClausesManager::AddBinaryClause(int id, int lit1, int lit2) {
1295 if (lit2 < lit1) std::swap(lit1, lit2);
1296 const auto p = std::make_pair(lit1, lit2);
1297
1298 absl::MutexLock mutex_lock(&mutex_);
1299 const auto [unused_it, inserted] = added_binary_clauses_set_.insert(p);
1300 if (inserted) {
1301 added_binary_clauses_.push_back(p);
1302 if (always_synchronize_) ++last_visible_binary_clause_;
1303 id_to_clauses_exported_[id]++;
1304
1305 // Small optim. If the worker is already up to date with clauses to import,
1306 // we can mark this new clause as already seen.
1307 if (id_to_last_processed_binary_clause_[id] ==
1308 added_binary_clauses_.size() - 1) {
1309 id_to_last_processed_binary_clause_[id]++;
1310 }
1311 }
1312}
1313
1314std::vector<absl::Span<const int>> SharedClausesManager::GetUnseenClauses(
1315 int id) {
1316 std::vector<absl::Span<const int>> result;
1317 absl::MutexLock mutex_lock(&mutex_);
1318 for (int i = id_to_last_returned_batch_[id]; i < batches_.size(); ++i) {
1319 for (int j = 0; j < batches_[i].size(); ++j) {
1320 result.push_back(batches_[i][j]);
1321 }
1322 }
1323 id_to_last_finished_batch_[id] = id_to_last_returned_batch_[id];
1324 id_to_last_returned_batch_[id] = batches_.size();
1325 return result;
1326}
1327
1328void SharedClausesManager::GetUnseenBinaryClauses(
1329 int id, std::vector<std::pair<int, int>>* new_clauses) {
1330 new_clauses->clear();
1331 absl::MutexLock mutex_lock(&mutex_);
1332 const int last_binary_clause_seen = id_to_last_processed_binary_clause_[id];
1333 if (last_binary_clause_seen >= last_visible_binary_clause_) return;
1334
1335 new_clauses->assign(
1336 added_binary_clauses_.begin() + last_binary_clause_seen,
1337 added_binary_clauses_.begin() + last_visible_binary_clause_);
1338 id_to_last_processed_binary_clause_[id] = last_visible_binary_clause_;
1339}
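
// Illustrative usage sketch, not code from this file: a worker, identified by
// the id it obtained from RegisterNewId(), exports binary clauses as it learns
// them and imports what other workers shared after a Synchronize() call. The
// function name and literal values are made up for the example.
namespace {
void ExampleClauseExchange(SharedClausesManager* shared_clauses, int id) {
  // Export a learned binary clause (lit1 OR lit2).
  shared_clauses->AddBinaryClause(id, /*lit1=*/4, /*lit2=*/-7);

  // Import clauses shared by other workers since the last call.
  std::vector<std::pair<int, int>> new_binary_clauses;
  shared_clauses->GetUnseenBinaryClauses(id, &new_binary_clauses);
  const std::vector<absl::Span<const int>> new_long_clauses =
      shared_clauses->GetUnseenClauses(id);
}
}  // namespace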
1340
1341void SharedClausesManager::LogStatistics(SolverLogger* logger) {
1342 absl::MutexLock mutex_lock(&mutex_);
1343 absl::btree_map<std::string, int64_t> name_to_clauses;
1344 for (int id = 0; id < id_to_clauses_exported_.size(); ++id) {
1345 if (id_to_clauses_exported_[id] == 0) continue;
1346 name_to_clauses[id_to_worker_name_[id]] = id_to_clauses_exported_[id];
1347 }
1348 if (!name_to_clauses.empty()) {
1349 std::vector<std::vector<std::string>> table;
1350 table.push_back({"Clauses shared", "Num"});
1351 for (const auto& entry : name_to_clauses) {
1352 table.push_back({FormatName(entry.first), FormatCounter(entry.second)});
1353 }
1354 SOLVER_LOG(logger, FormatTable(table));
1355 }
1356}
1357
1358void SharedClausesManager::Synchronize() {
1359 absl::MutexLock mutex_lock(&mutex_);
1360 last_visible_binary_clause_ = added_binary_clauses_.size();
1361 const int num_workers = id_to_clause_stream_.size();
1362 if (num_workers <= 1) return;
1363 if (!share_timer_.IsRunning()) share_timer_.Start();
1364 if (share_timer_.GetDuration() < share_frequency_) return;
1365 share_timer_.Restart();
1366
1367 // Tune the LBD threshold for individual workers based on how full the
1368 // worker's buffer is, so workers can always export their fair share of clauses.
1369 for (int id = 0; id < num_workers; ++id) {
1370 UniqueClauseStream& stream = id_to_clause_stream_[id];
1371 const int lbd_threshold = stream.lbd_threshold();
1372 const int num_buffered_literals = stream.NumBufferedLiterals();
1373 const bool underfull =
1374 num_buffered_literals <
1376 const bool overfull =
1377 num_buffered_literals >
1379 const int new_lbd = std::clamp(lbd_threshold + underfull - overfull, 2,
1381 if (new_lbd != lbd_threshold) {
1382 VLOG(2) << id_to_worker_name_[id]
1383 << " sharing clauses with lbd <= " << new_lbd;
1384 stream.set_lbd_threshold(new_lbd);
1385 }
1386 }
1387
1388 std::vector<int> ids(num_workers);
1389 int literals_to_fill = UniqueClauseStream::kMaxLiteralsPerBatch;
1390 for (int size = UniqueClauseStream::kMinClauseSize;
1391 size <= UniqueClauseStream::kMaxClauseSize; ++size) {
1392 ids.clear();
1393 for (int id = 0; id < num_workers; ++id) {
1394 if (id_to_clause_stream_[id].NumBufferedLiteralsOfSize(size) > 0) {
1395 ids.push_back(id);
1396 }
1397 }
1398 // Use progressive filling to attempt to fill the batch with clauses of
1399 // minimum size, this is max-min fair.
1400 while (!ids.empty()) {
1401 const int clauses_to_fill = literals_to_fill / size;
1402 if (clauses_to_fill == 0) break;
1403 // Some workers need to export more clauses to fill the batch due to
1404 // rounding, but we don't want all workers to round up.
1405 const int num_to_round_up = clauses_to_fill % ids.size();
1406 for (int i = 0; i < ids.size(); ++i) {
1407 const bool round_up = i < num_to_round_up;
1408 const int id = ids[i];
1409 const int shared = id_to_clause_stream_[id].FillUpstreamBuffer(
1410 all_clauses_, size, clauses_to_fill / ids.size() + round_up);
1411 id_to_clauses_exported_[id] += shared;
1412 if (shared == 0 ||
1413 id_to_clause_stream_[id].NumBufferedLiteralsOfSize(size) == 0) {
1414 ids[i] = ids.back();
1415 ids.pop_back();
1416 --i;
1417 }
1418 }
1419 }
1420 }
1421 if (all_clauses_.NumBufferedLiterals() > 0) {
1422 batches_.push_back(all_clauses_.NextBatch());
1423 VLOG(2) << "Batch #" << batches_.size() << " w/ " << batches_.back().size()
1424 << " clauses max size = "
1425 << batches_.back()[batches_.back().size() - 1].size();
1426 }
1427 // Delete batches that have been consumed by all workers.
1428 // Keep a few batches around for startup (min finished batch doesn't count
1429 // workers that haven't registered yet).
1430 // This also ensures that our fingerprint table always contains the last few
1431 // batches, so we reduce the chance of an old buffered duplicate clause on
1432 // a worker being emitted from the global stream multiple times.
1433 if (batches_.size() < kMinBatches) return;
1434 const int min_finished_batch =
1435 std::min<int>(batches_.size() - kMinBatches,
1436 *absl::c_min_element(id_to_last_finished_batch_));
1437 for (int i = 0; i < min_finished_batch; ++i) {
1438 VLOG(2) << "Erasing batch";
1439 for (int i = 0; i < batches_.front().size(); ++i) {
1440 all_clauses_.Delete(batches_.front()[i]);
1441 }
1442 batches_.pop_front();
1443 }
1444 for (int id = 0; id < id_to_last_finished_batch_.size(); ++id) {
1445 id_to_last_returned_batch_[id] -= min_finished_batch;
1446 id_to_last_finished_batch_[id] -= min_finished_batch;
1447 }
1448 // TODO(user): We could cleanup binary clauses that have been consumed.
1449}
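
// Worked example of the progressive filling above (not from the original file,
// numbers made up): if literals_to_fill is 99, the clause size is 3 and 4
// workers still have clauses of that size, then clauses_to_fill = 99 / 3 = 33
// and num_to_round_up = 33 % 4 = 1, so one worker may export up to
// 33 / 4 + 1 = 9 clauses and the other three up to 8 each, i.e. at most
// 33 clauses (99 literals) for this size.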
1450
1451void SharedStatistics::AddStats(
1452 absl::Span<const std::pair<std::string, int64_t>> stats) {
1453 absl::MutexLock mutex_lock(&mutex_);
1454 for (const auto& [key, count] : stats) {
1455 stats_[key] += count;
1456 }
1457}
1458
1459void SharedStatistics::Log(SolverLogger* logger) {
1460 absl::MutexLock mutex_lock(&mutex_);
1461 if (stats_.empty()) return;
1462
1463 SOLVER_LOG(logger, "Stats across workers (summed):");
1464 std::vector<std::pair<std::string, int64_t>> to_sort_;
1465 for (const auto& [key, count] : stats_) {
1466 to_sort_.push_back({key, count});
1467 }
1468 std::sort(to_sort_.begin(), to_sort_.end());
1469 for (const auto& [key, count] : to_sort_) {
1470 SOLVER_LOG(logger, " ", key, ": ", FormatCounter(count));
1471 }
1472 SOLVER_LOG(logger, "");
1473}
1474
1475} // namespace sat
1476} // namespace operations_research