Google OR-Tools v9.12
a fast and portable software suite for combinatorial optimization
sharder.cc
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/pdlp/sharder.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <vector>

#include "Eigen/Core"
#include "Eigen/SparseCore"
#include "absl/log/check.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/time/time.h"
#include "ortools/base/logging.h"
#include "ortools/base/mathutil.h"
#include "ortools/base/timer.h"
#include "ortools/pdlp/scheduler.h"

namespace operations_research::pdlp {

using ::Eigen::VectorXd;

Sharder::Sharder(const int64_t num_elements, const int num_shards,
                 Scheduler* const scheduler,
                 const std::function<int64_t(int64_t)>& element_mass)
    : scheduler_(scheduler) {
  CHECK_GE(num_elements, 0);
  if (num_elements == 0) {
    shard_starts_.push_back(0);
    return;
  }
  CHECK_GE(num_shards, 1);
  shard_starts_.reserve(
      std::min(static_cast<int64_t>(num_shards), num_elements) + 1);
  shard_masses_.reserve(
      std::min(static_cast<int64_t>(num_shards), num_elements));
  int64_t overall_mass = 0;
  for (int64_t elem = 0; elem < num_elements; ++elem) {
    overall_mass += element_mass(elem);
  }
  shard_starts_.push_back(0);
  int64_t this_shard_mass = element_mass(0);
  for (int64_t elem = 1; elem < num_elements; ++elem) {
    int64_t this_elem_mass = element_mass(elem);
    if (this_shard_mass + (this_elem_mass / 2) >= overall_mass / num_shards) {
      // `elem` starts a new shard.
      shard_masses_.push_back(this_shard_mass);
      shard_starts_.push_back(elem);
      this_shard_mass = this_elem_mass;
    } else {
      this_shard_mass += this_elem_mass;
    }
  }
  shard_starts_.push_back(num_elements);
  shard_masses_.push_back(this_shard_mass);
  CHECK_EQ(NumShards(), shard_masses_.size());
}
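
// Illustrative sketch with hypothetical values: given element masses
// {3, 1, 1, 3} and num_shards = 2, overall_mass is 8 and the per-shard target
// is 4, so the greedy loop above produces shard_starts_ == {0, 2, 4} and
// shard_masses_ == {4, 4}. A null scheduler is allowed and makes
// ParallelForEachShard() run serially.
//
//   std::vector<int64_t> masses = {3, 1, 1, 3};
//   Sharder sharder(/*num_elements=*/4, /*num_shards=*/2,
//                   /*scheduler=*/nullptr,
//                   [&](int64_t elem) { return masses[elem]; });
//   CHECK_EQ(sharder.NumShards(), 2);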

Sharder::Sharder(const int64_t num_elements, const int num_shards,
                 Scheduler* const scheduler)
    : scheduler_(scheduler) {
  CHECK_GE(num_elements, 0);
  if (num_elements == 0) {
    shard_starts_.push_back(0);
    return;
  }
  CHECK_GE(num_shards, 1);
  shard_starts_.reserve(std::min(int64_t{num_shards}, num_elements) + 1);
  shard_masses_.reserve(std::min(int64_t{num_shards}, num_elements));
  if (num_shards >= num_elements) {
    for (int64_t element = 0; element < num_elements; ++element) {
      shard_starts_.push_back(static_cast<int>(element));
      shard_masses_.push_back(1);
    }
  } else {
    for (int shard = 0; shard < num_shards; ++shard) {
      const int64_t this_shard_start = ((num_elements * shard) / num_shards);
      const int64_t next_shard_start =
          ((num_elements * (shard + 1)) / num_shards);
      if (next_shard_start - this_shard_start > 0) {
        shard_starts_.push_back(this_shard_start);
        shard_masses_.push_back(next_shard_start - this_shard_start);
      }
    }
  }
  shard_starts_.push_back(num_elements);
  CHECK_EQ(NumShards(), shard_masses_.size());
}
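
// Illustrative sketch with hypothetical values: with num_elements = 10 and
// num_shards = 3, the integer division above yields
// shard_starts_ == {0, 3, 6, 10} and shard_masses_ == {3, 3, 4}.
//
//   Sharder sharder(/*num_elements=*/10, /*num_shards=*/3,
//                   /*scheduler=*/nullptr);
//   CHECK_EQ(sharder.NumShards(), 3);
//   CHECK_EQ(sharder.ShardSize(2), 4);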

Sharder::Sharder(const Sharder& other_sharder, const int64_t num_elements)
    // The `std::max()` protects against `other_sharder.NumShards() == 0`,
    // which will happen if `other_sharder` had `num_elements == 0`.
    : Sharder(num_elements, std::max(1, other_sharder.NumShards()),
              other_sharder.scheduler_) {}

void Sharder::ParallelForEachShard(
    const std::function<void(const Shard&)>& func) const {
  if (scheduler_) {
    absl::BlockingCounter counter(NumShards());
    VLOG(2) << "Starting ParallelForEachShard()";
    scheduler_->ParallelFor(0, NumShards(), [&](int shard_num) {
      WallTimer timer;
      if (VLOG_IS_ON(2)) {
        timer.Start();
      }
      func(Shard(shard_num, this));
      if (VLOG_IS_ON(2)) {
        timer.Stop();
        VLOG(2) << "Shard " << shard_num << " with " << ShardSize(shard_num)
                << " elements and " << ShardMass(shard_num)
                << " mass finished with "
                << ShardMass(shard_num) /
                       std::max(int64_t{1},
                                absl::ToInt64Microseconds(timer.GetDuration()))
                << " mass/usec.";
      }
    });
    VLOG(2) << "Done ParallelForEachShard()";
  } else {
    for (int shard_num = 0; shard_num < NumShards(); ++shard_num) {
      func(Shard(shard_num, this));
    }
  }
}

double Sharder::ParallelSumOverShards(
    const std::function<double(const Shard&)>& func) const {
  VectorXd local_sums(NumShards());
  ParallelForEachShard([&](const Sharder::Shard& shard) {
    local_sums[shard.Index()] = func(shard);
  });
  return local_sums.sum();
}

bool Sharder::ParallelTrueForAllShards(
    const std::function<bool(const Shard&)>& func) const {
  // Recall `std::vector<bool>` is not thread-safe.
  std::vector<int> local_result(NumShards());
  ParallelForEachShard([&](const Sharder::Shard& shard) {
    local_result[shard.Index()] = static_cast<int>(func(shard));
  });
  return std::all_of(local_result.begin(), local_result.end(),
                     [](const int v) { return static_cast<bool>(v); });
}
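
// Usage sketch, assuming `sharder` is an existing Sharder and `v` is a
// VectorXd with v.size() == sharder.NumElements():
//
//   // Scale each shard of `v` in place, in parallel.
//   sharder.ParallelForEachShard(
//       [&](const Sharder::Shard& shard) { shard(v) *= 2.0; });
//   // Reduce per-shard partial sums into one double.
//   const double sum = sharder.ParallelSumOverShards(
//       [&](const Sharder::Shard& shard) { return shard(v).sum(); });
//   // Check a predicate on every shard.
//   const bool all_finite = sharder.ParallelTrueForAllShards(
//       [&](const Sharder::Shard& shard) { return shard(v).allFinite(); });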

VectorXd TransposedMatrixVectorProduct(
    const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
    const VectorXd& vector, const Sharder& sharder) {
  CHECK_EQ(vector.size(), matrix.rows());
  VectorXd answer(matrix.cols());
  sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
    // NOTE: For very sparse columns, assignment to `shard(answer)` incurs a
    // measurable overhead compared to using a constructor
    // (i.e. `VectorXd temp = ...`). It is not clear why this is the case, nor
    // how to avoid it.
    shard(answer) = shard(matrix).transpose() * vector;
  });
  return answer;
}
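
// Usage sketch with hypothetical `A` (m x n) and `y` (size m): computes
// A.transpose() * y when the sharder splits the columns of `A`, e.g. a
// Sharder built with num_elements == A.cols().
//
//   Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t> A;  // m x n
//   VectorXd y;                                               // size m
//   // ... fill A and y ...
//   Sharder col_sharder(A.cols(), /*num_shards=*/4, /*scheduler=*/nullptr);
//   VectorXd result = TransposedMatrixVectorProduct(A, y, col_sharder);
//   // result.size() == A.cols()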

void SetZero(const Sharder& sharder, VectorXd& dest) {
  dest.resize(sharder.NumElements());
  sharder.ParallelForEachShard(
      [&](const Sharder::Shard& shard) { shard(dest).setZero(); });
}

VectorXd ZeroVector(const Sharder& sharder) {
  VectorXd result(sharder.NumElements());
  SetZero(sharder, result);
  return result;
}

VectorXd OnesVector(const Sharder& sharder) {
  VectorXd result(sharder.NumElements());
  sharder.ParallelForEachShard(
      [&](const Sharder::Shard& shard) { shard(result).setOnes(); });
  return result;
}

void AddScaledVector(const double scale, const VectorXd& increment,
                     const Sharder& sharder, VectorXd& dest) {
  sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
    shard(dest) += scale * shard(increment);
  });
}

void AssignVector(const VectorXd& vec, const Sharder& sharder, VectorXd& dest) {
  dest.resize(vec.size());
  sharder.ParallelForEachShard(
      [&](const Sharder::Shard& shard) { shard(dest) = shard(vec); });
}

VectorXd CloneVector(const VectorXd& vec, const Sharder& sharder) {
  VectorXd dest;
  AssignVector(vec, sharder, dest);
  return dest;
}

void CoefficientWiseProductInPlace(const VectorXd& scale,
                                   const Sharder& sharder, VectorXd& dest) {
  sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
    shard(dest) = shard(dest).cwiseProduct(shard(scale));
  });
}

void CoefficientWiseQuotientInPlace(const VectorXd& scale,
                                    const Sharder& sharder, VectorXd& dest) {
  sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
    shard(dest) = shard(dest).cwiseQuotient(shard(scale));
  });
}

double Dot(const VectorXd& v1, const VectorXd& v2, const Sharder& sharder) {
  return sharder.ParallelSumOverShards(
      [&](const Sharder::Shard& shard) { return shard(v1).dot(shard(v2)); });
}

double LInfNorm(const VectorXd& vector, const Sharder& sharder) {
  VectorXd local_max(sharder.NumShards());
  sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
    local_max[shard.Index()] = shard(vector).lpNorm<Eigen::Infinity>();
  });
  return local_max.lpNorm<Eigen::Infinity>();
}

double L1Norm(const VectorXd& vector, const Sharder& sharder) {
  return sharder.ParallelSumOverShards(
      [&](const Sharder::Shard& shard) { return shard(vector).lpNorm<1>(); });
}

double SquaredNorm(const VectorXd& vector, const Sharder& sharder) {
  return sharder.ParallelSumOverShards(
      [&](const Sharder::Shard& shard) { return shard(vector).squaredNorm(); });
}

double Norm(const VectorXd& vector, const Sharder& sharder) {
  return std::sqrt(SquaredNorm(vector, sharder));
}

double SquaredDistance(const VectorXd& vector1, const VectorXd& vector2,
                       const Sharder& sharder) {
  return sharder.ParallelSumOverShards([&](const Sharder::Shard& shard) {
    return (shard(vector1) - shard(vector2)).squaredNorm();
  });
}

double Distance(const VectorXd& vector1, const VectorXd& vector2,
                const Sharder& sharder) {
  return std::sqrt(SquaredDistance(vector1, vector2, sharder));
}

double ScaledLInfNorm(const VectorXd& vector, const VectorXd& scale,
                      const Sharder& sharder) {
  VectorXd local_max(sharder.NumShards());
  sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
    local_max[shard.Index()] =
        shard(vector).cwiseProduct(shard(scale)).lpNorm<Eigen::Infinity>();
  });
  return local_max.lpNorm<Eigen::Infinity>();
}

double ScaledSquaredNorm(const VectorXd& vector, const VectorXd& scale,
                         const Sharder& sharder) {
  return sharder.ParallelSumOverShards([&](const Sharder::Shard& shard) {
    return shard(vector).cwiseProduct(shard(scale)).squaredNorm();
  });
}

double ScaledNorm(const VectorXd& vector, const VectorXd& scale,
                  const Sharder& sharder) {
  return std::sqrt(ScaledSquaredNorm(vector, scale, sharder));
}
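
// Note: mathematically, the scaled norms above are the plain norms of the
// coefficient-wise product, computed shard by shard without materializing
// that product vector:
//
//   ScaledLInfNorm(v, w, sharder)    == max_i |v[i] * w[i]|
//   ScaledSquaredNorm(v, w, sharder) == sum_i (v[i] * w[i])^2
//   ScaledNorm(v, w, sharder)        == sqrt(sum_i (v[i] * w[i])^2)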

VectorXd ScaledColLInfNorm(
    const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
    const VectorXd& row_scaling_vec, const VectorXd& col_scaling_vec,
    const Sharder& sharder) {
  CHECK_EQ(matrix.cols(), col_scaling_vec.size());
  CHECK_EQ(matrix.rows(), row_scaling_vec.size());
  VectorXd answer(matrix.cols());
  sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
    auto matrix_shard = shard(matrix);
    auto col_scaling_shard = shard(col_scaling_vec);
    for (int64_t col_num = 0; col_num < shard(matrix).outerSize(); ++col_num) {
      double max = 0.0;
      for (decltype(matrix_shard)::InnerIterator it(matrix_shard, col_num); it;
           ++it) {
        max = std::max(max, std::abs(it.value() * row_scaling_vec[it.row()]));
      }
      shard(answer)[col_num] = max * std::abs(col_scaling_shard[col_num]);
    }
  });
  return answer;
}

VectorXd ScaledColL2Norm(
    const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
    const VectorXd& row_scaling_vec, const VectorXd& col_scaling_vec,
    const Sharder& sharder) {
  CHECK_EQ(matrix.cols(), col_scaling_vec.size());
  CHECK_EQ(matrix.rows(), row_scaling_vec.size());
  VectorXd answer(matrix.cols());
  sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
    auto matrix_shard = shard(matrix);
    auto col_scaling_shard = shard(col_scaling_vec);
    for (int64_t col_num = 0; col_num < shard(matrix).outerSize(); ++col_num) {
      double sum_of_squares = 0.0;
      for (decltype(matrix_shard)::InnerIterator it(matrix_shard, col_num); it;
           ++it) {
        sum_of_squares +=
            MathUtil::Square(it.value() * row_scaling_vec[it.row()]);
      }
      shard(answer)[col_num] =
          std::sqrt(sum_of_squares) * std::abs(col_scaling_shard[col_num]);
    }
  });
  return answer;
}
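
// Usage sketch with hypothetical `A`, `row_scale`, and `col_scale`: both
// functions above return per-column norms of the rescaled matrix
// diag(row_scaling_vec) * A * diag(col_scaling_vec), sharding over columns.
// For the L2 variant, entry j is
// sqrt(sum_i (row_scaling_vec[i] * A(i, j))^2) * |col_scaling_vec[j]|.
//
//   Sharder col_sharder(A.cols(), /*num_shards=*/4, /*scheduler=*/nullptr);
//   VectorXd linf = ScaledColLInfNorm(A, row_scale, col_scale, col_sharder);
//   VectorXd l2 = ScaledColL2Norm(A, row_scale, col_scale, col_sharder);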

}  // namespace operations_research::pdlp