Google OR-Tools v9.11 — a fast and portable software suite for combinatorial
optimization. Generated documentation listing for sparse_vector.h.
1// Copyright 2010-2024 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// Classes to represent sparse vectors.
15//
16// The following are very good references for terminology, data structures,
17// and algorithms:
18//
19// I.S. Duff, A.M. Erisman and J.K. Reid, "Direct Methods for Sparse Matrices",
20// Clarendon, Oxford, UK, 1987, ISBN 0-19-853421-3,
21// http://www.amazon.com/dp/0198534213.
22//
23//
24// T.A. Davis, "Direct methods for Sparse Linear Systems", SIAM, Philadelphia,
25// 2006, ISBN-13: 978-0-898716-13, http://www.amazon.com/dp/0898716136.
26//
27//
28// Both books also contain a wealth of references.
29
30#ifndef OR_TOOLS_LP_DATA_SPARSE_VECTOR_H_
31#define OR_TOOLS_LP_DATA_SPARSE_VECTOR_H_
32
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "absl/strings/str_format.h"
#include "ortools/base/logging.h"  // for CHECK*
#include "ortools/base/types.h"
// NOTE(review): the following project includes were dropped by the doc
// extraction (original lines 43-46); restored from the names used below
// (Fractional, StrictITIVector, Permutation, RETURN_IF_NULL) -- confirm
// against the upstream file.
#include "ortools/lp_data/lp_types.h"
#include "ortools/lp_data/permutation.h"
#include "ortools/util/return_macros.h"

48namespace operations_research {
49namespace glop {
50
51template <typename IndexType>
52class SparseVectorEntry;
53
54// --------------------------------------------------------
55// SparseVector
56// --------------------------------------------------------
57// This class allows to store a vector taking advantage of its sparsity.
58// Space complexity is in O(num_entries).
59// In the current implementation, entries are stored in a first-in order (order
60// of SetCoefficient() calls) when they are added; then the "cleaning" process
61// sorts them by index (and duplicates are removed: the last entry takes
62// precedence).
63// Many methods assume that the entries are sorted by index and without
64// duplicates, and DCHECK() that.
65//
66// Default copy construction is fully supported.
67//
68// This class uses strong integer types (i.e. no implicit cast to/from other
69// integer types) for both:
70// - the index of entries (eg. SparseVector<RowIndex> is a SparseColumn,
71// see ./sparse_column.h).
72// - the *internal* indices of entries in the internal storage, which is an
73// entirely different type: EntryType.
74// This class can be extended with a custom iterator/entry type for the
75// iterator-based API. This can be used to extend the interface with additional
76// methods for the entries returned by the iterators; for an example of such
77// extension, see SparseColumnEntry in sparse_column.h. The custom entries and
78// iterators should be derived from SparseVectorEntry and SparseVectorIterator,
79// or at least provide the same public and protected interface.
80//
81// TODO(user): un-expose this type to client; by getting rid of the
82// index-based APIs and leveraging iterator-based APIs; if possible.
83template <typename IndexType,
84 typename IteratorType = VectorIterator<SparseVectorEntry<IndexType>>>
85class SparseVector {
86 public:
87 typedef IndexType Index;
88
92 using Iterator = IteratorType;
93 using Entry = typename Iterator::Entry;
96
97 // NOTE(user): STL uses the expensive copy constructor when relocating
98 // elements of a vector, unless the move constructor exists *and* it is marked
99 // as noexcept. However, the noexcept annotation is banned by the style guide,
100 // and the only way to get it is by using the default move constructor and
101 // assignment operator generated by the compiler.
102 SparseVector(const SparseVector& other);
103#if !defined(_MSC_VER)
104 SparseVector(SparseVector&& other) = default;
105#endif
107 SparseVector& operator=(const SparseVector& other);
108#if !defined(_MSC_VER)
110#endif
112 // Read-only API for a given SparseVector entry. The typical way for a
113 // client to use this is to use the natural range iteration defined by the
114 // Iterator class below:
115 // SparseVector<int> v;
116 // ...
117 // for (const SparseVector<int>::Entry e : v) {
118 // LOG(INFO) << "Index: " << e.index() << ", Coeff: " << e.coefficient();
119 // }
120 //
121 // Note that this can only be used when the vector has no duplicates.
122 //
123 // Note(user): using either "const SparseVector<int>::Entry&" or
124 // "const SparseVector<int>::Entry" yields the exact same performance on the
125 // netlib, thus we recommend to use the latter version, for consistency.
126 Iterator begin() const;
127 Iterator end() const;
129 // Clears the vector, i.e. removes all entries.
130 void Clear();
131
132 // Clears the vector and releases the memory it uses.
133 void ClearAndRelease();
134
135 // Reserve the underlying storage for the given number of entries.
136 void Reserve(EntryIndex new_capacity);
137
138 // Returns true if the vector is empty.
139 bool IsEmpty() const;
140
141 // Cleans the vector, i.e. removes zero-values entries, removes duplicates
142 // entries and sorts remaining entries in increasing index order.
143 // Runs in O(num_entries * log(num_entries)).
144 void CleanUp();
145
146 // Returns true if the entries of this SparseVector are in strictly increasing
147 // index order and if the vector contains no duplicates nor zero coefficients.
148 // Runs in O(num_entries). It is not const because it modifies
149 // possibly_contains_duplicates_.
150 bool IsCleanedUp() const;
151
152 // Swaps the content of this sparse vector with the one passed as argument.
153 // Works in O(1).
154 void Swap(SparseVector* other);
155
156 // Populates the current vector from sparse_vector.
157 // Runs in O(num_entries).
158 void PopulateFromSparseVector(const SparseVector& sparse_vector);
159
160 // Populates the current vector from dense_vector.
161 // Runs in O(num_indices_in_dense_vector).
162 void PopulateFromDenseVector(const DenseVector& dense_vector);
163
164 // Appends all entries from sparse_vector to the current vector; the indices
165 // of the appended entries are increased by offset. If the current vector
166 // already has a value at an index changed by this method, this value is
167 // overwritten with the value from sparse_vector.
168 // Note that while offset may be negative itself, the indices of all entries
169 // after applying the offset must be non-negative.
170 void AppendEntriesWithOffset(const SparseVector& sparse_vector, Index offset);
171
172 // Returns true when the vector contains no duplicates. Runs in
173 // O(max_index + num_entries), max_index being the largest index in entry.
174 // This method allocates (and deletes) a Boolean array of size max_index.
175 // Note that we use a mutable Boolean to make subsequent call runs in O(1).
176 bool CheckNoDuplicates() const;
177
178 // Same as CheckNoDuplicates() except it uses a reusable boolean vector
179 // to make the code more efficient. Runs in O(num_entries).
180 // Note that boolean_vector should be initialized to false before calling this
181 // method; It will remain equal to false after calls to CheckNoDuplicates().
182 // Note that we use a mutable Boolean to make subsequent call runs in O(1).
183 bool CheckNoDuplicates(StrictITIVector<Index, bool>* boolean_vector) const;
184
185 // Defines the coefficient at index, i.e. vector[index] = value;
187
188 // Removes an entry from the vector if present. The order of the other entries
189 // is preserved. Runs in O(num_entries).
190 void DeleteEntry(Index index);
191
192 // Sets to 0.0 (i.e. remove) all entries whose fabs() is lower or equal to
193 // the given threshold.
194 void RemoveNearZeroEntries(Fractional threshold);
195
196 // Same as RemoveNearZeroEntries, but the entry magnitude of each row is
197 // multiplied by weights[row] before being compared with threshold.
199 const DenseVector& weights);
201 // Moves the entry with given Index to the first position in the vector. If
202 // the entry is not present, nothing happens.
204
205 // Moves the entry with given Index to the last position in the vector. If
206 // the entry is not present, nothing happens.
208
209 // Multiplies all entries by factor.
210 // i.e. entry.coefficient *= factor.
211 void MultiplyByConstant(Fractional factor);
212
213 // Multiplies all entries by its corresponding factor,
214 // i.e. entry.coefficient *= factors[entry.index].
215 void ComponentWiseMultiply(const DenseVector& factors);
216
217 // Divides all entries by factor.
218 // i.e. entry.coefficient /= factor.
219 void DivideByConstant(Fractional factor);
220
221 // Divides all entries by its corresponding factor,
222 // i.e. entry.coefficient /= factors[entry.index].
223 void ComponentWiseDivide(const DenseVector& factors);
224
225 // Populates a dense vector from the sparse vector.
226 // Runs in O(num_indices) as the dense vector values have to be reset to 0.0.
227 void CopyToDenseVector(Index num_indices, DenseVector* dense_vector) const;
228
229 // Populates a dense vector from the permuted sparse vector.
230 // Runs in O(num_indices) as the dense vector values have to be reset to 0.0.
231 void PermutedCopyToDenseVector(const IndexPermutation& index_perm,
232 Index num_indices,
233 DenseVector* dense_vector) const;
234
235 // Performs the operation dense_vector += multiplier * this.
236 // This is known as multiply-accumulate or (fused) multiply-add.
237 void AddMultipleToDenseVector(Fractional multiplier,
238 DenseVector* dense_vector) const;
240 // WARNING: BOTH vectors (the current and the destination) MUST be "clean",
241 // i.e. sorted and without duplicates.
242 // Performs the operation accumulator_vector += multiplier * this, removing
243 // a given index which must be in both vectors, and pruning new entries whose
244 // absolute value are under the given drop_tolerance.
246 Fractional multiplier, Index removed_common_index,
247 Fractional drop_tolerance, SparseVector* accumulator_vector) const;
248
249 // Same as AddMultipleToSparseVectorAndDeleteCommonIndex() but instead of
250 // deleting the common index, leave it unchanged.
252 Fractional multiplier, Index removed_common_index,
253 Fractional drop_tolerance, SparseVector* accumulator_vector) const;
254
255 // Applies the index permutation to all entries: index = index_perm[index];
256 void ApplyIndexPermutation(const IndexPermutation& index_perm);
257
258 // Same as ApplyIndexPermutation but deletes the index if index_perm[index]
259 // is negative.
260 void ApplyPartialIndexPermutation(const IndexPermutation& index_perm);
261
262 // Removes the entries for which index_perm[index] is non-negative and appends
263 // them to output. Note that the index of the entries are NOT permuted.
264 void MoveTaggedEntriesTo(const IndexPermutation& index_perm,
265 SparseVector* output);
267 // Returns the coefficient at position index.
268 // Call with care: runs in O(number-of-entries) as entries may not be sorted.
270
271 // Note this method can only be used when the vector has no duplicates.
272 EntryIndex num_entries() const {
273 DCHECK(CheckNoDuplicates());
274 return EntryIndex(num_entries_);
275 }
276
277 // Returns the first entry's index and coefficient; note that 'first' doesn't
278 // mean 'entry with the smallest index'.
279 // Runs in O(1).
280 // Note this method can only be used when the vector has no duplicates.
281 Index GetFirstIndex() const {
282 DCHECK(CheckNoDuplicates());
283 return GetIndex(EntryIndex(0));
284 }
286 DCHECK(CheckNoDuplicates());
287 return GetCoefficient(EntryIndex(0));
288 }
289
290 // Like GetFirst*, but for the last entry.
291 Index GetLastIndex() const {
292 DCHECK(CheckNoDuplicates());
293 return GetIndex(num_entries() - 1);
294 }
296 DCHECK(CheckNoDuplicates());
298 }
299
300 // Allows to loop over the entry indices like this:
301 // for (const EntryIndex i : sparse_vector.AllEntryIndices()) { ... }
302 // TODO(user): consider removing this, in favor of the natural range
303 // iteration.
305 return ::util::IntegerRange<EntryIndex>(EntryIndex(0), num_entries_);
307
308 // Returns true if this vector is exactly equal to the given one, i.e. all its
309 // index indices and coefficients appear in the same order and are equal.
310 bool IsEqualTo(const SparseVector& other) const;
311
312 // An exhaustive, pretty-printed listing of the entries, in their
313 // internal order. a.DebugString() == b.DebugString() iff a.IsEqualTo(b).
314 std::string DebugString() const;
315
316 protected:
317 // Adds a new entry to the sparse vector, growing the internal buffer if
318 // needed. It does not set may_contain_duplicates_ to true.
320 DCHECK_GE(index, 0);
321 // Grow the internal storage if there is no space left for the new entry. We
322 // increase the size to max(4, 1.5*current capacity).
323 if (num_entries_ == capacity_) {
324 // Reserve(capacity_ == 0 ? EntryIndex(4)
325 // : EntryIndex(2 * capacity_.value()));
326 Reserve(capacity_ == 0 ? EntryIndex(4)
327 : EntryIndex(2 * capacity_.value()));
328 DCHECK_LT(num_entries_, capacity_);
329 }
330 const EntryIndex new_entry_index = num_entries_;
331 ++num_entries_;
332 MutableIndex(new_entry_index) = index;
333 MutableCoefficient(new_entry_index) = value;
334 }
335
336 // Resizes the sparse vector to a smaller size, without re-allocating the
337 // internal storage.
338 void ResizeDown(EntryIndex new_size) {
339 DCHECK_GE(new_size, 0);
340 DCHECK_LE(new_size, num_entries_);
341 num_entries_ = new_size;
342 }
343
344 // Read-only access to the indices and coefficients of the entries of the
345 // sparse vector.
346 Index GetIndex(EntryIndex i) const {
347 DCHECK_GE(i, 0);
348 DCHECK_LT(i, num_entries_);
349 return index_[i.value()];
350 }
351 Fractional GetCoefficient(EntryIndex i) const {
352 DCHECK_GE(i, 0);
353 DCHECK_LT(i, num_entries_);
354 return coefficient_[i.value()];
355 }
356
357 // Mutable access to the indices and coefficients of the entries of the sparse
358 // vector.
359 Index& MutableIndex(EntryIndex i) {
360 DCHECK_GE(i, 0);
361 DCHECK_LT(i, num_entries_);
362 return index_[i.value()];
363 }
364 Fractional& MutableCoefficient(EntryIndex i) {
365 DCHECK_GE(i, 0);
366 DCHECK_LT(i, num_entries_);
367 return coefficient_[i.value()];
368 }
369
370 // The internal storage of the sparse vector. Both the indices and the
371 // coefficients are stored in the same buffer; the first
372 // sizeof(Index)*capacity_ bytes are used for storing the indices, the
373 // following sizeof(Fractional)*capacity_ bytes contain the values. This
374 // representation ensures that for small vectors, both the indices and the
375 // coefficients are in the same page/cache line.
376 // We use a single buffer for both arrays. The amount of data copied during
377 // relocations is the same in both cases, and it is much smaller than the cost
378 // of an additional allocation - especially when the vectors are small.
379 // Moreover, using two separate vectors/buffers would mean that even small
380 // vectors would be spread across at least two different cache lines.
381 std::unique_ptr<char[]> buffer_;
382 EntryIndex num_entries_;
383 EntryIndex capacity_;
385 // Pointers to the first elements of the index and coefficient arrays.
386 Index* index_ = nullptr;
387 Fractional* coefficient_ = nullptr;
389 // This is here to speed up the CheckNoDuplicates() methods and is mutable
390 // so we can perform checks on const argument.
391 mutable bool may_contain_duplicates_;
392
393 private:
394 // Actual implementation of AddMultipleToSparseVectorAndDeleteCommonIndex()
395 // and AddMultipleToSparseVectorAndIgnoreCommonIndex() which is shared.
396 void AddMultipleToSparseVectorInternal(
397 bool delete_common_index, Fractional multiplier, Index common_index,
398 Fractional drop_tolerance, SparseVector* accumulator_vector) const;
399};
400
401// --------------------------------------------------------
402// SparseVectorEntry
403// --------------------------------------------------------
404
405// A reference-like class that points to a certain element of a sparse data
406// structure that stores its elements in two parallel arrays. The main purpose
407// of the entry class is to support implementation of iterator objects over the
408// sparse data structure.
409// Note that the entry object does not own the data, and it is valid only as
410// long as the underlying sparse data structure; it may also be invalidated if
411// the underlying sparse data structure is modified.
412template <typename IndexType>
413class SparseVectorEntry {
414 public:
415 using Index = IndexType;
416
417 Index index() const { return index_[i_.value()]; }
418 Fractional coefficient() const { return coefficient_[i_.value()]; }
420 protected:
421 // Creates the sparse vector entry from the given base pointers and the index.
422 // We accept the low-level data structures rather than a SparseVector
423 // reference to make it possible to use the SparseVectorEntry and
424 // SparseVectorIterator classes also for other data structures using the same
425 // internal data representation.
426 // Note that the constructor is intentionally made protected, so that the
427 // entry can be created only as a part of the construction of an iterator over
428 // a sparse data structure.
429 SparseVectorEntry(const Index* indices, const Fractional* coefficients,
430 EntryIndex i)
431 : i_(i), index_(indices), coefficient_(coefficients) {}
432
433 // The index of the sparse vector entry represented by this object.
434 EntryIndex i_;
435 // The index and coefficient arrays of the sparse vector.
436 // NOTE(user): Keeping directly the index and the base pointers gives the
437 // best performance with a tiny margin of the options:
438 // 1. keep the base pointers and an index of the current entry,
439 // 2. keep pointers to the current index and the current coefficient and
440 // increment both when moving the iterator.
441 // 3. keep a pointer to the sparse vector object and the index of the current
442 // entry.
443 const Index* index_;
445};
446
447template <typename IndexType, typename IteratorType>
449 return Iterator(this->index_, this->coefficient_, EntryIndex(0));
450}
451
452template <typename IndexType, typename IteratorType>
454 return Iterator(this->index_, this->coefficient_, num_entries_);
455}
456
457// --------------------------------------------------------
458// SparseVector implementation
459// --------------------------------------------------------
460template <typename IndexType, typename IteratorType>
462 : num_entries_(0),
463 capacity_(0),
464 index_(nullptr),
465 coefficient_(nullptr),
466 may_contain_duplicates_(false) {}
468template <typename IndexType, typename IteratorType>
470 PopulateFromSparseVector(other);
471}
472
473template <typename IndexType, typename IteratorType>
476 PopulateFromSparseVector(other);
477 return *this;
478}
479
480template <typename IndexType, typename IteratorType>
482 num_entries_ = EntryIndex(0);
483 may_contain_duplicates_ = false;
484}
485
486template <typename IndexType, typename IteratorType>
488 capacity_ = EntryIndex(0);
489 num_entries_ = EntryIndex(0);
490 index_ = nullptr;
491 coefficient_ = nullptr;
492 buffer_.reset();
493 may_contain_duplicates_ = false;
494}
495
496template <typename IndexType, typename IteratorType>
497void SparseVector<IndexType, IteratorType>::Reserve(EntryIndex new_capacity) {
498 if (new_capacity <= capacity_) return;
499 // Round up the capacity to a multiple of four. This way, the start of the
500 // coefficient array will be aligned to 16-bytes, provided that the buffer
501 // used for storing the data is aligned in that way.
502 if (new_capacity.value() & 3) {
503 new_capacity += EntryIndex(4 - (new_capacity.value() & 3));
504 }
505
506 const size_t index_buffer_size = new_capacity.value() * sizeof(Index);
507 const size_t value_buffer_size = new_capacity.value() * sizeof(Fractional);
508 const size_t new_buffer_size = index_buffer_size + value_buffer_size;
509 std::unique_ptr<char[]> new_buffer(new char[new_buffer_size]);
510 IndexType* const new_index = reinterpret_cast<Index*>(new_buffer.get());
511 Fractional* const new_coefficient =
512 reinterpret_cast<Fractional*>(new_index + new_capacity.value());
513
514 // Avoid copying the data if the vector is empty.
515 if (num_entries_ > 0) {
516 // NOTE(user): We use memmove instead of std::copy, because the latter
517 // leads to naive copying code when used with strong ints (a loop that
518 // copies a single 32-bit value in each iteration), and as of 06/2016,
519 // memmove is 3-4x faster on Haswell.
520 std::memmove(new_index, index_, sizeof(IndexType) * num_entries_.value());
521 std::memmove(new_coefficient, coefficient_,
522 sizeof(Fractional) * num_entries_.value());
523 }
524 std::swap(buffer_, new_buffer);
525 index_ = new_index;
526 coefficient_ = new_coefficient;
527 capacity_ = new_capacity;
528}
529
530template <typename IndexType, typename IteratorType>
532 return num_entries_ == EntryIndex(0);
533}
534
535template <typename IndexType, typename IteratorType>
536void SparseVector<IndexType, IteratorType>::Swap(SparseVector* other) {
537 std::swap(buffer_, other->buffer_);
538 std::swap(num_entries_, other->num_entries_);
539 std::swap(capacity_, other->capacity_);
540 std::swap(may_contain_duplicates_, other->may_contain_duplicates_);
541 std::swap(index_, other->index_);
542 std::swap(coefficient_, other->coefficient_);
543}
544
545template <typename IndexType, typename IteratorType>
547 // TODO(user): Implement in-place sorting of the entries and cleanup. The
548 // current version converts the data to an array-of-pairs representation that
549 // can be sorted easily with std::stable_sort, and the converts the sorted
550 // data back to the struct-of-arrays implementation.
551 // The current version is ~20% slower than the in-place sort on the
552 // array-of-struct representation. It is not visible on GLOP benchmarks, but
553 // it increases peak memory usage by ~8%.
554 // Implementing in-place search will require either implementing a custom
555 // sorting code, or custom iterators that abstract away the internal
556 // representation.
557 std::vector<std::pair<Index, Fractional>> entries;
558 entries.reserve(num_entries_.value());
559 for (EntryIndex i(0); i < num_entries_; ++i) {
560 entries.emplace_back(GetIndex(i), GetCoefficient(i));
561 }
562 std::stable_sort(
563 entries.begin(), entries.end(),
564 [](const std::pair<Index, Fractional>& a,
565 const std::pair<Index, Fractional>& b) { return a.first < b.first; });
566
567 EntryIndex new_size(0);
568 for (int i = 0; i < num_entries_; ++i) {
569 const std::pair<Index, Fractional> entry = entries[i];
570 if (entry.second == 0.0) continue;
571 if (i + 1 == num_entries_ || entry.first != entries[i + 1].first) {
572 MutableIndex(new_size) = entry.first;
573 MutableCoefficient(new_size) = entry.second;
574 ++new_size;
575 }
576 }
577 ResizeDown(new_size);
578 may_contain_duplicates_ = false;
579}
580
581template <typename IndexType, typename IteratorType>
583 Index previous_index(-1);
584 for (const EntryIndex i : AllEntryIndices()) {
585 const Index index = GetIndex(i);
586 if (index <= previous_index || GetCoefficient(i) == 0.0) return false;
587 previous_index = index;
589 may_contain_duplicates_ = false;
590 return true;
591}
592
593template <typename IndexType, typename IteratorType>
595 const SparseVector& sparse_vector) {
596 // Clear the sparse vector before reserving the new capacity. If we didn't do
597 // this, Reserve would have to copy the current contents of the vector if it
598 // allocated a new buffer. This would be wasteful, since we overwrite it in
599 // the next step anyway.
600 Clear();
601 Reserve(sparse_vector.capacity_);
602 // If there are no entries, then sparse_vector.index_ or .coefficient_
603 // may be nullptr or invalid, and accessing them in memmove is UB,
604 // even if the moved size is zero.
605 if (sparse_vector.num_entries_ > 0) {
606 // NOTE(user): Using a single memmove would be slightly faster, but it
607 // would not work correctly if this already had a greater capacity than
608 // sparse_vector, because the coefficient_ pointer would be positioned
609 // incorrectly.
610 std::memmove(index_, sparse_vector.index_,
611 sizeof(Index) * sparse_vector.num_entries_.value());
612 std::memmove(coefficient_, sparse_vector.coefficient_,
613 sizeof(Fractional) * sparse_vector.num_entries_.value());
614 }
615 num_entries_ = sparse_vector.num_entries_;
616 may_contain_duplicates_ = sparse_vector.may_contain_duplicates_;
617}
618
619template <typename IndexType, typename IteratorType>
621 const DenseVector& dense_vector) {
622 Clear();
623 const Index num_indices(dense_vector.size());
624 for (Index index(0); index < num_indices; ++index) {
625 if (dense_vector[index] != 0.0) {
626 SetCoefficient(index, dense_vector[index]);
627 }
628 }
629 may_contain_duplicates_ = false;
630}
631
632template <typename IndexType, typename IteratorType>
634 const SparseVector& sparse_vector, Index offset) {
635 for (const EntryIndex i : sparse_vector.AllEntryIndices()) {
636 const Index new_index = offset + sparse_vector.GetIndex(i);
637 DCHECK_GE(new_index, 0);
638 AddEntry(new_index, sparse_vector.GetCoefficient(i));
640 may_contain_duplicates_ = true;
641}
642
643template <typename IndexType, typename IteratorType>
645 StrictITIVector<IndexType, bool>* boolean_vector) const {
646 RETURN_VALUE_IF_NULL(boolean_vector, false);
647 // Note(user): Using num_entries() or any function that call
648 // CheckNoDuplicates() again will cause an infinite loop!
649 if (!may_contain_duplicates_ || num_entries_ <= 1) return true;
651 // Update size if needed.
652 const Index max_index =
653 *std::max_element(index_, index_ + num_entries_.value());
654 if (boolean_vector->size() <= max_index) {
655 boolean_vector->resize(max_index + 1, false);
656 }
657
658 may_contain_duplicates_ = false;
659 for (const EntryIndex i : AllEntryIndices()) {
660 const Index index = GetIndex(i);
661 if ((*boolean_vector)[index]) {
662 may_contain_duplicates_ = true;
663 break;
664 }
665 (*boolean_vector)[index] = true;
666 }
667
668 // Reset boolean_vector to false.
669 for (const EntryIndex i : AllEntryIndices()) {
670 (*boolean_vector)[GetIndex(i)] = false;
671 }
672 return !may_contain_duplicates_;
673}
674
675template <typename IndexType, typename IteratorType>
677 // Using num_entries() or any function in that will call CheckNoDuplicates()
678 // again will cause an infinite loop!
679 if (!may_contain_duplicates_ || num_entries_ <= 1) return true;
680 StrictITIVector<Index, bool> boolean_vector;
681 return CheckNoDuplicates(&boolean_vector);
683
684// Do not filter out zero values, as a zero value can be added to reset a
685// previous value. Zero values and duplicates will be removed by CleanUp.
686template <typename IndexType, typename IteratorType>
689 AddEntry(index, value);
690 may_contain_duplicates_ = true;
691}
692
693template <typename IndexType, typename IteratorType>
695 DCHECK(CheckNoDuplicates());
696 EntryIndex i(0);
697 const EntryIndex end(num_entries());
698 while (i < end && GetIndex(i) != index) {
699 ++i;
701 if (i == end) return;
702 const int num_moved_entries = (num_entries_ - i).value() - 1;
703 std::memmove(index_ + i.value(), index_ + i.value() + 1,
704 sizeof(Index) * num_moved_entries);
705 std::memmove(coefficient_ + i.value(), coefficient_ + i.value() + 1,
706 sizeof(Fractional) * num_moved_entries);
707 --num_entries_;
708}
709
710template <typename IndexType, typename IteratorType>
712 Fractional threshold) {
713 DCHECK(CheckNoDuplicates());
714 EntryIndex new_index(0);
715 for (const EntryIndex i : AllEntryIndices()) {
716 const Fractional magnitude = fabs(GetCoefficient(i));
717 if (magnitude > threshold) {
718 MutableIndex(new_index) = GetIndex(i);
719 MutableCoefficient(new_index) = GetCoefficient(i);
720 ++new_index;
721 }
722 }
723 ResizeDown(new_index);
724}
725
726template <typename IndexType, typename IteratorType>
728 Fractional threshold, const DenseVector& weights) {
729 DCHECK(CheckNoDuplicates());
730 EntryIndex new_index(0);
731 for (const EntryIndex i : AllEntryIndices()) {
732 if (fabs(GetCoefficient(i)) * weights[GetIndex(i)] > threshold) {
733 MutableIndex(new_index) = GetIndex(i);
734 MutableCoefficient(new_index) = GetCoefficient(i);
735 ++new_index;
736 }
737 }
738 ResizeDown(new_index);
739}
740
741template <typename IndexType, typename IteratorType>
743 Index index) {
744 DCHECK(CheckNoDuplicates());
745 for (const EntryIndex i : AllEntryIndices()) {
746 if (GetIndex(i) == index) {
747 std::swap(MutableIndex(EntryIndex(0)), MutableIndex(i));
748 std::swap(MutableCoefficient(EntryIndex(0)), MutableCoefficient(i));
749 return;
750 }
751 }
752}
753
754template <typename IndexType, typename IteratorType>
756 Index index) {
757 DCHECK(CheckNoDuplicates());
758 const EntryIndex last_entry = num_entries() - 1;
759 for (const EntryIndex i : AllEntryIndices()) {
760 if (GetIndex(i) == index) {
761 std::swap(MutableIndex(last_entry), MutableIndex(i));
762 std::swap(MutableCoefficient(last_entry), MutableCoefficient(i));
763 return;
764 }
765 }
766}
767
768template <typename IndexType, typename IteratorType>
770 Fractional factor) {
771 for (const EntryIndex i : AllEntryIndices()) {
772 MutableCoefficient(i) *= factor;
773 }
774}
776template <typename IndexType, typename IteratorType>
778 const DenseVector& factors) {
779 for (const EntryIndex i : AllEntryIndices()) {
780 MutableCoefficient(i) *= factors[GetIndex(i)];
781 }
782}
784template <typename IndexType, typename IteratorType>
786 Fractional factor) {
787 for (const EntryIndex i : AllEntryIndices()) {
788 MutableCoefficient(i) /= factor;
789 }
790}
792template <typename IndexType, typename IteratorType>
794 const DenseVector& factors) {
795 for (const EntryIndex i : AllEntryIndices()) {
796 MutableCoefficient(i) /= factors[GetIndex(i)];
797 }
798}
800template <typename IndexType, typename IteratorType>
802 Index num_indices, DenseVector* dense_vector) const {
803 RETURN_IF_NULL(dense_vector);
804 dense_vector->AssignToZero(num_indices);
805 for (const EntryIndex i : AllEntryIndices()) {
806 (*dense_vector)[GetIndex(i)] = GetCoefficient(i);
808}
809
810template <typename IndexType, typename IteratorType>
812 const IndexPermutation& index_perm, Index num_indices,
813 DenseVector* dense_vector) const {
814 RETURN_IF_NULL(dense_vector);
815 dense_vector->AssignToZero(num_indices);
816 for (const EntryIndex i : AllEntryIndices()) {
817 (*dense_vector)[index_perm[GetIndex(i)]] = GetCoefficient(i);
818 }
819}
820
821template <typename IndexType, typename IteratorType>
823 Fractional multiplier, DenseVector* dense_vector) const {
824 RETURN_IF_NULL(dense_vector);
825 if (multiplier == 0.0) return;
826 for (const EntryIndex i : AllEntryIndices()) {
827 (*dense_vector)[GetIndex(i)] += multiplier * GetCoefficient(i);
829}
830
831template <typename IndexType, typename IteratorType>
834 Fractional multiplier, Index removed_common_index,
835 Fractional drop_tolerance, SparseVector* accumulator_vector) const {
836 AddMultipleToSparseVectorInternal(true, multiplier, removed_common_index,
837 drop_tolerance, accumulator_vector);
839
840template <typename IndexType, typename IteratorType>
843 Fractional multiplier, Index removed_common_index,
844 Fractional drop_tolerance, SparseVector* accumulator_vector) const {
845 AddMultipleToSparseVectorInternal(false, multiplier, removed_common_index,
846 drop_tolerance, accumulator_vector);
848
849template <typename IndexType, typename IteratorType>
851 bool delete_common_index, Fractional multiplier, Index common_index,
852 Fractional drop_tolerance, SparseVector* accumulator_vector) const {
853 // DCHECK that the input is correct.
854 DCHECK(IsCleanedUp());
855 DCHECK(accumulator_vector->IsCleanedUp());
856 DCHECK(CheckNoDuplicates());
857 DCHECK(accumulator_vector->CheckNoDuplicates());
858 DCHECK_NE(0.0, LookUpCoefficient(common_index));
859 DCHECK_NE(0.0, accumulator_vector->LookUpCoefficient(common_index));
860
861 // Implementation notes: we create a temporary SparseVector "c" to hold the
862 // result. We call "a" the first vector (i.e. the current object, which will
863 // be multiplied by "multiplier"), and "b" the second vector (which will be
864 // swapped with "c" at the end to hold the result).
865 // We incrementally build c as: a * multiplier + b.
866 const SparseVector& a = *this;
867 const SparseVector& b = *accumulator_vector;
868 SparseVector c;
869 EntryIndex ia(0); // Index in the vector "a"
870 EntryIndex ib(0); // ... and "b"
871 EntryIndex ic(0); // ... and "c"
872 const EntryIndex size_a = a.num_entries();
873 const EntryIndex size_b = b.num_entries();
874 const int size_adjustment = delete_common_index ? -2 : 0;
875 const EntryIndex new_size_upper_bound = size_a + size_b + size_adjustment;
876 c.Reserve(new_size_upper_bound);
877 c.num_entries_ = new_size_upper_bound;
878 while ((ia < size_a) && (ib < size_b)) {
879 const Index index_a = a.GetIndex(ia);
880 const Index index_b = b.GetIndex(ib);
881 // Benchmarks done by fdid@ in 2012 showed that it was faster to put the
882 // "if" clauses in that specific order.
883 if (index_a == index_b) {
884 if (index_a != common_index) {
885 const Fractional a_coeff_mul = multiplier * a.GetCoefficient(ia);
886 const Fractional b_coeff = b.GetCoefficient(ib);
887 const Fractional sum = a_coeff_mul + b_coeff;
888
889 // We do not want to leave near-zero entries.
890 // TODO(user): expose the tolerance used here.
891 if (std::abs(sum) > drop_tolerance) {
892 c.MutableIndex(ic) = index_a;
893 c.MutableCoefficient(ic) = sum;
894 ++ic;
895 }
896 } else if (!delete_common_index) {
897 c.MutableIndex(ic) = b.GetIndex(ib);
898 c.MutableCoefficient(ic) = b.GetCoefficient(ib);
899 ++ic;
900 }
901 ++ia;
902 ++ib;
903 } else if (index_a < index_b) {
904 const Fractional new_value = multiplier * a.GetCoefficient(ia);
905 if (std::abs(new_value) > drop_tolerance) {
906 c.MutableIndex(ic) = index_a;
907 c.MutableCoefficient(ic) = new_value;
908 ++ic;
909 }
910 ++ia;
911 } else { // index_b < index_a
912 c.MutableIndex(ic) = b.GetIndex(ib);
913 c.MutableCoefficient(ic) = b.GetCoefficient(ib);
914 ++ib;
915 ++ic;
916 }
917 }
918 while (ia < size_a) {
919 const Fractional new_value = multiplier * a.GetCoefficient(ia);
920 if (std::abs(new_value) > drop_tolerance) {
921 c.MutableIndex(ic) = a.GetIndex(ia);
922 c.MutableCoefficient(ic) = new_value;
923 ++ic;
924 }
925 ++ia;
926 }
927 while (ib < size_b) {
928 c.MutableIndex(ic) = b.GetIndex(ib);
929 c.MutableCoefficient(ic) = b.GetCoefficient(ib);
930 ++ib;
931 ++ic;
932 }
933 c.ResizeDown(ic);
934 c.may_contain_duplicates_ = false;
935 c.Swap(accumulator_vector);
936 DCHECK(accumulator_vector->IsCleanedUp());
937}
938
939template <typename IndexType, typename IteratorType>
941 const IndexPermutation& index_perm) {
942 for (const EntryIndex i : AllEntryIndices()) {
943 MutableIndex(i) = index_perm[GetIndex(i)];
944 }
945}
947template <typename IndexType, typename IteratorType>
949 const IndexPermutation& index_perm) {
950 EntryIndex new_index(0);
951 for (const EntryIndex i : AllEntryIndices()) {
952 const Index index = GetIndex(i);
953 if (index_perm[index] >= 0) {
954 MutableIndex(new_index) = index_perm[index];
955 MutableCoefficient(new_index) = GetCoefficient(i);
956 ++new_index;
957 }
958 }
959 ResizeDown(new_index);
960}
961
962template <typename IndexType, typename IteratorType>
964 const IndexPermutation& index_perm, SparseVector* output) {
965 // Note that this function is called many times, so performance does matter
966 // and it is why we optimized the "nothing to do" case.
967 const EntryIndex end(num_entries_);
968 EntryIndex i(0);
969 while (true) {
970 if (i >= end) return; // "nothing to do" case.
971 if (index_perm[GetIndex(i)] >= 0) break;
972 ++i;
973 }
974 output->AddEntry(GetIndex(i), GetCoefficient(i));
975 for (EntryIndex j(i + 1); j < end; ++j) {
976 if (index_perm[GetIndex(j)] < 0) {
977 MutableIndex(i) = GetIndex(j);
978 MutableCoefficient(i) = GetCoefficient(j);
979 ++i;
980 } else {
981 output->AddEntry(GetIndex(j), GetCoefficient(j));
982 }
983 }
984 ResizeDown(i);
985
986 // TODO(user): In the way we use this function, we know that will not
987 // happen, but it is better to be careful so we can check that properly in
988 // debug mode.
989 output->may_contain_duplicates_ = true;
990}
991
992template <typename IndexType, typename IteratorType>
994 Index index) const {
995 Fractional value(0.0);
996 for (const EntryIndex i : AllEntryIndices()) {
997 if (GetIndex(i) == index) {
998 // Keep in mind the vector may contains several entries with the same
999 // index. In such a case the last one is returned.
1000 // TODO(user): investigate whether an optimized version of
1001 // LookUpCoefficient for "clean" columns yields speed-ups.
1002 value = GetCoefficient(i);
1003 }
1004 }
1005 return value;
1006}
1007
1008template <typename IndexType, typename IteratorType>
1010 const SparseVector& other) const {
1011 // We do not take into account the mutable value may_contain_duplicates_.
1012 if (num_entries() != other.num_entries()) return false;
1013 for (const EntryIndex i : AllEntryIndices()) {
1014 if (GetIndex(i) != other.GetIndex(i)) return false;
1015 if (GetCoefficient(i) != other.GetCoefficient(i)) return false;
1016 }
1017 return true;
1018}
1019
1020template <typename IndexType, typename IteratorType>
1022 std::string s;
1023 for (const EntryIndex i : AllEntryIndices()) {
1024 if (i != 0) s += ", ";
1025 absl::StrAppendFormat(&s, "[%d]=%g", GetIndex(i).value(),
1026 GetCoefficient(i));
1028 return s;
1029}
1030
1031} // namespace glop
1032} // namespace operations_research
1033
1034#endif // OR_TOOLS_LP_DATA_SPARSE_VECTOR_H_
EntryIndex i_
The index of the sparse vector entry represented by this object.
SparseVectorEntry(const Index *indices, const Fractional *coefficients, EntryIndex i)
StrictITIVector< Index, Fractional > DenseVector
void RemoveNearZeroEntries(Fractional threshold)
EntryIndex num_entries() const
Note this method can only be used when the vector has no duplicates.
void ApplyPartialIndexPermutation(const IndexPermutation &index_perm)
::util::IntegerRange< EntryIndex > AllEntryIndices() const
void ApplyIndexPermutation(const IndexPermutation &index_perm)
Applies the index permutation to all entries: index = index_perm[index];.
void AddMultipleToSparseVectorAndDeleteCommonIndex(Fractional multiplier, Index removed_common_index, Fractional drop_tolerance, SparseVector *accumulator_vector) const
void ComponentWiseMultiply(const DenseVector &factors)
void PopulateFromSparseVector(const SparseVector &sparse_vector)
void ClearAndRelease()
Clears the vector and releases the memory it uses.
SparseVector & operator=(const SparseVector &other)
void RemoveNearZeroEntriesWithWeights(Fractional threshold, const DenseVector &weights)
bool IsEmpty() const
Returns true if the vector is empty.
SparseVector(const SparseVector &other)
void Clear()
Clears the vector, i.e. removes all entries.
void AppendEntriesWithOffset(const SparseVector &sparse_vector, Index offset)
Fractional GetCoefficient(EntryIndex i) const
void ResizeDown(EntryIndex new_size)
Index * index_
Pointers to the first elements of the index and coefficient arrays.
void ComponentWiseDivide(const DenseVector &factors)
void SetCoefficient(Index index, Fractional value)
Defines the coefficient at index, i.e. vector[index] = value;.
Fractional & MutableCoefficient(EntryIndex i)
void AddMultipleToSparseVectorAndIgnoreCommonIndex(Fractional multiplier, Index removed_common_index, Fractional drop_tolerance, SparseVector *accumulator_vector) const
void PopulateFromDenseVector(const DenseVector &dense_vector)
void AddEntry(Index index, Fractional value)
void MoveTaggedEntriesTo(const IndexPermutation &index_perm, SparseVector *output)
void DivideByConstant(Fractional factor)
void Reserve(EntryIndex new_capacity)
Reserve the underlying storage for the given number of entries.
Index GetLastIndex() const
Like GetFirst*, but for the last entry.
void MultiplyByConstant(Fractional factor)
void PermutedCopyToDenseVector(const IndexPermutation &index_perm, Index num_indices, DenseVector *dense_vector) const
void AddMultipleToDenseVector(Fractional multiplier, DenseVector *dense_vector) const
bool IsEqualTo(const SparseVector &other) const
void CopyToDenseVector(Index num_indices, DenseVector *dense_vector) const
Fractional LookUpCoefficient(Index index) const
int64_t b
Definition table.cc:45
int64_t a
Definition table.cc:44
int64_t value
absl::Span< const double > coefficients
int index
IntegerValue GetCoefficient(const IntegerVariable var, const LinearExpression &expr)
In SWIG mode, we don't want anything besides these top-level includes.
#define RETURN_IF_NULL(x)
#define RETURN_VALUE_IF_NULL(x, v)
std::optional< int64_t > end