Google OR-Tools v9.11
a fast and portable software suite for combinatorial optimization
reduced_costs.cc
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/glop/reduced_costs.h"

#include <algorithm>
#include <cstdlib>
#include <random>

#include "absl/log/check.h"
#include "absl/random/bit_gen_ref.h"
#include "ortools/glop/parameters.pb.h"
#include "ortools/util/bitset.h"
#include "ortools/util/stats.h"

#ifdef OMP
#include <omp.h>
#endif

namespace operations_research {
namespace glop {

ReducedCosts::ReducedCosts(const CompactSparseMatrix& matrix,
                           const DenseRow& objective,
                           const RowToColMapping& basis,
                           const VariablesInfo& variables_info,
                           const BasisFactorization& basis_factorization,
                           absl::BitGenRef random)
    : matrix_(matrix),
      objective_(objective),
      basis_(basis),
      variables_info_(variables_info),
      basis_factorization_(basis_factorization),
      random_(random),
      parameters_(),
      stats_(),
      must_refactorize_basis_(false),
      recompute_basic_objective_left_inverse_(true),
      recompute_basic_objective_(true),
      recompute_reduced_costs_(true),
      are_reduced_costs_precise_(false),
      are_reduced_costs_recomputed_(false),
      basic_objective_(),
      reduced_costs_(),
      basic_objective_left_inverse_(),
      dual_feasibility_tolerance_() {}

bool ReducedCosts::NeedsBasisRefactorization() const {
  return must_refactorize_basis_;
}

Fractional ReducedCosts::TestEnteringReducedCostPrecision(
    ColIndex entering_col, const ScatteredColumn& direction) {
  SCOPED_TIME_STAT(&stats_);
  if (recompute_basic_objective_) {
    ComputeBasicObjective();
  }
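  // Note on the formula used below: `direction` is the entering column
  // expressed in the current basis (B^{-1}.A_entering) and basic_objective_
  // holds the perturbed costs of the basic variables, so the scalar product
  // gives the textbook reduced cost c_e + perturbation_e - c_B.B^{-1}.A_e,
  // recomputed from scratch rather than incrementally updated.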
  const Fractional old_reduced_cost = reduced_costs_[entering_col];
  const Fractional precise_reduced_cost =
      objective_[entering_col] + cost_perturbations_[entering_col] -
      ScalarProduct(basic_objective_, direction);

  // Update the reduced cost of the entering variable with the precise version.
  reduced_costs_[entering_col] = precise_reduced_cost;

  // At this point, we have an entering variable that will move the objective
  // in the right direction. We check the precision of the reduced cost and
  // edge norms, but even if they are imprecise, we finish this pivot and will
  // recompute them during the next call to ChooseEnteringColumn().

  // Estimate the accuracy of the reduced costs using the entering variable.
  if (!recompute_reduced_costs_) {
    const Fractional estimated_reduced_costs_accuracy =
        old_reduced_cost - precise_reduced_cost;
    const Fractional scale =
        (std::abs(precise_reduced_cost) <= 1.0) ? 1.0 : precise_reduced_cost;
    stats_.reduced_costs_accuracy.Add(estimated_reduced_costs_accuracy / scale);
    if (std::abs(estimated_reduced_costs_accuracy) / scale >
        parameters_.recompute_reduced_costs_threshold()) {
      VLOG(1) << "Recomputing reduced costs, value = " << precise_reduced_cost
              << " error = "
              << std::abs(precise_reduced_cost - old_reduced_cost);
      MakeReducedCostsPrecise();
    }
  }

  return precise_reduced_cost;
}

Fractional ReducedCosts::ComputeMaximumDualResidual() {
  SCOPED_TIME_STAT(&stats_);
  Fractional dual_residual_error(0.0);
  const RowIndex num_rows = matrix_.num_rows();
  const DenseRow& dual_values = Transpose(GetDualValues());
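  // For each basic column B_j, exact dual values y would satisfy
  // y.B_j = c_j + perturbation_j. The residual computed below measures by how
  // much the current dual values (derived from the factorization) violate
  // this identity, i.e. the error in y.B - c_B.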
  for (RowIndex row(0); row < num_rows; ++row) {
    const ColIndex basic_col = basis_[row];
    const Fractional residual =
        objective_[basic_col] + cost_perturbations_[basic_col] -
        matrix_.ColumnScalarProduct(basic_col, dual_values);
    dual_residual_error = std::max(dual_residual_error, std::abs(residual));
  }
  return dual_residual_error;
}

Fractional ReducedCosts::ComputeMaximumDualInfeasibility() {
  SCOPED_TIME_STAT(&stats_);

  // Trigger a recomputation if needed so that reduced_costs_ is valid.
  GetReducedCosts();

  Fractional maximum_dual_infeasibility = 0.0;
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
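  // A non-basic variable is dual infeasible when it could still improve the
  // objective: it can increase while its reduced cost is negative, or decrease
  // while its reduced cost is positive. No tolerance is applied here; the raw
  // maximum of |rc| over such columns is returned.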
  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
    const Fractional rc = reduced_costs_[col];
    if ((can_increase.IsSet(col) && rc < 0.0) ||
        (can_decrease.IsSet(col) && rc > 0.0)) {
      maximum_dual_infeasibility =
          std::max(maximum_dual_infeasibility, std::abs(rc));
    }
  }
  return maximum_dual_infeasibility;
}

Fractional ReducedCosts::ComputeMaximumDualInfeasibilityOnNonBoxedVariables() {
  SCOPED_TIME_STAT(&stats_);

  // Trigger a recomputation if needed so that reduced_costs_ is valid.
  GetReducedCosts();

  Fractional maximum_dual_infeasibility = 0.0;
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  const DenseBitRow& is_boxed = variables_info_.GetNonBasicBoxedVariables();
  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
    if (is_boxed[col]) continue;
    const Fractional rc = reduced_costs_[col];
    if ((can_increase.IsSet(col) && rc < 0.0) ||
        (can_decrease.IsSet(col) && rc > 0.0)) {
      maximum_dual_infeasibility =
          std::max(maximum_dual_infeasibility, std::abs(rc));
    }
  }
  return maximum_dual_infeasibility;
}

Fractional ReducedCosts::ComputeSumOfDualInfeasibilities() {
  SCOPED_TIME_STAT(&stats_);

  // Trigger a recomputation if needed so that reduced_costs_ is valid.
  GetReducedCosts();

  Fractional dual_infeasibility_sum = 0.0;
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
    const Fractional rc = reduced_costs_[col];
    if ((can_increase.IsSet(col) && rc < 0.0) ||
        (can_decrease.IsSet(col) && rc > 0.0)) {
      dual_infeasibility_sum += std::abs(rc);
    }
  }
  return dual_infeasibility_sum;
}

void ReducedCosts::UpdateBeforeBasisPivot(ColIndex entering_col,
                                          RowIndex leaving_row,
                                          const ScatteredColumn& direction,
                                          UpdateRow* update_row) {
  SCOPED_TIME_STAT(&stats_);
  const ColIndex leaving_col = basis_[leaving_row];
  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));

  // If we are recomputing everything when requested, no need to update.
  if (!recompute_reduced_costs_) {
    UpdateReducedCosts(entering_col, leaving_col, leaving_row,
                       direction[leaving_row], update_row);
  }

  // Note that it is important to update basic_objective_ AFTER calling
  // UpdateReducedCosts().
  UpdateBasicObjective(entering_col, leaving_row);
}

void ReducedCosts::SetNonBasicVariableCostToZero(ColIndex col,
                                                 Fractional* current_cost) {
  DCHECK_NE(variables_info_.GetStatusRow()[col], VariableStatus::BASIC);
  DCHECK_EQ(current_cost, &objective_[col]);
  reduced_costs_[col] -= objective_[col];
  *current_cost = 0.0;
}

void ReducedCosts::SetParameters(const GlopParameters& parameters) {
  parameters_ = parameters;
}

void ReducedCosts::ResetForNewObjective() {
  SCOPED_TIME_STAT(&stats_);
  recompute_basic_objective_ = true;
  recompute_basic_objective_left_inverse_ = true;
  are_reduced_costs_precise_ = false;
  SetRecomputeReducedCostsAndNotifyWatchers();
}

void ReducedCosts::UpdateDataOnBasisPermutation() {
  SCOPED_TIME_STAT(&stats_);
  recompute_basic_objective_ = true;
  recompute_basic_objective_left_inverse_ = true;
}

void ReducedCosts::MakeReducedCostsPrecise() {
  SCOPED_TIME_STAT(&stats_);
  if (are_reduced_costs_precise_) return;
  must_refactorize_basis_ = true;
  recompute_basic_objective_left_inverse_ = true;
  SetRecomputeReducedCostsAndNotifyWatchers();
}

void ReducedCosts::PerturbCosts() {
  SCOPED_TIME_STAT(&stats_);
  VLOG(1) << "Perturbing the costs ... ";

  Fractional max_cost_magnitude = 0.0;
  const ColIndex structural_size =
      matrix_.num_cols() - RowToColIndex(matrix_.num_rows());
  for (ColIndex col(0); col < structural_size; ++col) {
    max_cost_magnitude =
        std::max(max_cost_magnitude, std::abs(objective_[col]));
  }

  cost_perturbations_.AssignToZero(matrix_.num_cols());
  for (ColIndex col(0); col < structural_size; ++col) {
    const Fractional objective = objective_[col];
    const Fractional magnitude =
        (1.0 + std::uniform_real_distribution<double>()(random_)) *
        (parameters_.relative_cost_perturbation() * std::abs(objective) +
         parameters_.relative_max_cost_perturbation() * max_cost_magnitude);
    DCHECK_GE(magnitude, 0.0);
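    // The uniform draw above scales the base perturbation by a random factor
    // in [1, 2), so columns with identical costs still end up with distinct
    // perturbations almost surely.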

    // The perturbation direction is such that a dual-feasible solution stays
    // feasible. This is important.
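    // Concretely: a variable at its lower bound needs rc >= 0 to be dual
    // feasible, so its cost only receives a non-negative perturbation below;
    // a variable at its upper bound needs rc <= 0 and receives a non-positive
    // one.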
    const VariableType type = variables_info_.GetTypeRow()[col];
    switch (type) {
      case VariableType::UNCONSTRAINED:
        break;
      case VariableType::FIXED_VARIABLE:
        break;
      case VariableType::LOWER_BOUNDED:
        cost_perturbations_[col] = magnitude;
        break;
      case VariableType::UPPER_BOUNDED:
        cost_perturbations_[col] = -magnitude;
        break;
      case VariableType::UPPER_AND_LOWER_BOUNDED:
        // Here we don't necessarily maintain the dual-feasibility of a dual
        // feasible solution; however, we can always shift the variable to its
        // other bound (because it is boxed) to restore dual-feasibility. This
        // is done by MakeBoxedVariableDualFeasible() at the end of the dual
        // phase-I algorithm.
        if (objective > 0.0) {
          cost_perturbations_[col] = magnitude;
        } else if (objective < 0.0) {
          cost_perturbations_[col] = -magnitude;
        }
        break;
    }
  }
}

void ReducedCosts::ShiftCostIfNeeded(bool increasing_rc_is_needed,
                                     ColIndex col) {
  SCOPED_TIME_STAT(&stats_);

  // We always want a minimum step size, so if we have a negative step or
  // a step that is really small, we will shift the cost of the given column.
  const Fractional minimum_delta =
      parameters_.degenerate_ministep_factor() * dual_feasibility_tolerance_;
  if (increasing_rc_is_needed && reduced_costs_[col] <= -minimum_delta) return;
  if (!increasing_rc_is_needed && reduced_costs_[col] >= minimum_delta) return;

  const Fractional delta =
      increasing_rc_is_needed ? minimum_delta : -minimum_delta;
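  // The shift below changes the cost by -(reduced_costs_[col] + delta), which
  // moves the reduced cost from its current value to exactly -delta. For
  // example, when increasing_rc_is_needed and rc = +eps, the cost is lowered
  // by eps + minimum_delta so that the new reduced cost is -minimum_delta.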
  IF_STATS_ENABLED(stats_.cost_shift.Add(reduced_costs_[col] + delta));
  cost_perturbations_[col] -= reduced_costs_[col] + delta;
  reduced_costs_[col] = -delta;
  has_cost_shift_ = true;
}

bool ReducedCosts::StepIsDualDegenerate(bool increasing_rc_is_needed,
                                        ColIndex col) {
  if (increasing_rc_is_needed && reduced_costs_[col] >= 0.0) return true;
  if (!increasing_rc_is_needed && reduced_costs_[col] <= 0.0) return true;
  return false;
}

void ReducedCosts::ClearAndRemoveCostShifts() {
  SCOPED_TIME_STAT(&stats_);
  has_cost_shift_ = false;
  cost_perturbations_.AssignToZero(matrix_.num_cols());
  recompute_basic_objective_ = true;
  recompute_basic_objective_left_inverse_ = true;
  are_reduced_costs_precise_ = false;
  SetRecomputeReducedCostsAndNotifyWatchers();
}

DenseRow::ConstView ReducedCosts::GetFullPrecisionReducedCosts() {
  SCOPED_TIME_STAT(&stats_);
  if (!are_reduced_costs_recomputed_) {
    SetRecomputeReducedCostsAndNotifyWatchers();
  }
  return GetReducedCosts();
}

DenseRow::ConstView ReducedCosts::GetReducedCosts() {
  SCOPED_TIME_STAT(&stats_);
  if (basis_factorization_.IsRefactorized()) {
    must_refactorize_basis_ = false;
  }
  if (recompute_reduced_costs_) {
    ComputeReducedCosts();
  }
  return reduced_costs_.const_view();
}

const DenseColumn& ReducedCosts::GetDualValues() {
  SCOPED_TIME_STAT(&stats_);
  ComputeBasicObjectiveLeftInverse();
  return Transpose(basic_objective_left_inverse_.values);
}

void ReducedCosts::ComputeBasicObjective() {
  SCOPED_TIME_STAT(&stats_);
  const ColIndex num_cols_in_basis = RowToColIndex(matrix_.num_rows());
  cost_perturbations_.resize(matrix_.num_cols(), 0.0);
  basic_objective_.resize(num_cols_in_basis, 0.0);
  for (ColIndex col(0); col < num_cols_in_basis; ++col) {
    const ColIndex basis_col = basis_[ColToRowIndex(col)];
    basic_objective_[col] =
        objective_[basis_col] + cost_perturbations_[basis_col];
  }
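  // Note that basic_objective_ is indexed by basis position (a RowIndex cast
  // to ColIndex), not by column index. This is what allows
  // UpdateBasicObjective() to patch a single entry when a pivot replaces one
  // basic variable by another.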
  recompute_basic_objective_ = false;
  recompute_basic_objective_left_inverse_ = true;
}

void ReducedCosts::ComputeReducedCosts() {
  SCOPED_TIME_STAT(&stats_);
  if (recompute_basic_objective_left_inverse_) {
    ComputeBasicObjectiveLeftInverse();
  }
  Fractional dual_residual_error(0.0);
  const ColIndex num_cols = matrix_.num_cols();

  reduced_costs_.resize(num_cols, 0.0);
  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
#ifdef OMP
  const int num_omp_threads = parameters_.num_omp_threads();
#else
  const int num_omp_threads = 1;
#endif
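  // Each reduced cost below is recomputed from scratch as
  //   rc[col] = c[col] + perturbation[col] - y.A_col,
  // with y = basic_objective_left_inverse_ the solution of y.B = c_B (the
  // perturbed basic costs). For a basic column this quantity is
  // mathematically zero, so its magnitude is a convenient estimate of the
  // dual residual error.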
  if (num_omp_threads == 1) {
    for (ColIndex col(0); col < num_cols; ++col) {
      reduced_costs_[col] = objective_[col] + cost_perturbations_[col] -
                            matrix_.ColumnScalarProduct(
                                col, basic_objective_left_inverse_.values);

      // We also compute the dual residual error y.B - c_B.
      if (is_basic.IsSet(col)) {
        dual_residual_error =
            std::max(dual_residual_error, std::abs(reduced_costs_[col]));
      }
    }
  } else {
#ifdef OMP
    // In the multi-threaded case, perform the same computation as in the
    // single-threaded case above.
    std::vector<Fractional> thread_local_dual_residual_error(num_omp_threads,
                                                             0.0);
    const int parallel_loop_size = num_cols.value();
#pragma omp parallel for num_threads(num_omp_threads)
    for (int i = 0; i < parallel_loop_size; i++) {
      const ColIndex col(i);
      reduced_costs_[col] = objective_[col] + cost_perturbations_[col] -
                            matrix_.ColumnScalarProduct(
                                col, basic_objective_left_inverse_.values);

      if (is_basic.IsSet(col)) {
        thread_local_dual_residual_error[omp_get_thread_num()] =
            std::max(thread_local_dual_residual_error[omp_get_thread_num()],
                     std::abs(reduced_costs_[col]));
      }
    }
    // end of omp parallel for
    for (int i = 0; i < num_omp_threads; i++) {
      dual_residual_error =
          std::max(dual_residual_error, thread_local_dual_residual_error[i]);
    }
#endif  // OMP
  }

  deterministic_time_ +=
      DeterministicTimeForFpOperations(matrix_.num_entries().value());
  recompute_reduced_costs_ = false;
  are_reduced_costs_recomputed_ = true;
  are_reduced_costs_precise_ = basis_factorization_.IsRefactorized();

  // It is not reasonable to have a dual tolerance lower than the current
  // dual_residual_error, otherwise we may never terminate (this happens on
  // dfl001.mps with a low dual_feasibility_tolerance). Note that since we
  // recompute the reduced costs with maximum precision before really exiting,
  // it is fine to do a couple of iterations with a high zero tolerance.
  dual_feasibility_tolerance_ = parameters_.dual_feasibility_tolerance();
  if (dual_residual_error > dual_feasibility_tolerance_) {
    VLOG(2) << "Changing dual_feasibility_tolerance to " << dual_residual_error;
    dual_feasibility_tolerance_ = dual_residual_error;
  }
}

void ReducedCosts::ComputeBasicObjectiveLeftInverse() {
  SCOPED_TIME_STAT(&stats_);
  if (recompute_basic_objective_) {
    ComputeBasicObjective();
  }
  basic_objective_left_inverse_.values = basic_objective_;
  basic_objective_left_inverse_.non_zeros.clear();
  basis_factorization_.LeftSolve(&basic_objective_left_inverse_);
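  // After the left solve, basic_objective_left_inverse_ holds y = c_B.B^{-1},
  // i.e. the (perturbed) dual values; GetDualValues() simply returns its
  // transpose.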
  recompute_basic_objective_left_inverse_ = false;
  IF_STATS_ENABLED(stats_.basic_objective_left_inverse_density.Add(
      Density(basic_objective_left_inverse_.values)));

  // TODO(user): Estimate its accuracy by a few scalar products, and
  // refactorize if it is above a threshold?
}

// Note that the update is such that the entering reduced cost is always set
// to 0.0. In particular, because of this we can step in the wrong direction
// for the dual method if the reduced cost is slightly infeasible.
void ReducedCosts::UpdateReducedCosts(ColIndex entering_col,
                                      ColIndex leaving_col,
                                      RowIndex leaving_row, Fractional pivot,
                                      UpdateRow* update_row) {
  DCHECK_NE(entering_col, leaving_col);
  DCHECK_NE(pivot, 0.0);
  if (recompute_reduced_costs_) return;

  // Note that this is precise because of the CheckPrecision().
  const Fractional entering_reduced_cost = reduced_costs_[entering_col];

  // Nothing to do if the entering reduced cost is 0.0.
  // This corresponds to a dual degenerate pivot.
  if (entering_reduced_cost == 0.0) {
    VLOG(2) << "Reduced costs didn't change.";

    // TODO(user): the reduced costs may still be "precise" in this case, but
    // other parts of the code assume that if they are precise then the basis
    // was just refactorized in order to recompute them, which is not the case
    // here. Clean this up.
    are_reduced_costs_precise_ = false;
    return;
  }

  are_reduced_costs_recomputed_ = false;
  are_reduced_costs_precise_ = false;
  update_row->ComputeUpdateRow(leaving_row);
  SCOPED_TIME_STAT(&stats_);

  // Update the leaving variable reduced cost.
  // '-pivot' is the value of the entering_edge at 'leaving_row'.
  // The edge of the 'leaving_col' in the new basis is equal to
  // 'entering_edge / -pivot'.
  const Fractional new_leaving_reduced_cost = entering_reduced_cost / -pivot;
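  // For every other non-basic column, the reduced cost then changes by
  // new_leaving_reduced_cost times the corresponding coefficient of the
  // update row (the leaving row of B^{-1}.A restricted to the non-basic
  // columns), which is the rank-one update applied in the loop below.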
  auto rc = reduced_costs_.view();
  auto update_coeffs = update_row->GetCoefficients().const_view();
  for (const ColIndex col : update_row->GetNonZeroPositions()) {
    rc[col] += new_leaving_reduced_cost * update_coeffs[col];
  }
  rc[leaving_col] = new_leaving_reduced_cost;

  // In the dual, since we compute the update before selecting the entering
  // variable, this cost is still in the update_position_list, so we make sure
  // it is 0 here.
  rc[entering_col] = 0.0;
}

bool ReducedCosts::IsValidPrimalEnteringCandidate(ColIndex col) const {
  const Fractional reduced_cost = reduced_costs_[col];
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  const Fractional tolerance = dual_feasibility_tolerance_;
  return (can_increase.IsSet(col) && (reduced_cost < -tolerance)) ||
         (can_decrease.IsSet(col) && (reduced_cost > tolerance));
}

void ReducedCosts::UpdateBasicObjective(ColIndex entering_col,
                                        RowIndex leaving_row) {
  SCOPED_TIME_STAT(&stats_);
  basic_objective_[RowToColIndex(leaving_row)] =
      objective_[entering_col] + cost_perturbations_[entering_col];
  recompute_basic_objective_left_inverse_ = true;
}

void ReducedCosts::SetRecomputeReducedCostsAndNotifyWatchers() {
  recompute_reduced_costs_ = true;
  for (bool* watcher : watchers_) *watcher = true;
}

PrimalPrices::PrimalPrices(absl::BitGenRef random,
                           const VariablesInfo& variables_info,
                           PrimalEdgeNorms* primal_edge_norms,
                           ReducedCosts* reduced_costs)
    : prices_(random),
      variables_info_(variables_info),
      primal_edge_norms_(primal_edge_norms),
      reduced_costs_(reduced_costs) {
  reduced_costs_->AddRecomputationWatcher(&recompute_);
  primal_edge_norms->AddRecomputationWatcher(&recompute_);
}
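// The watchers registered above set recompute_ to true whenever the reduced
// costs or the edge norms are recomputed from scratch, so the price queue is
// rebuilt lazily the next time the best entering column is requested.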

void PrimalPrices::UpdateBeforeBasisPivot(ColIndex entering_col,
                                          UpdateRow* update_row) {
  // If we are recomputing everything when requested, no need to update.
  if (recompute_) return;

  // Note that the set of positions works because both the reduced costs
  // and the primal edge norms are updated on the same positions which are
  // given by the update_row.
  UpdateEnteringCandidates</*from_clean_state=*/false>(
      update_row->GetNonZeroPositions());

  // This should be redundant with the call above, except in degenerate
  // cases where the update_row has a zero position on the entering col!
  prices_.Remove(entering_col);
}

void PrimalPrices::RecomputePriceAt(ColIndex col) {
  if (recompute_) return;
  if (reduced_costs_->IsValidPrimalEnteringCandidate(col)) {
    const DenseRow::ConstView squared_norms =
        primal_edge_norms_->GetSquaredNorms();
    const DenseRow::ConstView reduced_costs = reduced_costs_->GetReducedCosts();
    DCHECK_NE(0.0, squared_norms[col]);
    prices_.AddOrUpdate(col, Square(reduced_costs[col]) / squared_norms[col]);
  } else {
    prices_.Remove(col);
  }
}

void PrimalPrices::SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col) {
  // If we need a recomputation, we cannot assume that the reduced costs are
  // valid until we are about to recompute the prices.
  if (recompute_) return;

  DCHECK(!reduced_costs_->IsValidPrimalEnteringCandidate(col));
  prices_.Remove(col);
}

ColIndex PrimalPrices::GetBestEnteringColumn() {
  if (recompute_) {
    const DenseRow::ConstView reduced_costs = reduced_costs_->GetReducedCosts();
    prices_.ClearAndResize(reduced_costs.size());
    UpdateEnteringCandidates</*from_clean_state=*/true>(
        variables_info_.GetIsRelevantBitRow());
    recompute_ = false;
  }
  return prices_.GetMaximum();
}

// A variable is an entering candidate if it can move in a direction that
// minimizes the objective. That is, its value needs to increase if its
// reduced cost is negative or it needs to decrease if its reduced cost is
// positive (see the IsValidPrimalEnteringCandidate() function). Note that
// this is the same as a dual-infeasible variable.
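// The price associated with a candidate is Square(reduced_cost) divided by
// the squared norm of its edge, the same ratio used by steepest-edge /
// devex-type pricing rules: it favors columns whose reduced cost is large
// relative to the length of the corresponding update direction.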
template <bool from_clean_state, typename ColumnsToUpdate>
void PrimalPrices::UpdateEnteringCandidates(const ColumnsToUpdate& cols) {
  const Fractional tolerance = reduced_costs_->GetDualFeasibilityTolerance();
  const DenseBitRow::ConstView can_decrease =
      variables_info_.GetCanDecreaseBitRow().const_view();
  const DenseBitRow::ConstView can_increase =
      variables_info_.GetCanIncreaseBitRow().const_view();
  const DenseRow::ConstView squared_norms =
      primal_edge_norms_->GetSquaredNorms();
  const DenseRow::ConstView reduced_costs = reduced_costs_->GetReducedCosts();
  for (const ColIndex col : cols) {
    const Fractional reduced_cost = reduced_costs[col];

    // Optimization for speed (The function is about 30% faster than the code
    // in IsValidPrimalEnteringCandidate() or a switch() on
    // variable_status[col]). This relies on the fact that (double1 > double2)
    // returns a 1 or 0 result when converted to an int. It also uses an XOR
    // (which appears to be faster) since the two conditions on the reduced
    // cost are exclusive.
    const bool is_dual_infeasible = Bitset64<ColIndex>::ConditionalXorOfTwoBits(
        col, reduced_cost > tolerance, can_decrease, reduced_cost < -tolerance,
        can_increase);
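    // Since tolerance >= 0, the two comparisons above cannot both be true, so
    // the XOR is equivalent to
    //   (can_decrease[col] && rc > tolerance) ||
    //   (can_increase[col] && rc < -tolerance),
    // evaluated without any branching.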
    if (is_dual_infeasible) {
      DCHECK(reduced_costs_->IsValidPrimalEnteringCandidate(col));
      const Fractional price = Square(reduced_cost) / squared_norms[col];
      prices_.AddOrUpdate(col, price);
    } else {
      DCHECK(!reduced_costs_->IsValidPrimalEnteringCandidate(col));
      if (!from_clean_state) prices_.Remove(col);
    }
  }
}

}  // namespace glop
}  // namespace operations_research