OR-Tools 8.2 — Doxygen source listing of this file
linear_relaxation.h
Go to the documentation of this file.
1// Copyright 2010-2018 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#ifndef OR_TOOLS_SAT_LINEAR_RELAXATION_H_
15#define OR_TOOLS_SAT_LINEAR_RELAXATION_H_
16
17#include <vector>
18
20#include "ortools/sat/integer.h"
23#include "ortools/sat/model.h"
24
25namespace operations_research {
26namespace sat {
27
29 std::vector<LinearConstraint> linear_constraints;
30 std::vector<std::vector<Literal>> at_most_ones;
31 std::vector<CutGenerator> cut_generators;
32};
33
34// If the given IntegerVariable is fully encoded (li <=> var == xi), adds to the
35// constraints vector the following linear relaxation of its encoding:
36// - Sum li == 1
37// - Sum li * xi == var
38// Note that all the literal (li) of the encoding must have an IntegerView,
39// otherwise this function just does nothing.
40//
41// Returns false, if the relaxation couldn't be added because this variable
42// was not fully encoded or not all its associated literal had a view.
43bool AppendFullEncodingRelaxation(IntegerVariable var, const Model& model,
44 LinearRelaxation* relaxation);
45
46// When the set of (li <=> var == xi) do not cover the full domain of xi, we
47// do something a bit more involved. Let min/max the min and max value of the
48// domain of var that is NOT part of the encoding. We add:
49// - Sum li <= 1
50// - (Sum li * xi) + (1 - Sum li) * min <= var
51// - var <= (Sum li * xi) + (1 - Sum li) * max
52//
53// Note that if it turns out that the partial encoding is full, this will just
54// use the same encoding as AppendFullEncodingRelaxation(). Any literal that
55// do not have an IntegerView will be skipped, there is no point adding them
56// to the LP if they are not used in any other constraint, the relaxation will
57// have the same "power" without them.
58void AppendPartialEncodingRelaxation(IntegerVariable var, const Model& model,
59 LinearRelaxation* relaxation);
60
61// This is a different relaxation that use a partial set of literal li such that
62// (li <=> var >= xi). In which case we use the following encoding:
63// - li >= l_{i+1} for all possible i. Note that the xi need to be sorted.
64// - var >= min + l0 * (x0 - min) + Sum_{i>0} li * (xi - x_{i-1})
65// - and same as above for NegationOf(var) for the upper bound.
66//
67// Like for AppendPartialEncodingRelaxation() we skip any li that do not have
68// an integer view.
70 const Model& model,
71 LinearRelaxation* relaxation);
72
73// Adds linearization of different types of constraints.
74void TryToLinearizeConstraint(const CpModelProto& model_proto,
75 const ConstraintProto& ct, Model* model,
76 int linearization_level,
77 LinearRelaxation* relaxation);
78
79// Adds linearization of no overlap constraints.
80// It adds an energetic equation linking the duration of all potential tasks to
81// the actual span of the no overlap constraint.
82void AppendNoOverlapRelaxation(const CpModelProto& model_proto,
83 const ConstraintProto& ct,
84 int linearization_level, Model* model,
85 LinearRelaxation* relaxation);
86
87// Adds linearization of cumulative constraints.The second part adds an
88// energetic equation linking the duration of all potential tasks to the actual
89// max span * capacity of the cumulative constraint.
90void AppendCumulativeRelaxation(const CpModelProto& model_proto,
91 const ConstraintProto& ct,
92 int linearization_level, Model* model,
93 LinearRelaxation* relaxation);
94
95// Adds linearization of int max constraints. This can also be used to linearize
96// int min with negated variables.
97void AppendMaxRelaxation(IntegerVariable target,
98 const std::vector<IntegerVariable>& vars,
99 int linearization_level, Model* model,
100 LinearRelaxation* relaxation);
101
102// Adds linearization of int max constraints. Returns a vector of z vars such
103// that: z_vars[l] == 1 <=> target = exprs[l].
104//
105// Consider the Lin Max constraint with d expressions and n variables in the
106// form: target = max {exprs[l] = Sum (wli * xi + bl)}. l in {1,..,d}.
107// Li = lower bound of xi
108// Ui = upper bound of xi.
109// Let zl be in {0,1} for all l in {1,..,d}.
110// The target = exprs[l] when zl = 1.
111//
112// The following is a valid linearization for Lin Max.
113// target >= exprs[l], for all l in {1,..,d}
114// target <= Sum_i(wki * xi) + Sum_l((Nkl + bl) * zl), for all k in {1,..,d}
115// Where Nkl is a large number defined as:
116// Nkl = Sum_i(max((wli - wki)*Li, (wli - wki)*Ui))
117// = Sum (max corner difference for variable i, target expr k, max expr l)
118// Reference: "Strong mixed-integer programming formulations for trained neural
119// networks" by Ross Anderson et. (https://arxiv.org/pdf/1811.01988.pdf).
120// TODO(user): Support linear expression as target.
121std::vector<IntegerVariable> AppendLinMaxRelaxation(
122 IntegerVariable target, const std::vector<LinearExpression>& exprs,
123 Model* model, LinearRelaxation* relaxation);
124
125// Appends linear constraints to the relaxation. This also handles the
126// relaxation of linear constraints with enforcement literals.
127// A linear constraint lb <= ax <= ub with enforcement literals {ei} is relaxed
128// as following.
129// lb <= (Sum Negated(ei) * (lb - implied_lb)) + ax <= inf
130// -inf <= (Sum Negated(ei) * (ub - implied_ub)) + ax <= ub
131// Where implied_lb and implied_ub are trivial lower and upper bounds of the
132// constraint.
133void AppendLinearConstraintRelaxation(const ConstraintProto& constraint_proto,
134 const int linearization_level,
135 const Model& model,
136 LinearRelaxation* relaxation);
137
138} // namespace sat
139} // namespace operations_research
140
141#endif // OR_TOOLS_SAT_LINEAR_RELAXATION_H_
Class that owns everything related to a particular optimization model.
Definition: sat/model.h:38
CpModelProto const * model_proto
const Constraint * ct
IntVar * var
Definition: expr_array.cc:1858
GRBmodel * model
std::vector< IntegerVariable > AppendLinMaxRelaxation(IntegerVariable target, const std::vector< LinearExpression > &exprs, Model *model, LinearRelaxation *relaxation)
void AppendLinearConstraintRelaxation(const ConstraintProto &constraint_proto, const int linearization_level, const Model &model, LinearRelaxation *relaxation)
bool AppendFullEncodingRelaxation(IntegerVariable var, const Model &model, LinearRelaxation *relaxation)
void AppendMaxRelaxation(IntegerVariable target, const std::vector< IntegerVariable > &vars, int linearization_level, Model *model, LinearRelaxation *relaxation)
void TryToLinearizeConstraint(const CpModelProto &model_proto, const ConstraintProto &ct, Model *model, int linearization_level, LinearRelaxation *relaxation)
void AppendPartialEncodingRelaxation(IntegerVariable var, const Model &model, LinearRelaxation *relaxation)
void AppendNoOverlapRelaxation(const CpModelProto &model_proto, const ConstraintProto &ct, int linearization_level, Model *model, LinearRelaxation *relaxation)
void AppendCumulativeRelaxation(const CpModelProto &model_proto, const ConstraintProto &ct, int linearization_level, Model *model, LinearRelaxation *relaxation)
void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var, const Model &model, LinearRelaxation *relaxation)
The vehicle routing library lets one model and solve generic vehicle routing problems ranging from th...
std::vector< std::vector< Literal > > at_most_ones
std::vector< LinearConstraint > linear_constraints
std::vector< CutGenerator > cut_generators