Problems adding columns "Only linear constraints allowed"
Answered
Hello, this is a follow-up post to the previous one. I updated my code quite a bit but there are still some problems remaining.
This is my code:
from gurobipy import *
import gurobipy as gu
import pandas as pd
import time
import random
# Create Dataframes
import pandas as pd
# Index sets: nurses I, days T, shifts K.
I_list, T_list, K_list = [1, 2, 3], list(range(1, 8)), [1, 2, 3]
# One single-column frame per index set (kept as separate names for reuse).
I_list1, T_list1, K_list1 = (
    pd.DataFrame(values, columns=[label])
    for values, label in ((I_list, 'I'), (T_list, 'T'), (K_list, 'K'))
)
# Side-by-side concat pads the shorter columns (I, K) with NaN.
DataDF = pd.concat([I_list1, T_list1, K_list1], axis=1)
# Required staff per (day, shift), written row-wise per day for readability.
_demand_per_day = [
    (1, [2, 1, 0]), (2, [1, 2, 0]), (3, [1, 1, 1]), (4, [1, 2, 0]),
    (5, [2, 0, 1]), (6, [1, 1, 1]), (7, [0, 3, 0]),
]
Demand_Dict = {(day, shift): need
               for day, per_shift in _demand_per_day
               for shift, need in enumerate(per_shift, start=1)}
# Generate Alpha
def gen_alpha(seed):
    """Return one reproducible pseudo-random factor per day 1..7, rounded to 3 places."""
    random.seed(seed)
    return {day: round(random.random(), 3) for day in range(1, 8)}
# General Parameter
max_itr, seed = 10, 123
class MasterProblem:
    """Restricted master problem (RMP) of the column-generation scheme.

    Demand coverage uses *quadratic* constraints because each column's
    contribution is motivation_i * lmbda — a product of two variables —
    so duals must be read via the QCPi attribute with QCPDual enabled.
    """

    def __init__(self, dfData, DemandDF, max_iteration, current_iteration):
        self.iteration = current_iteration
        self.max_iteration = max_iteration
        # Index sets come from the NaN-padded columns of the data frame.
        self.nurses = dfData['I'].dropna().astype(int).unique().tolist()
        self.days = dfData['T'].dropna().astype(int).unique().tolist()
        self.shifts = dfData['K'].dropna().astype(int).unique().tolist()
        self._current_iteration = current_iteration
        # One roster slot per potential CG iteration, plus the initial dummy column.
        self.roster = [i for i in range(1, self.max_iteration + 2)]
        self.rosterinitial = [i for i in range(1, 2)]
        self.demand = DemandDF          # dict keyed by (day, shift)
        self.model = gu.Model("MasterProblem")
        self.cons_demand = {}
        self.cons_demand_2 = {}
        self.newvar = {}
        self.cons_lmbda = {}

    def buildModel(self):
        """Create variables, constraints and objective of the RMP."""
        self.generateVariables()
        self.generateConstraints()
        self.model.update()
        self.generateObjective()
        self.model.update()

    def generateVariables(self):
        # slack covers unmet demand and is the only term in the objective.
        self.slack = self.model.addVars(self.days, self.shifts,
                                        vtype=gu.GRB.CONTINUOUS, lb=0, name='slack')
        self.motivation_i = self.model.addVars(self.days, self.shifts, self.roster,
                                               vtype=gu.GRB.CONTINUOUS, lb=0, ub=1,
                                               name='motivation_i')
        self.x_i = self.model.addVars(self.days, self.shifts, self.roster,
                                      vtype=gu.GRB.BINARY, name='x_i')
        # lmbda[r] = how many nurses use roster r (aggregated formulation).
        self.lmbda = self.model.addVars(self.roster, vtype=gu.GRB.INTEGER, lb=0,
                                        name='lmbda')

    def generateConstraints(self):
        # Convexity constraint: selected rosters must account for every nurse.
        self.cons_lmbda = self.model.addConstr(
            len(self.nurses) == gu.quicksum(self.lmbda[r] for r in self.rosterinitial),
            name="lmb")
        # Demand coverage; quadratic because motivation_i * lmbda is bilinear.
        for t in self.days:
            for s in self.shifts:
                self.cons_demand[t, s] = self.model.addQConstr(
                    gu.quicksum(self.motivation_i[t, s, r] * self.lmbda[r]
                                for r in self.rosterinitial)
                    + self.slack[t, s] >= self.demand[t, s],
                    "demand(" + str(t) + "," + str(s) + ")")
        return self.cons_lmbda, self.cons_demand

    def generateObjective(self):
        # Minimize total unmet demand.
        self.model.setObjective(
            gu.quicksum(self.slack[t, s] for t in self.days for s in self.shifts),
            sense=gu.GRB.MINIMIZE)

    def solveRelaxModel(self):
        """Solve the LP/QCP relaxation so dual values can be retrieved."""
        self.model.Params.QCPDual = 1
        self.model.Params.NonConvex = 2
        for v in self.model.getVars():
            v.setAttr('vtype', 'C')  # relax every variable to continuous
        self.model.optimize()

    def getDuals_i(self):
        """Dual of the convexity constraint.

        self.cons_lmbda is a single Constr object; Model.getAttr("Pi", ...)
        only accepts collections of constraints and raises
        'Unrecognized argument to getAttr' for a scalar — read the
        attribute directly instead.
        """
        return self.cons_lmbda.Pi

    def getDuals_ts(self):
        """Duals of the quadratic demand constraints (QCPi, needs QCPDual=1)."""
        Pi_cons_demand = self.model.getAttr("QCPi", self.cons_demand)
        return Pi_cons_demand

    def updateModel(self):
        self.model.update()

    def setStartSolution(self):
        # Pin the initial dummy column's motivation to 0 so the first RMP
        # solve is feasible via slack only.
        for t in self.days:
            for s in self.shifts:
                self.model.addLConstr(0 == self.motivation_i[t, s, 1])
        self.model.update()

    def solveModel(self):
        self.model.Params.QCPDual = 1
        self.model.Params.OutputFlag = 0
        self.model.optimize()

    def addColumn(self, itr, schedule):
        """Splice the new column's motivation values into every demand constraint.

        Gurobi cannot modify a quadratic constraint in place, so each one is
        rebuilt: copy its quadratic row, add schedule * lmbda for the new
        roster, then replace the old constraint.
        """
        self.rosterIndex = itr + 1
        for t in self.days:
            for s in self.shifts:
                qexpr = self.model.getQCRow(self.cons_demand[t, s])
                qexpr.add(schedule[t, s, self.rosterIndex] * self.lmbda[self.rosterIndex], 1)
                rhs = self.cons_demand[t, s].getAttr('QCRHS')
                sense = self.cons_demand[t, s].getAttr('QCSense')
                name = self.cons_demand[t, s].getAttr('QCName')
                newcon = self.model.addQConstr(qexpr, sense, rhs, name)
                self.model.remove(self.cons_demand[t, s])
                self.cons_demand[t, s] = newcon
        self.model.update()

    def addLambda(self, itr):
        """Add the new roster's lmbda (coefficient 1) to the convexity constraint."""
        self.rosterIndex = itr + 1
        self.newlmbcoef = 1.0
        current_lmb_cons = self.cons_lmbda
        expr = self.model.getRow(current_lmb_cons)
        new_lmbcoef = self.newlmbcoef
        expr.add(self.lmbda[self.rosterIndex], new_lmbcoef)
        rhs_lmb = current_lmb_cons.getAttr('RHS')
        sense_lmb = current_lmb_cons.getAttr('Sense')
        name_lmb = current_lmb_cons.getAttr('ConstrName')
        newconlmb = self.model.addLConstr(expr, sense_lmb, rhs_lmb, name_lmb)
        self.model.remove(current_lmb_cons)
        self.cons_lmbda = newconlmb

    def finalSolve(self):
        """Restore integrality on lmbda (undone by solveRelaxModel) and solve."""
        self.model.setAttr("vType", self.lmbda, gu.GRB.INTEGER)
        self.model.update()
        self.model.optimize()
class Subproblem:
    """Pricing problem: builds one candidate roster with minimal reduced cost.

    NOTE(review): the forum paste stripped the binary minus signs from this
    class (e.g. "1 self.alpha[t]"), leaving invalid Python; they are
    restored below in the form standard for big-M mood/motivation linking.
    """

    def __init__(self, duals_i, duals_ts, dfData, M, iteration, alpha):
        itr = iteration + 1  # roster index this column will occupy in the RMP
        self.days = dfData['T'].dropna().astype(int).unique().tolist()
        self.shifts = dfData['K'].dropna().astype(int).unique().tolist()
        self.duals_i = duals_i    # dual of the convexity constraint
        self.duals_ts = duals_ts  # duals of the demand constraints, keyed (t, s)
        self.Max = 5              # max working days in any rolling window
        self.Min = 2              # min total working days
        self.M = M                # big-M for the mood/motivation linking
        self.alpha = alpha        # per-day random factor, keyed by day
        self.model = gu.Model("Subproblem")
        self.itr = itr

    def buildModel(self):
        self.generateVariables()
        self.generateConstraints()
        self.generateObjective()
        self.model.update()

    def generateVariables(self):
        self.x = self.model.addVars(self.days, self.shifts, [self.itr],
                                    vtype=gu.GRB.BINARY, name='x')
        self.y = self.model.addVars(self.days, vtype=gu.GRB.BINARY, name='y')
        self.mood = self.model.addVars(self.days, vtype=gu.GRB.CONTINUOUS, lb=0,
                                       name='mood')
        self.motivation = self.model.addVars(self.days, self.shifts, [self.itr],
                                             vtype=gu.GRB.CONTINUOUS, lb=0,
                                             name='motivation')

    def generateConstraints(self):
        for t in self.days:
            # Daily mood is fixed by the random factor alpha.
            self.model.addLConstr(self.mood[t] == 1 - self.alpha[t])
            # y marks a working day; at most one shift per day.
            self.model.addLConstr(
                quicksum(self.x[t, s, self.itr] for s in self.shifts) == self.y[t])
            self.model.addLConstr(
                gu.quicksum(self.x[t, s, self.itr] for s in self.shifts) <= 1)
            for s in self.shifts:
                # Big-M linking: motivation == mood on worked shifts, 0 otherwise.
                self.model.addLConstr(
                    self.motivation[t, s, self.itr]
                    >= self.mood[t] - self.M * (1 - self.x[t, s, self.itr]))
                self.model.addLConstr(
                    self.motivation[t, s, self.itr]
                    <= self.mood[t] + self.M * (1 - self.x[t, s, self.itr]))
                self.model.addLConstr(
                    self.motivation[t, s, self.itr] <= self.x[t, s, self.itr])
        # No more than Max working days in any rolling window of Max+1 days.
        for t in range(1, len(self.days) - self.Max + 1):
            self.model.addLConstr(
                gu.quicksum(self.y[u] for u in range(t, t + 1 + self.Max)) <= self.Max)
        # Work at least Min days overall.
        self.model.addLConstr(self.Min <= quicksum(self.y[t] for t in self.days))

    def generateObjective(self):
        # Reduced cost of the candidate column:
        #   -(sum over (t,s) of duals_ts * motivation) - duals_i, minimized.
        self.model.setObjective(
            0 - gu.quicksum(self.motivation[t, s, self.itr] * self.duals_ts[t, s]
                            for t in self.days for s in self.shifts)
            - self.duals_i,
            sense=gu.GRB.MINIMIZE)

    def getNewSchedule(self):
        """Optimal motivation values, keyed (t, s, itr) — the new column."""
        return self.model.getAttr("X", self.motivation)
#### Column Generation
modelImprovable = True
t0 = time.time()
itr = 0
# Build & solve the initial restricted master problem (RMP).
master = MasterProblem(DataDF, Demand_Dict, max_itr, itr)
master.buildModel()
master.updateModel()
master.setStartSolution()
master.solveRelaxModel()
# Duals of the initial relaxation seed the first pricing problem.
duals_i, duals_ts = master.getDuals_i(), master.getDuals_ts()
t0 = time.time()  # NOTE(review): overwrites the earlier t0; timing starts here
while (modelImprovable) and itr < max_itr:
    itr += 1
    print('* Current CG iteration: ', itr)
    # Re-solve the RMP relaxation and fetch fresh duals.
    master.current_iteration = itr + 1
    master.solveRelaxModel()
    duals_i = master.getDuals_i()
    duals_ts = master.getDuals_ts()
    # Price a new column via the subproblem.
    modelImprovable = False
    subproblem = Subproblem(duals_i, duals_ts, DataDF, 1e6, itr, gen_alpha(seed))
    subproblem.buildModel()
    subproblem.model.optimize()
    reducedCost = subproblem.model.objval
    # Only columns with (numerically) negative reduced cost improve the RMP.
    # NOTE(review): the paste showed "< 1e6"; with the stripped minus signs
    # restored this is the usual CG tolerance "< -1e-6".
    if reducedCost < -1e-6:
        Schedules = subproblem.getNewSchedule()
        master.addColumn(itr, Schedules)
        master.addLambda(itr)
        master.updateModel()
        modelImprovable = True
    master.updateModel()
if not modelImprovable:
    print("*{:^88}*".format("No more improvable columns found."))
# Final integer solve of the master problem over all generated columns.
master.finalSolve()
print(master.model.objval)
1) I am currently facing this error:
gurobipy.GurobiError: Only linear constraints allowed
I found out that it has something to do with the notation of the constraints, especially with the "[]" notation. Unfortunately, this is the only way to calculate the duals. How can I still work around the error?
2) Have i added the lambdas correctly?

1) If I increase max_itr, then the reduced cost of the subproblem also changes, albeit marginally. I think this is because the subproblem's constraints change, which also changes the objective value. Could that be the reason?
By increasing max_itr, you are adding more variables and constraints. This can of course change the final result of the optimization. Thus, the observation you make is expected.
2) A lot of motivation values are added to the objective function, as in this example. Is this normal?
These variables are added with a 0 coefficient so they do not affect the objective function at all. They are only written into the objective by Gurobi in order to maintain the same variable ordering when reading in the generated LP file. Note that this is different from constraints where variables with a 0 coefficient are not printed.
3) Unfortunately, the demand(t,s) constraints remain unchanged, no matter how many iterations I perform. As a result, the dual values and also the objective function of the master problem do not change, since still applies and thus the objective function value remains unchanged at 21. Why were the quadratic terms from newSchedule added in the old code, but not anymore? Perhaps because they are no longer quadratic terms? See the .lp here:
The quadratic terms were added because you were multiplying two variables. Right now, because all motivation variable values are 0, you are adding a term \(0 \cdot \lambda\) which is not printed in constraints because it is just \(0\).
I think you should try to find out why all motivation values are 0. For that, you could write out the first model you solve and analyze whether it looks as expected. As long as the optimal solution values of your motivation variables are 0 you will see no progress in the algorithm, because you keep adding \(0 \cdot \lambda\) which does nothing.
0 
Thank you very much. It is quite surprising, then, that in Iteration 1, this solution vector for the SP(1) is being derived:
Optimal Values Iteration 1 for SP 1: {(1, 1, 1, 2): 0.0, (1, 1, 2, 2): 0.948, (1, 1, 3, 2): 0.0, (1, 2, 1, 2): 0.913, (1, 2, 2, 2): 0.0, (1, 2, 3, 2): 0.0, (1, 3, 1, 2): 0.0, (1, 3, 2, 2): 0.0, (1, 3, 3, 2): 0.593, (1, 4, 1, 2): 0.0, (1, 4, 2, 2): 0.892, (1, 4, 3, 2): 0.0, (1, 5, 1, 2): 0.0, (1, 5, 2, 2): 0.0, (1, 5, 3, 2): 0.0, (1, 6, 1, 2): 0.0, (1, 6, 2, 2): 0.0, (1, 6, 3, 2): 0.962, (1, 7, 1, 2): 0.0, (1, 7, 2, 2): 0.464, (1, 7, 3, 2): 0.0}
As is easy to see, not all values are \(=0\) and they should therefore actually be added. Why does this not happen anyway?
In fact, the constraint demand(1,2) should look like this (the other SPs have been omitted):
demand(1,2): slack[1,2] + [ motivation_i[1,1,2,1] * lmbda[1,1]
+ motivation_i[2,1,2,1] * lmbda[2,1]
+ motivation_i[3,1,2,1] * lmbda[3,1] ]
+ 0.948 * lmbda[1,2] + ... >= 1
Good, now in \(\texttt{modifyConstraints}\) you can print every i,j,k,l value and self.motivation_i[i, j, k, l] that is actually used and investigate further from there.
0 
Thank you very much. It's finally working. I have now adapted these two functions, and now it works. It also spits out the same optimal values \(motivation\) after finalSolve() as if I look at the model without the index \(R\) and simply solve it as a whole. Just as it should be.
However, I noticed one strange thing. I have now used the model with the seed = 123 for the \(\alpha_{it}\), and the objective values are exactly the same, except for the typical Python numerical inequalities many digits after the decimal point. However, if I set the seed to 1234, for example, the objective value of the CG approach is even lower. How can that be? Possibly due to rounding errors?
0 
Thank you very much. It's finally working. I have now adapted these two functions, and now it works. It also spits out the same optimal values after finalSolve() as if I look at the model without the index and simply solve it as a whole. Just as it should be.
Great to hear!
However, I noticed one strange thing. I have now used the model with the seed = 123 for the , and the objective values are exactly the same, except for the typical Python numerical inequalities many digits after the decimal point. However, if I set the seed to 1234, for example, the objective value of the CG approach is even lower. How can that be? Possibly due to rounding errors?
How much lower is the objective function? Could you please log snippets of the two optimization runs? It is very possible that numerics play a role. In particular, because your model is convex but quadratic. You could try tightening the FeasibilityTol parameter and/or play with the BarConvTol parameter. However, if the difference is negligible, I wouldn't worry much.
0 
Never mind, I swapped the values of the classic model (without CG) and the column generation. As a result, the column generation performs just as well in some instances as the simple implementation, but somewhat worse in most. So I assume that if I want to have the optimal solution, I have to implement a branch-and-price algorithm, right? If I understand this post correctly and combine it with your previous answers, then a branch-and-price algorithm is difficult to implement because it requires the dynamic addition of columns etc., which, as you mentioned, is not possible. Would it then even be possible to always solve the model optimally in Gurobi with CG?
Could this code possibly help?0 
Hi Carl,
Yes, implementing a branchandprice algorithm is definitely a difficult task.
Would it then even be possible to always solve the model optimally in Gurobi with CG?
I don't understand the question. A column generation algorithm would always solve a given model to optimality within the algorithm's limits. If column generation is not suited to tackle a specific task, then a different algorithm should be used.
Could this code possibly help?
I don't know the code. You could try it out or even contact the authors if in doubt.
I can recommend the GCG package for a good BranchandPrice implementation.
Best regards,
Jaromił0 
Are there any start guides that can help with the implementation of BranchnPrice?
And over the weekend I thought of two more questions.
1) Is it possible to delete all the unused variables (i.e. those created with max_itr but never used due to the low iteration count) before / for the finalSolve()?
2) Is it also possible to identify several columns with negative reduced costs in each SP and pass them to the MP?
0 
Are there any start guides that can help with the implementation of BranchnPrice?
I don't think that there is a guide of how to implement a BranchandPrice algorithm. I would strongly recommend using an already available implementation unless you have a very good reason to implement the algorithm yourself.
1) Is it possible to delete all the unused variables (i.e. those created with max_itr but never used due to the low iteration count) before / for the finalSolve()?
You can use the remove method to remove variables from the model. You would have to collect the unused variables first. Alternatively, you can just leave them in the model and just not read the values for those later on. Having (not too many) unused variables in the model should not have any performance impact.
2) Is it also possible to identify several columns with negative reduced costs in each SP and pass them to the MP?
I don't know. The last time when I worked with column generation algorithms is already quite far in the past. There is a lot of extensive work being done on Column Generation and BranchandPrice by Prof. Lübbecke at his OR institute. I would recommend having a look at their literature. You will very likely find all the answers to your questions there.
0 
Jaromił Najman Thanks. Is there any quick way to collect all those variables?
0 
Is there any quick way to collect all those variables?
No, you would have to keep a list of all variables of interest and mark them as "used". Then you can just remove all variables that are "not used".
0 
Jaromił Najman Thank you, I solved it by saving the value of the last iteration performed in a variable and then removing all variables with the index \(r>\)last_itr with remove(). I still have a question that relates to the GurobiAPi. I would like to create such a plot on slide 31. For this I need the information about
the iteration
the primal bound of the RMP
the dual bound of the RMP.
I have already read in the forum that this information can be extracted from the log, but I want to have the final values at the end of each iteration. Which command can I use to get these?
0 
but I want to have the final values at the end of each iteration.
What exactly do you mean by each iteration? Do you define an iteration as a solution of a subproblem in your algorithm?
In that case you can access the model attributes ObjVal (primal bound) and ObjBound (dual bound).
If you are interested in extracting the log information of every B&B iteration, then you can use the MESSAGE callback, see callback.py for an example usage. You would have to extract the relevant information from the log message string.
Best regards,
Jaromił0 
Jaromił Najman Its me again. I have been able to expand my model very well in the last few weeks and everything is working so far. Many thanks again. But now I have experimented a bit with my code and added a second "demand constraint" to my model. This is \(\sum_{i\in I} \sum_{r\in R(i)} \lambda_{ir}x_{its}^r \geq 0.1\cdot demand_{ts} \). I have also extended the objective function of the SPs accordingly to include the third dual values. Now I wanted to solve the model, but now I can't even calculate the dual values \(\mu_i\) or duals_i in the code. What is the reason for this? Haven't I actually changed anything in the model structure?
The new code is in the OP.0 
Your model is now either quadratic convex or quadratic nonconvex.
If it is quadratic convex, then you can still get the dual values by setting the QCPDual parameter.
If your model became nonconvex (you should see a log message stating it), then you cannot retrieve the dual values. We discuss a way to retrieve some duals in the Knowledge Base article How do I retrieve the (dual) Pi values for a MIP problem? Instead of fixing discrete variable, you would have to fix one of the variables participating in product terms.
Best regards,
Jaromił0 
Jaromił Najman Thanks. I dont know why it should be quadratic nonconvex now, as the new constraint is like the same as the original demand one. I tried the NonConvex with values of 1 and 2, but neither worked. Any further ideas?
def solveRelaxModel(self): self.model.Params.QCPDual = 1 self.model.Params.NonConvex = 2 for v in self.model.getVars(): v.setAttr('vtype', 'C') self.model.optimize()
0 
Could you please share the log output for the model? Maybe there is an indicator of what is happening.
0 
Jaromił Najman Sure. I updated the code and here are all the possible logs produced (lo0.txt , log1.txt and log2.txt). I guess log1.txt indicates the problem, stating:
Continuous model is non-convex -- solving as a MIP
You said that in this case a value must be fixed, but in my opinion this is done both in self.cons_demand[t, s] and in self.cons_demand2[t, s]. What else could be the problem? Any ideas why?
0 
Jaromił Najman I did some testing and noticed something strange. I copied the Demand_Dict once and multiplied all values by 0.1. If I now use the constraint:
def generateConstraints(self):
for t in self.days:
for s in self.shifts:
self.cons_demand_2[t, s] = self.model.addQConstr(
gu.quicksum(self.x_i[i, t, s, r]*self.lmbda[i, r] for i in self.nurses for r in self.rosterinitial)
>= 0, "demand2("+str(t)+","+str(s)+")")
return self.cons_demand_2
then it can be solved, also for self.demand[t, s]. However, if I use self.demand2[t, s] instead of >= 0, then it does not work, nor for >= 1, 2, ... . Why is it suddenly no longer solvable, but it is for >= 0 and >= self.demand[t, s]?
0 
Did you try to compute an IIS to determine the reason of infeasibility, see How do I determine why my model is infeasible?
0 
Jaromił Najman Well, it's weird, since the model isn't infeasible — I just can't compute the duals. Any further ideas why that can be?
0 
Could you please elaborate what "then it does not work" means? Do you mean that when you multiply the constraint with 0.1 then the model is no longer nonconvex?
Why is it suddenly no longer solvable, but it is for >= 0 and >= self.demand[t, s]?
My guess is that for \(\geq 0\), the Q matrix can be made positive semidefinite by exploiting the binary variable property that \(b^2 = b\), see the webinar on Models with Products of Binary variables for more information.
You said that in this case a value must be fixed, but in my opinion this is done both in self.cons_demand[t, s] and in self.cons_demand2[t, s]. What else could be the problem? Any ideas why?
What I meant by value fixing is that you first solve the nonconvex model to optimality. After the nonconvex model has been solved, you fix one of the variables in bilinear products to the optimal solution value. This will turn the originally bilinear terms into linear ones. You can now resolve the model, which is now and LP and you can retrieve the duals, see How do I retrieve the (dual) Pi values for a MIP problem? Please note that since your model has discrete variables, you will not be able to retrieve dual values anyway. So you will have to fix the values of all discrete variables to its optimal solution values after the model has been solved once, and then resolve the continuous linear program to get duals.
0 
Jaromił Najman Sorry, its me again. I recently tried to aggregate all my subproblems into one and use an aggregated formulation of the master problem. To do this, I got rid of the index \(i\) in both the master and the subproblems, and adjusted \(\lambda\) to be an integer value instead of binary. Now I get this error:
Pi_cons_lmbda = self.model.getAttr("Pi", self.cons_lmbda)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "src\\gurobipy\\model.pxi", line 1932, in gurobipy.Model.getAttr
gurobipy.GurobiError: Unrecognized argument to getAttr
Probably due to a problem with the constraint statement in line 58 and the dual retrieval in lines 74-76 (see code in the OP). Any idea how to fix this?
0 
Probably due to a problem with the constraint statement in line 58 and the dual retrieval in line 7476 (see code in the OP). Any idea how to fix this?
It is not possible to compute Pi values for models with discrete variables. We discuss a possible solution in How do I retrieve the (dual) Pi values for a MIP problem?
Best regards,
Jaromił0 
Jaromił Najman I know, and I did relax the model before solving it, but sadly it still doesn't work. The computation of the other dual values does work however, as I can print them out if I call them before calculating "duals_i". Why ist that the case?
0 
The object \(\texttt{self.cons\_lmbda}\) is only a single constraint. For single constraints, the model.getAttr method does not work. For single constraints, you can access the Pi value by
Pi_cons_lmbda = self.cons_lmbda.Pi
Best regards,
Jaromił0 
Jaromił Najman Hi. A new problem has appeared. I want to read and analyze all solutions from the solution pool of optimal solutions. For this I use this code.
for k in range(master.model.SolCount):
master.model.setParam(gu.GRB.Param.SolutionNumber, k)
vals = master.model.getAttr("Xn", master.lmbda)
solution = {key: round(value) for key, value in vals.items()}
I now have the problem that strange properties occur for some of these solutions from the pool. For example, if I solve the model with 3 nurses, then in the final solution it should be the solution over the lambda = 3, but in some of these solutions the sum is significantly greater than 3, e.g. 50. What could be the reason for this?0 
Hi Carl,
Did you double check that constraints assuring that \(\lambda = 3\) are indeed in the \(\texttt{master.model}\) object? For this you can use the write method and generate a humanreadable LP file.
master.model.write("myModel.lp")
You can open the file in any standard text editor and analyze whether \(\lambda = 3\) is indeed forced by the model's constraints.
Please also have a look at the Knowledge Base article How do I diagnose a wrong solution?
Best regards,
Jaromił0
Please sign in to leave a comment.
Comments
88 comments