Various fixes to KirlikSayin #108

Merged
merged 2 commits on Jun 10, 2025

61 changes: 28 additions & 33 deletions src/algorithms/KirlikSayin.jl
@@ -90,6 +90,7 @@ function optimize_multiobjective!(algorithm::KirlikSayin, model::Optimizer)
model.ideal_point .*= -1
return status, solutions
end
@assert sense == MOI.MIN_SENSE
solutions = SolutionPoint[]
# Problem with p objectives.
# Set k = 1, meaning the nondominated points will get projected
@@ -99,68 +100,62 @@ function optimize_multiobjective!(algorithm::KirlikSayin, model::Optimizer)
variables = MOI.get(model.inner, MOI.ListOfVariableIndices())
n = MOI.output_dimension(model.f)
yI, yN = zeros(n), zeros(n)
δ = sense == MOI.MIN_SENSE ? -1 : 1
# This tolerance is really important!
δ = 1.0
Member Author

Is this tolerance arbitrary? Why 1.0?

Contributor

Since these algorithms target integer programs, the ε-constraints enforce f_i(x) < u_i, and the bounds are integer as well.
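
As a rough standalone check (hypothetical values, not part of this diff): for integer-valued objectives, the shifted bound f_i(x) <= u_i - 1 accepts exactly the same points as the strict constraint f_i(x) < u_i, which is why δ = 1.0 is exact rather than arbitrary.

```julia
# Sketch only: integer bound u_i and integer objective value f_val.
δ = 1.0
for u_i in -3:3, f_val in -3:3
    # For integers, "strictly below u_i" and "at most u_i - 1" coincide.
    @assert (f_val < u_i) == (f_val <= u_i - δ)
end
# For continuous objectives this equivalence fails: f_val = u_i - 0.5
# satisfies f_val < u_i but violates f_val <= u_i - δ.
```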

scalars = MOI.Utilities.scalarize(model.f)
# Ideal and Nadir point estimation
for (i, f_i) in enumerate(scalars)
# Ideal point
MOI.set(model.inner, MOI.ObjectiveFunction{typeof(f_i)}(), f_i)
MOI.set(model.inner, MOI.ObjectiveSense(), sense)
MOI.optimize!(model.inner)
status = MOI.get(model.inner, MOI.TerminationStatus())
if !_is_scalar_status_optimal(status)
return status, nothing
end
_, Y = _compute_point(model, variables, f_i)
yI[i] = Y + 1
model.ideal_point[i] = Y
MOI.set(
model.inner,
MOI.ObjectiveSense(),
sense == MOI.MIN_SENSE ? MOI.MAX_SENSE : MOI.MIN_SENSE,
)
model.ideal_point[i] = yI[i] = Y
# Nadir point
MOI.set(model.inner, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.optimize!(model.inner)
status = MOI.get(model.inner, MOI.TerminationStatus())
if !_is_scalar_status_optimal(status)
_warn_on_nonfinite_anti_ideal(algorithm, sense, i)
# Repair ObjectiveSense before exiting
MOI.set(model.inner, MOI.ObjectiveSense(), MOI.MIN_SENSE)
_warn_on_nonfinite_anti_ideal(algorithm, MOI.MIN_SENSE, i)
return status, nothing
end
_, Y = _compute_point(model, variables, f_i)
yN[i] = Y
yN[i] = Y + δ
Member Author

So the +1 (now +δ) was needed because we have a separate constraint f_i(x) <= u_i - δ, so the box bounds need to account for that?

Contributor

There was a +1 here because the δ was for the ε-constraints. Having +1 makes it consistent with KS and DR. The original algorithm uses some big M.
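
A tiny hypothetical check of that consistency (values invented, not from the PR): padding the anti-ideal value Y by the same δ used in the ε-constraints means that when the first rectangle's upper bound equals the padded nadir estimate, the constraint f_i(x) <= u_i - δ still admits Y, so no feasible point is cut off at the start.

```julia
# Hypothetical values for one objective i; δ matches the ε-constraint shift.
Y = 7.0              # assumed anti-ideal (worst) value of f_i
δ = 1.0
yN_i = Y + δ         # padded nadir estimate, used as the initial box bound
u_i = yN_i           # first rectangle's upper bound for objective i
@assert u_i - δ == Y # ε-constraint f_i(x) <= u_i - δ still allows f_i(x) = Y
```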

MOI.set(model.inner, MOI.ObjectiveSense(), MOI.MIN_SENSE)
end
# Reset the sense after modifying it.
MOI.set(model.inner, MOI.ObjectiveSense(), sense)
L = [_Rectangle(_project(yI, k), _project(yN, k))]
SetType = ifelse(
sense == MOI.MIN_SENSE,
MOI.LessThan{Float64},
MOI.GreaterThan{Float64},
)
status = MOI.OPTIMAL
while !isempty(L)
if _time_limit_exceeded(model, start_time)
status = MOI.TIME_LIMIT
break
return MOI.TIME_LIMIT, solutions
end
Rᵢ = L[argmax([_volume(Rᵢ, _project(yI, k)) for Rᵢ in L])]
lᵢ, uᵢ = Rᵢ.l, Rᵢ.u
max_volume_index = argmax([_volume(Rᵢ, _project(yI, k)) for Rᵢ in L])
uᵢ = L[max_volume_index].u
# Solving the first stage model: P_k(ε)
# Set ε := uᵢ
ε = insert!(copy(uᵢ), k, 0.0)
ε_constraints = Any[]
# minimize: f_1(x)
# s.t.: f_i(x) <= u_i - δ
@assert k == 1
MOI.set(
model.inner,
MOI.ObjectiveFunction{typeof(scalars[k])}(),
scalars[k],
)
ε_constraints = Any[]
for (i, f_i) in enumerate(scalars)
if i != k
ci = MOI.Utilities.normalize_and_add_constraint(
model.inner,
f_i,
SetType(ε[i] + δ),
)
push!(ε_constraints, ci)
if i == k
continue
end
ci = MOI.Utilities.normalize_and_add_constraint(
model.inner,
f_i,
MOI.LessThan{Float64}(uᵢ[i-1] - δ),
)
push!(ε_constraints, ci)
end
MOI.optimize!(model.inner)
if !_is_scalar_status_optimal(model)
@@ -171,7 +166,7 @@ function optimize_multiobjective!(algorithm::KirlikSayin, model::Optimizer)
zₖ = MOI.get(model.inner, MOI.ObjectiveValue())
# Solving the second stage model: Q_k(ε, zₖ)
# Set objective sum(model.f)
sum_f = sum(1.0 * s for s in scalars)
sum_f = MOI.Utilities.operate(+, Float64, scalars...)
MOI.set(model.inner, MOI.ObjectiveFunction{typeof(sum_f)}(), sum_f)
# Constraint to eliminate weak dominance
zₖ_constraint = MOI.Utilities.normalize_and_add_constraint(
40 changes: 39 additions & 1 deletion test/algorithms/KirlikSayin.jl
@@ -511,6 +511,7 @@ function test_infeasible()
MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
MOI.add_constraint(model, 1.0 * x[1] + 1.0 * x[2], MOI.LessThan(-1.0))
f = MOI.Utilities.operate(vcat, Float64, 1.0 .* x...)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
@@ -603,7 +604,44 @@ function test_vector_of_variables_objective()
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.add_constraint(model, sum(1.0 * xi for xi in x), MOI.GreaterThan(1.0))
MOI.optimize!(model)
MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
return
end

function test_issue_105()
cost = [100.0, 120.0, 150.0, 110.0, 200.0, 170.0]
time = [8.0, 3.0, 4.0, 2.0, 5.0, 4.0]
capacity = [10.0, 8.0]
demand = [5.0, 8.0, 5.0]
m, n = 2, 3
model = MOI.instantiate(; with_bridge_type = Float64) do
return MOA.Optimizer(HiGHS.Optimizer)
end
MOI.set(model, MOA.Algorithm(), MOA.KirlikSayin())
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variables(model, m * n)
MOI.add_constraint.(model, x, MOI.GreaterThan(0.0))
MOI.add_constraint.(model, x, MOI.Integer())
X = reshape(x, m, n)
for i in 1:m
f_i = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, X[i, :]), 0.0)
MOI.add_constraint(model, f_i, MOI.LessThan(capacity[i]))
end
for j in 1:n
f_j = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, X[:, j]), 0.0)
MOI.add_constraint(model, f_j, MOI.EqualTo(demand[j]))
end
f = MOI.Utilities.operate(
vcat,
Float64,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(cost, x), 0.0),
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(time, x), 0.0),
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, x), 0.0),
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.optimize!(model)
@test MOI.get(model, MOI.ResultCount()) == 6
return
end
