Fix in the lowerbound tightening #204

Merged · 24 commits · Oct 25, 2024
4 changes: 2 additions & 2 deletions examples/birkhoff.jl
@@ -76,12 +76,12 @@ function build_birkhoff_lmo()
# doubly stochastic constraints
MOI.add_constraint.(
o,
- vec(sum(X[i], dims=1, init=MOI.ScalarAffineFunction{Float64}([], 0.0))),
+ X[i] * ones(n),
MOI.EqualTo(1.0),
)
MOI.add_constraint.(
o,
- vec(sum(X[i], dims=2, init=MOI.ScalarAffineFunction{Float64}([], 0.0))),
+ X[i]' * ones(n),
MOI.EqualTo(1.0),
)
# 0 ≤ Y_i ≤ X_i
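For context, the two formulations of the doubly stochastic constraints are algebraically equivalent: multiplying a matrix by the all-ones vector collects its row sums, and multiplying its transpose by the all-ones vector collects its column sums. A minimal numeric sketch in plain Julia with a dense matrix, independent of the MOI variable matrices used above:

```julia
using LinearAlgebra

n = 4
A = rand(n, n)

# Row sums: A * ones(n) equals vec(sum(A, dims=2)).
@assert A * ones(n) ≈ vec(sum(A, dims=2))

# Column sums: A' * ones(n) equals vec(sum(A, dims=1)).
@assert A' * ones(n) ≈ vec(sum(A, dims=1))
```

Either form encodes that every row and every column of each X_i sums to one.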
142 changes: 84 additions & 58 deletions examples/optimal_experiment_design.jl
@@ -40,88 +40,114 @@ include("oed_utils.jl")
https://github.com/ZIB-IOL/FrankWolfe.jl/blob/master/examples/optimal_experiment_design.jl
"""

m = 40

m = 50
verbose = true

## A-Optimal Design Problem

@testset "A-Optimal Design" begin

Ex_mat, n, N, ub = build_data(m)

# sharpness constants
σ = minimum(Ex_mat' * Ex_mat)
λ_max = maximum(ub) * maximum([norm(Ex_mat[i,:])^2 for i=1:size(Ex_mat,1)])
θ = 1/2
M = sqrt(λ_max^3/ n * σ^4)


g, grad! = build_a_criterion(Ex_mat, build_safe=true)
g, grad! = build_a_criterion(Ex_mat, build_safe=false)
blmo = build_blmo(m, N, ub)
x0, active_set = build_start_point(Ex_mat, N, ub)
z = greedy_incumbent(Ex_mat, N, ub)
domain_oracle = build_domain_oracle(Ex_mat, n)
line_search = FrankWolfe.MonotonicGenericStepsize(FrankWolfe.Adaptive(), domain_oracle)
heu = Boscia.Heuristic(Boscia.rounding_hyperplane_heuristic, 0.7, :hyperplane_aware_rounding)
domain_oracle = build_domain_oracle(Ex_mat, n)

x, _, result = Boscia.solve(g, grad!, blmo, active_set=active_set, start_solution=z, time_limit=10, verbose=false, domain_oracle=domain_oracle, custom_heuristics=[heu], sharpness_exponent=θ, sharpness_constant=M, line_search=line_search)

_, active_set = build_start_point(Ex_mat, N, ub)
# precompile
line_search = FrankWolfe.MonotonicGenericStepsize(FrankWolfe.Adaptive(), domain_oracle)
x0, active_set = build_start_point(Ex_mat, N, ub)
z = greedy_incumbent(Ex_mat, N, ub)
x, _, result = Boscia.solve(g, grad!, blmo, active_set=active_set, start_solution=z, verbose=verbose, domain_oracle=domain_oracle, custom_heuristics=[heu], sharpness_exponent=θ, sharpness_constant=M, line_search=line_search) #sharpness_exponent=θ, sharpness_constant=M,

gradient = similar(x)
grad!(gradient, x)
v = Boscia.compute_extreme_point(blmo, gradient)
dual_gap = FrankWolfe.dot(gradient, x) - FrankWolfe.dot(gradient, v)
@show dual_gap
x, _, _ = Boscia.solve(g, grad!, blmo, active_set=active_set, start_solution=z, time_limit=10, verbose=false, domain_oracle=domain_oracle, custom_heuristics=[heu], line_search=line_search)

blmo = build_blmo(m, N, ub)
_, active_set = build_start_point(Ex_mat, N, ub)
# proper run with MGLS and Adaptive
line_search = FrankWolfe.MonotonicGenericStepsize(FrankWolfe.Adaptive(), domain_oracle)
x0, active_set = build_start_point(Ex_mat, N, ub)
z = greedy_incumbent(Ex_mat, N, ub)
domain_oracle = build_domain_oracle(Ex_mat, n)
heu = Boscia.Heuristic(Boscia.rounding_hyperplane_heuristic, 0.7, :hyperplane_aware_rounding)

x_s, _, result = Boscia.solve(g, grad!, blmo, active_set=active_set, start_solution=z, verbose=verbose, line_search=FrankWolfe.Secant(domain_oracle=domain_oracle), domain_oracle=domain_oracle, sharpness_exponent=θ, sharpness_constant=M, custom_heuristics=[heu]) #sharpness_exponent=θ, sharpness_constant=M,

@show x
@show x_s
@show g(x), g(x_s)
@test isapprox(g(x), g(x_s), atol=1e-3, rtol=5e-2)
x, _, result = Boscia.solve(
g,
grad!,
blmo,
active_set=active_set,
start_solution=z,
verbose=verbose,
domain_oracle=domain_oracle,
custom_heuristics=[heu],
line_search=line_search,
)

# Run with Secant
x0, active_set = build_start_point(Ex_mat, N, ub)
z = greedy_incumbent(Ex_mat, N, ub)
line_search = FrankWolfe.Secant(domain_oracle=domain_oracle)

x_s, _, result_s = Boscia.solve(
g,
grad!,
blmo,
active_set=active_set,
start_solution=z,
verbose=verbose,
domain_oracle=domain_oracle,
custom_heuristics=[heu],
line_search=line_search,
)

@test result_s[:dual_bound] <= g(x) + 1e-4
@test result[:dual_bound] <= g(x_s) + 1e-4
@test isapprox(g(x), g(x_s), atol=1e-3)
end

## D-Optimal Design Problem

@testset "D-optimal Design" begin
Ex_mat, n, N, ub = build_data(m)

# sharpness constants
σ = minimum(Ex_mat' * Ex_mat)
λ_max = maximum(ub) * maximum([norm(Ex_mat[i,:])^2 for i=1:size(Ex_mat,1)])
θ = 1/2
M = sqrt(2 * λ_max^2/ n * σ^4 )


g, grad! = build_d_criterion(Ex_mat, build_safe=true)
g, grad! = build_d_criterion(Ex_mat, build_safe=false)
blmo = build_blmo(m, N, ub)
x0, active_set = build_start_point(Ex_mat, N, ub)
z = greedy_incumbent(Ex_mat, N, ub)
domain_oracle = build_domain_oracle(Ex_mat, n)
heu = Boscia.Heuristic(Boscia.rounding_hyperplane_heuristic, 0.7, :hyperplane_aware_rounding)
domain_oracle = build_domain_oracle(Ex_mat, n)

x, _, result = Boscia.solve(g, grad!, blmo, active_set=active_set, start_solution=z, verbose=verbose, domain_oracle=domain_oracle, sharpness_exponent=θ, sharpness_constant=M, custom_heuristics=[heu])

blmo = build_blmo(m, N, ub)
# precompile
line_search = FrankWolfe.MonotonicGenericStepsize(FrankWolfe.Adaptive(), domain_oracle)
x0, active_set = build_start_point(Ex_mat, N, ub)
z = greedy_incumbent(Ex_mat, N, ub)
domain_oracle = build_domain_oracle(Ex_mat, n)
heu = Boscia.Heuristic(Boscia.rounding_hyperplane_heuristic, 0.7, :hyperplane_aware_rounding)
x, _, _ = Boscia.solve(g, grad!, blmo, active_set=active_set, start_solution=z, time_limit=10, verbose=false, domain_oracle=domain_oracle, custom_heuristics=[heu], line_search=line_search)

x_s, _, result = Boscia.solve(g, grad!, blmo, active_set=active_set, start_solution=z, verbose=verbose, line_search=FrankWolfe.Secant(domain_oracle=domain_oracle), domain_oracle=domain_oracle, sharpness_exponent=θ, sharpness_constant=M, custom_heuristics=[heu])

@show x
@show x_s
@show g(x), g(x_s)
@test isapprox(g(x), g(x_s), atol=1e-4, rtol=1e-2)
# proper run with MGLS and Adaptive
line_search = FrankWolfe.MonotonicGenericStepsize(FrankWolfe.Adaptive(), domain_oracle)
x0, active_set = build_start_point(Ex_mat, N, ub)
z = greedy_incumbent(Ex_mat, N, ub)
x, _, result = Boscia.solve(
g,
grad!,
blmo,
active_set=active_set,
start_solution=z,
verbose=verbose,
domain_oracle=domain_oracle,
custom_heuristics=[heu],
line_search=line_search,
)

# Run with Secant
x0, active_set = build_start_point(Ex_mat, N, ub)
z = greedy_incumbent(Ex_mat, N, ub)
line_search = FrankWolfe.Secant(domain_oracle=domain_oracle)

x_s, _, result_s = Boscia.solve(
g,
grad!,
blmo,
active_set=active_set,
start_solution=z,
verbose=verbose,
domain_oracle=domain_oracle,
custom_heuristics=[heu],
line_search=line_search,
)

@test result_s[:dual_bound] <= g(x)
@test result[:dual_bound] <= g(x_s)
@test isapprox(g(x), g(x_s), rtol=1e-2)
end
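The builders used above (`build_data`, `build_a_criterion`, `build_d_criterion`, `build_blmo`, `build_start_point`, `greedy_incumbent`, `build_domain_oracle`) live in `oed_utils.jl` and are not part of this diff. As a rough sketch only, assuming the standard experiment-design objectives (the actual builders may scale, regularize, or safeguard differently), the A-criterion is the trace of the inverse information matrix and the D-criterion its negative log-determinant:

```julia
using LinearAlgebra

# Hypothetical sketch of the objectives tested above; Ex_mat is the m × n
# experiment matrix and x a weight vector of length m.
information_matrix(Ex_mat, x) = Symmetric(Ex_mat' * Diagonal(x) * Ex_mat)

a_criterion(Ex_mat) = x -> tr(inv(information_matrix(Ex_mat, x)))   # A-optimal: tr(X(x)^-1)
d_criterion(Ex_mat) = x -> -logdet(information_matrix(Ex_mat, x))   # D-optimal: -log det X(x)
```

Both objectives are only defined where the information matrix is positive definite, which is why the solves pass a `domain_oracle` and use domain-aware line searches (`MonotonicGenericStepsize` wrapping `Adaptive`, or `Secant`).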

2 changes: 1 addition & 1 deletion src/node.jl
@@ -283,7 +283,7 @@ function Bonobo.evaluate_node!(tree::Bonobo.BnBTree, node::FrankWolfeNode)
elseif node.id == 1
@debug "Lower bound of root node: $(lower_bound)"
@debug "Current incumbent: $(tree.incumbent)"
- @assert lower_bound <= tree.incumbent + node.fw_dual_gap_limit "lower_bound <= tree.incumbent + node.fw_dual_gap_limit : $(lower_bound) <= $(tree.incumbent + node.fw_dual_gap_limit)"
+ @assert lower_bound <= tree.incumbent + dual_gap "lower_bound <= tree.incumbent + dual_gap : $(lower_bound) <= $(tree.incumbent + dual_gap)"
end

# Call heuristic
10 changes: 6 additions & 4 deletions src/tightenings.jl
@@ -218,7 +218,13 @@ function tightening_lowerbound(tree, node, x, lower_bound)
@debug "Using sharpness θ=$θ and M=$M"
fx = tree.root.problem.f(x)

+ if node.dual_gap < 0.0
+ @assert abs(node.dual_gap) > eps() "node dual gap is negative: $(node.dual_gap)"
+ node.dual_gap = 0.0
+ end

sharpness_bound = M^(- 1 / θ) * 1/ 2 * (sqrt(bound_improvement) - M / 2 * node.dual_gap^θ)^(1 / θ) + fx - node.dual_gap

@debug "Sharpness: $lower_bound -> $sharpness_bound"
@assert num_fractional == 0 || sharpness_bound >= lower_bound "$(num_fractional) == 0 || $(sharpness_bound) > $(lower_bound)"
end
@@ -283,10 +289,6 @@ function prune_children(tree, node, lower_bound_base, x, vidx)
@debug "prune right, from $(node.lb) -> $new_bound_right, ub $(tree.incumbent), lb $(node.lb)"
prune_right = true
end
- @assert !(
- (new_bound_left > tree.incumbent + tree.root.options[:dual_gap]) &&
- (new_bound_right > tree.incumbent + tree.root.options[:dual_gap])
- )
end
end

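The guard added in the first hunk above protects the sharpness-based tightening: with a sharpness exponent of θ = 1/2 (as in the examples), `node.dual_gap^θ` is a square root, so a dual gap that has drifted slightly below zero through floating-point error would raise a `DomainError`; clamping it to zero keeps the bound computable. The tightening itself rests on a Hölder-type error bound. Stated here only as a reminder of one common convention (not a verbatim quote of the Boscia definition), a function f with minimizer set X* is (θ, M)-sharp if

$$\min_{x^* \in X^*} \|x - x^*\| \le M \, \bigl(f(x) - f^*\bigr)^{\theta},$$

which, read the other way, says a feasible point at distance d from X* has objective value at least f^* + (d / M)^{1/θ}; this is what lets the node lower bound be pushed beyond the plain Frank-Wolfe dual bound.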
36 changes: 36 additions & 0 deletions src/utilities.jl
@@ -229,6 +229,42 @@ function sparse_min_via_enum(f, n, k, values=fill(0:1, n))
return best_val, best_sol
end

function min_via_enum_simplex(f, n, N, values=fill(0:1,n))
solutions = Iterators.product(values...)
best_val = Inf
best_sol = nothing
for sol in solutions
sol_vec = collect(sol)
if sum(sol_vec) > N
continue
end
val = f(sol_vec)
if best_val > val
best_val = val
best_sol = sol_vec
end
end
return best_val, best_sol
end

function min_via_enum_prob_simplex(f, n, N, values=fill(0:1,n))
solutions = Iterators.product(values...)
best_val = Inf
best_sol = nothing
for sol in solutions
sol_vec = collect(sol)
if sum(sol_vec) != N
continue
end
val = f(sol_vec)
if best_val > val
best_val = val
best_sol = sol_vec
end
end
return best_val, best_sol
end


# utility function to print the values of the parameters
_value_to_print(::Bonobo.BestFirstSearch) = "Move best bound"
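A toy usage sketch for the two new enumeration helpers (the objective, dimension, and budget below are illustrative; as internal helpers they would normally be called with the `Boscia.` prefix): `min_via_enum_simplex` enumerates candidate vectors whose entries sum to at most N, while `min_via_enum_prob_simplex` keeps only those summing to exactly N.

```julia
using Boscia

# Illustrative brute-force check of the new helpers on a small quadratic.
target = [0.2, 0.8, 0.5, 0.1]
f(x) = sum((x .- target) .^ 2)

best_val, best_sol = Boscia.min_via_enum_simplex(f, 4, 2)          # feasible if sum(x) ≤ 2
best_val_p, best_sol_p = Boscia.min_via_enum_prob_simplex(f, 4, 2) # feasible if sum(x) == 2

@show best_val best_sol
@show best_val_p best_sol_p
```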