Skip to content

Commit

Permalink
Add test
Browse files Browse the repository at this point in the history
  • Loading branch information
AngelFP committed Sep 26, 2023
1 parent ae73ef1 commit 179aa8e
Show file tree
Hide file tree
Showing 3 changed files with 76 additions and 1 deletion.
2 changes: 2 additions & 0 deletions tests/resources/env_script.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
#!/usr/bin/env bash
# Exports a marker variable that the template simulation script reads back,
# so the test suite can verify that `env_script` files are sourced before
# each simulation is launched.
export LIBE_TEST_SUB_ENV_VAR="testvalue"
8 changes: 7 additions & 1 deletion tests/resources/template_simulation_script.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,16 @@
Dummy simulation template used for testing. It takes x0 and x1 as input
parameters and stores the result in `result.txt`.
"""
import os
import numpy as np

# Set only when the evaluator sources an `env_script` before running the
# simulation (see tests/resources/env_script.sh); otherwise None.
test_env_var = os.getenv('LIBE_TEST_SUB_ENV_VAR')

# 2D function with multiple minima. The `{{x0}}` and `{{x1}}` placeholders
# are substituted with concrete numbers by the template engine before this
# script is executed.
result = -( {{x0}} + 10*np.cos({{x0}}) )*( {{x1}} + 5*np.cos({{x1}}) )

with open('result.txt', 'w') as f:
    output = [str(result)]
    if test_env_var is not None:
        output.append(test_env_var)
    # Join with explicit newlines: `writelines` adds no line separators,
    # which previously merged the result and the env-var value into a
    # single line and broke the `readlines()`-based analysis function.
    f.write('\n'.join(output))
67 changes: 67 additions & 0 deletions tests/test_env_script.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
import os

import numpy as np

from optimas.explorations import Exploration
from optimas.generators import RandomSamplingGenerator
from optimas.evaluators import TemplateEvaluator
from optimas.core import VaryingParameter, Objective, Parameter


def analysis_func(sim_dir, output_params):
    """Analysis function used by the template evaluator.

    Reads the simulation output from ``result.txt`` in the current
    working directory and stores the objective and the test variable
    in ``output_params``.

    Parameters
    ----------
    sim_dir : str
        Path of the simulation directory (unused; the evaluator runs
        this function with the simulation directory as cwd).
    output_params : dict
        Dictionary in which the analyzed quantities are stored under
        the keys ``'f'`` and ``'test_var'``.
    """
    # Read back result from file. Use a dedicated name for the lines so
    # the file handle `f` is not shadowed by the objective value.
    with open('result.txt') as f:
        lines = f.readlines()
    output_params['f'] = float(lines[0])
    # Strip the trailing newline (if any) so string comparisons against
    # the raw exported value succeed.
    output_params['test_var'] = lines[1].strip()


def test_env_script():
    """Check that an ``env_script`` is sourced before running simulations.

    The environment script exports ``LIBE_TEST_SUB_ENV_VAR``; the template
    simulation writes the variable back into its output file, and its value
    is checked in the exploration history after the run.
    """
    resources_dir = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'resources')

    # Varying parameters, objective and analyzed parameter.
    varying_parameters = [
        VaryingParameter('x0', -50., 5.),
        VaryingParameter('x1', -5., 15.),
    ]
    objectives = [Objective('f', minimize=False)]
    analyzed_parameters = [Parameter('test_var', dtype=str)]

    # Random-sampling generator over the two varying parameters.
    generator = RandomSamplingGenerator(
        varying_parameters=varying_parameters,
        objectives=objectives,
        analyzed_parameters=analyzed_parameters
    )

    # Template evaluator that sources the environment script before
    # each simulation.
    evaluator = TemplateEvaluator(
        sim_template=os.path.join(
            resources_dir, 'template_simulation_script.py'),
        analysis_func=analysis_func,
        env_script=os.path.join(resources_dir, 'env_script.sh'),
    )

    # Create and run the exploration.
    exploration = Exploration(
        generator=generator,
        evaluator=evaluator,
        max_evals=10,
        sim_workers=2,
        exploration_dir_path='./tests_output/test_env_script'
    )
    exploration.run()

    # Every evaluation should have picked up the exported variable.
    assert np.all(exploration.history['test_var'] == 'testvalue')


# Allow running this test directly as a script, outside of pytest.
if __name__ == '__main__':
    test_env_script()

0 comments on commit 179aa8e

Please sign in to comment.