Commit d3881be

[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

pre-commit-ci[bot] committed Oct 10, 2023
1 parent 7a0167b commit d3881be
Showing 5 changed files with 95 additions and 86 deletions.
17 changes: 7 additions & 10 deletions optimas/core/parameter.py
@@ -15,7 +15,7 @@
def json_dumps_dtype(v, *, default):
"""Add support for dumping numpy dtype to json."""
for key, value in v.items():
- if key == 'dtype':
+ if key == "dtype":
v[key] = np.dtype(value).descr
return json.dumps(v)

@@ -36,10 +36,7 @@ class Parameter(BaseModel):
dtype: Optional[Any]

def __init__(
- self,
- name: str,
- dtype: Optional[Any] = float,
- **kwargs
+ self, name: str, dtype: Optional[Any] = float, **kwargs
) -> None:
super().__init__(name=name, dtype=dtype, **kwargs)

@@ -96,7 +93,7 @@ def __init__(
is_fidelity: Optional[bool] = False,
fidelity_target_value: Optional[float] = None,
default_value: Optional[float] = None,
- dtype: Optional[Any] = float
+ dtype: Optional[Any] = float,
) -> None:
super().__init__(
name=name,
@@ -105,7 +102,7 @@
upper_bound=upper_bound,
is_fidelity=is_fidelity,
fidelity_target_value=fidelity_target_value,
- default_value=default_value
+ default_value=default_value,
)


@@ -129,7 +126,7 @@ def __init__(
self,
name: str,
save_name: Optional[str] = None,
- dtype: Optional[Any] = float
+ dtype: Optional[Any] = float,
) -> None:
super().__init__(name=name, save_name=save_name, dtype=dtype)
self.save_name = name if save_name is None else save_name
@@ -151,8 +148,8 @@ class Objective(Parameter):

def __init__(
self,
- name: Optional[str] = 'f',
+ name: Optional[str] = "f",
minimize: Optional[bool] = True,
- dtype: Optional[Any] = float
+ dtype: Optional[Any] = float,
) -> None:
super().__init__(name=name, minimize=minimize, dtype=dtype)
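For reference, a minimal sketch of how the constructors reformatted above are used; argument order follows the test updated later in this commit, the import path is taken directly from the module edited here, and the name "energy_spread" is a made-up example.

from optimas.core.parameter import Parameter, VaryingParameter, Objective

# Varying parameter: name plus lower/upper bounds (dtype defaults to float).
x0 = VaryingParameter("x0", -50.0, 5.0)

# Objective: name defaults to "f" and minimize defaults to True.
f1 = Objective("f1", minimize=False)

# Plain (analyzed) parameter: only a name is required.
energy_spread = Parameter("energy_spread")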
7 changes: 1 addition & 6 deletions optimas/core/task.py
@@ -21,10 +21,5 @@ class Task(BaseModel):
n_init: int
n_opt: int

- def __init__(
- self,
- name: str,
- n_init: int,
- n_opt: int
- ) -> None:
+ def __init__(self, name: str, n_init: int, n_opt: int) -> None:
super().__init__(name=name, n_init=n_init, n_opt=n_opt)
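task.py only defines this small Task model; a hypothetical instantiation, with the meaning of the two counters inferred from their names rather than stated in this diff, would be:

from optimas.core.task import Task

# Hypothetical multi-task setup: n_init/n_opt are taken to be the number of
# initial and optimization evaluations assigned to this task.
coarse = Task(name="coarse_sim", n_init=4, n_opt=8)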
22 changes: 11 additions & 11 deletions optimas/explorations/base.py
@@ -325,23 +325,23 @@ def _save_generator_parameters(self):
"""Save generator parameters to a JSON file."""
params = {}
for i, param in enumerate(self.generator.varying_parameters):
- params[f'varying_parameter_{i}'] = {
- 'type': 'VaryingParameter',
- 'value': param.json()
+ params[f"varying_parameter_{i}"] = {
+ "type": "VaryingParameter",
+ "value": param.json(),
}
for i, param in enumerate(self.generator.objectives):
- params[f'objective_{i}'] = {
- 'type': 'Objective',
- 'value': param.json()
+ params[f"objective_{i}"] = {
+ "type": "Objective",
+ "value": param.json(),
}
for i, param in enumerate(self.generator.analyzed_parameters):
- params[f'analyzed_parameter_{i}'] = {
- 'type': 'Parameter',
- 'value': param.json()
+ params[f"analyzed_parameter_{i}"] = {
+ "type": "Parameter",
+ "value": param.json(),
}
main_dir = os.path.abspath(self.exploration_dir_path)
if not os.path.isdir(main_dir):
os.makedirs(main_dir)
- file_path = os.path.join(main_dir, 'generator_parameters.json')
- with open(file_path, 'w') as file:
+ file_path = os.path.join(main_dir, "generator_parameters.json")
+ with open(file_path, "w") as file:
file.write(json.dumps(params))
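For orientation, the params dictionary assembled above (and written to generator_parameters.json, which ExplorationDiagnostics parses back later in this commit) has the following shape. The values are illustrative only; "value" holds whatever each parameter's pydantic .json() returns.

# Illustrative structure only (field contents abbreviated).
params = {
    "varying_parameter_0": {
        "type": "VaryingParameter",
        "value": '{"name": "x0", "lower_bound": -50.0, "upper_bound": 5.0, ...}',
    },
    "objective_0": {
        "type": "Objective",
        "value": '{"name": "f1", "minimize": false, ...}',
    },
    "analyzed_parameter_0": {
        "type": "Parameter",
        "value": '{"name": "energy_spread", ...}',
    },
}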
101 changes: 60 additions & 41 deletions optimas/post_processing.py
@@ -34,31 +34,34 @@ def __init__(
self,
path: str,
relative_start_time: Optional[bool] = True,
- remove_unfinished_evaluations: Optional[bool] = True
+ remove_unfinished_evaluations: Optional[bool] = True,
) -> None:
# Find the `npy` file that contains the results
if os.path.isdir(path):
# Get history files sorted by creation date.
output_files = [
- filename for filename in os.listdir(path)
- if "_history_" in filename and filename.endswith('.npy')
+ filename
+ for filename in os.listdir(path)
+ if "_history_" in filename and filename.endswith(".npy")
]
output_files.sort(
key=lambda f: os.path.getmtime(os.path.join(path, f))
)
if len(output_files) == 0:
raise RuntimeError(
- 'The specified path does not contain any history file.')
+ "The specified path does not contain any history file."
+ )
elif len(output_files) > 1:
warn(
- 'The specified path contains multiple history files. '
- 'The most recent one will be used.')
+ "The specified path contains multiple history files. "
+ "The most recent one will be used."
+ )
output_file = output_files[-1]
- params_file = os.path.join(path, 'generator_parameters.json')
- elif path.endswith('.npy'):
+ params_file = os.path.join(path, "generator_parameters.json")
+ elif path.endswith(".npy"):
output_file = path
params_file = os.path.join(
- pathlib.Path(path).parent, 'generator_parameters.json'
+ pathlib.Path(path).parent, "generator_parameters.json"
)
else:
raise RuntimeError(
@@ -76,12 +79,12 @@ def __init__(

# Make the time relative to the start of the simulation
if relative_start_time:
- start_time = self._df['gen_started_time'].min()
- self._df['sim_started_time'] -= start_time
- self._df['sim_ended_time'] -= start_time
- self._df['gen_started_time'] -= start_time
- self._df['gen_ended_time'] -= start_time
- self._df['gen_informed_time'] -= start_time
+ start_time = self._df["gen_started_time"].min()
+ self._df["sim_started_time"] -= start_time
+ self._df["sim_ended_time"] -= start_time
+ self._df["gen_started_time"] -= start_time
+ self._df["gen_ended_time"] -= start_time
+ self._df["gen_informed_time"] -= start_time

# Read varying parameters, objectives, etc.
self._read_generator_parameters(params_file)
@@ -98,14 +101,14 @@ def _read_generator_parameters(self, params_file: str) -> None:
with open(params_file) as f:
d = json.load(f)
for _, param in d.items():
- if param['type'] == 'VaryingParameter':
- p = VaryingParameter.parse_raw(param['value'])
+ if param["type"] == "VaryingParameter":
+ p = VaryingParameter.parse_raw(param["value"])
self._varying_parameters[p.name] = p
- elif param['type'] == 'Objective':
- p = Objective.parse_raw(param['value'])
+ elif param["type"] == "Objective":
+ p = Objective.parse_raw(param["value"])
self._objectives[p.name] = p
- elif param['type'] == 'Parameter':
- p = Parameter.parse_raw(param['value'])
+ elif param["type"] == "Parameter":
+ p = Parameter.parse_raw(param["value"])
self._analyzed_parameters[p.name] = p

def _rearrange_dataframe_columns(self) -> None:
@@ -115,17 +118,28 @@ def _rearrange_dataframe_columns(self) -> None:
when printing or viewing the dataframe because the order of the
numpy history file is different from run to run.
"""
- ordered_columns = ['trial_index']
+ ordered_columns = ["trial_index"]
ordered_columns += self._varying_parameters.keys()
ordered_columns += self._objectives.keys()
ordered_columns += self._analyzed_parameters.keys()
ordered_columns += [
- 'sim_id', 'sim_worker', 'sim_started_time', 'sim_ended_time',
- 'sim_started', 'sim_ended',
- 'gen_worker', 'gen_started_time', 'gen_ended_time',
- 'gen_informed_time', 'gen_informed',
- 'cancel_requested', 'kill_sent', 'given_back',
- 'num_procs', 'num_gpus']
+ "sim_id",
+ "sim_worker",
+ "sim_started_time",
+ "sim_ended_time",
+ "sim_started",
+ "sim_ended",
+ "gen_worker",
+ "gen_started_time",
+ "gen_ended_time",
+ "gen_informed_time",
+ "gen_informed",
+ "cancel_requested",
+ "kill_sent",
+ "given_back",
+ "num_procs",
+ "num_gpus",
+ ]
ordered_columns += [c for c in self._df if c not in ordered_columns]
self._df = self._df[ordered_columns]

@@ -153,7 +167,7 @@ def plot_objective(
self,
objective: Optional[str] = None,
fidelity_parameter: Optional[str] = None,
- show_trace: Optional[bool] = False
+ show_trace: Optional[bool] = False,
) -> None:
"""Plot the values that where reached during the optimization.
@@ -178,19 +192,20 @@
_, ax = plt.subplots()
ax.scatter(self._df.sim_ended_time, self._df[objective], c=fidelity)
ax.set_ylabel(objective)
- ax.set_xlabel('Time (s)')
+ ax.set_xlabel("Time (s)")

if show_trace:
t_trace, obj_trace = self.get_objective_trace(
- objective, fidelity_parameter)
+ objective, fidelity_parameter
+ )
ax.plot(t_trace, obj_trace)

def get_objective_trace(
self,
objective: Optional[str] = None,
fidelity_parameter: Optional[str] = None,
min_fidelity: Optional[float] = None,
- t_array: Optional[npt.NDArray] = None
+ t_array: Optional[npt.NDArray] = None,
) -> Tuple[npt.NDArray, npt.NDArray]:
"""Get the cumulative maximum or minimum of the objective.
@@ -224,7 +239,7 @@ def get_objective_trace(
else:
df = self._df.copy()

- df = df.sort_values('sim_ended_time')
+ df = df.sort_values("sim_ended_time")
t = df.sim_ended_time.values
if objective.minimize:
obj_trace = df[objective.name].cummin().values
@@ -248,8 +263,8 @@
return t_array, obj_trace_array

def plot_worker_timeline(
- self,
- fidelity_parameter: Optional[str] = None
+ self, fidelity_parameter: Optional[str] = None
) -> None:
"""Plot the timeline of worker utilization.
@@ -274,10 +288,15 @@
(fidelity - min_fidelity) / (max_fidelity - min_fidelity)
)
else:
- color = 'tab:blue'
- ax.barh([str(df['sim_worker'].iloc[i])],
- [duration], left=[start],
- color=color, edgecolor='k', linewidth=1)
+ color = "tab:blue"
+ ax.barh(
+ [str(df["sim_worker"].iloc[i])],
+ [duration],
+ left=[start],
+ color=color,
+ edgecolor="k",
+ linewidth=1,
+ )

- ax.set_ylabel('Worker')
- ax.set_xlabel('Time (s)')
+ ax.set_ylabel("Worker")
+ ax.set_xlabel("Time (s)")
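Taken together with the test below, the reformatted diagnostics API can be driven roughly as follows; this is a sketch, with the exploration directory name purely hypothetical, while the class, method names, and signatures are those visible in this diff.

import matplotlib.pyplot as plt

from optimas.post_processing import ExplorationDiagnostics

# Point the diagnostics at an exploration directory (or directly at a *_history_*.npy file).
diags = ExplorationDiagnostics("./exploration_dir")

# Scatter of objective values vs. time, with the cumulative-best trace overlaid.
diags.plot_objective(show_trace=True)
plt.savefig("optimization.png")

# Cumulative best objective and the corresponding simulation end times.
t_trace, obj_trace = diags.get_objective_trace()

# Per-worker utilization timeline.
diags.plot_worker_timeline()
plt.savefig("timeline.png")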
34 changes: 16 additions & 18 deletions tests/test_exploration_diagnostics.py
@@ -12,26 +12,25 @@

def eval_func(input_params, output_params):
"""Evaluation function used for testing"""
- x0 = input_params['x0']
- x1 = input_params['x1']
+ x0 = input_params["x0"]
+ x1 = input_params["x1"]
result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1))
- output_params['f1'] = result
+ output_params["f1"] = result


def test_exploration_diagnostics():
"""Test the `ExplorationDiagnostics` class."""

- exploration_dir_path = './tests_output/test_exploration_diagnostics'
+ exploration_dir_path = "./tests_output/test_exploration_diagnostics"

# Define variables and objectives.
- var1 = VaryingParameter('x0', -50., 5.)
- var2 = VaryingParameter('x1', -5., 15.)
- obj = Objective('f1', minimize=False)
+ var1 = VaryingParameter("x0", -50.0, 5.0)
+ var2 = VaryingParameter("x1", -5.0, 15.0)
+ obj = Objective("f1", minimize=False)

# Create generator.
gen = RandomSamplingGenerator(
- varying_parameters=[var1, var2],
- objectives=[obj]
+ varying_parameters=[var1, var2], objectives=[obj]
)

# Create function evaluator.
@@ -43,7 +42,7 @@ def test_exploration_diagnostics():
evaluator=ev,
max_evals=10,
sim_workers=2,
- exploration_dir_path=exploration_dir_path
+ exploration_dir_path=exploration_dir_path,
)

# Run exploration.
@@ -53,12 +52,11 @@
diags = ExplorationDiagnostics(
exploration_dir_path,
relative_start_time=False,
- remove_unfinished_evaluations=False
+ remove_unfinished_evaluations=False,
)
for name in exploration.history.dtype.names:
np.testing.assert_array_equal(
- diags.df[name].array.to_numpy(),
- exploration.history[name]
+ diags.df[name].array.to_numpy(), exploration.history[name]
)

for p_in, p_out in zip(gen.varying_parameters, diags.varying_parameters):
@@ -69,14 +67,14 @@
assert p_in.json() == p_out.json()

diags.plot_objective(show_trace=True)
- plt.savefig(os.path.join(exploration_dir_path, 'optimization.png'))
+ plt.savefig(os.path.join(exploration_dir_path, "optimization.png"))

diags.plot_worker_timeline()
- plt.savefig(os.path.join(exploration_dir_path, 'timeline.png'))
+ plt.savefig(os.path.join(exploration_dir_path, "timeline.png"))

# Check that all 3 possible objective inputs give the same result.
_, trace1 = diags.get_objective_trace()
- _, trace2 = diags.get_objective_trace('f1')
+ _, trace2 = diags.get_objective_trace("f1")
_, trace3 = diags.get_objective_trace(obj)
np.testing.assert_array_equal(trace1, trace2)
np.testing.assert_array_equal(trace1, trace3)
@@ -93,8 +91,8 @@
ax.axhline(vps[1].upper_bound)
ax.set_ylabel(vps[1].name)
ax.scatter(df[vps[0].name], df[vps[1].name], c=df[f1.name])
- fig.savefig(os.path.join(exploration_dir_path, 'search_space.png'))
+ fig.savefig(os.path.join(exploration_dir_path, "search_space.png"))


- if __name__ == '__main__':
+ if __name__ == "__main__":
test_exploration_diagnostics()
