🎨 Format Python code with psf/black (#297)
Co-authored-by: dario-coscia <[email protected]>
github-actions[bot] and dario-coscia authored May 10, 2024
1 parent e0429bb commit 9463ae4
Showing 11 changed files with 169 additions and 160 deletions.
3 changes: 1 addition & 2 deletions pina/solvers/__init__.py
@@ -9,11 +9,10 @@
"SupervisedSolver",
"ReducedOrderModelSolver",
"GAROM",
]
]

from .solver import SolverInterface
from .pinns import *
from .supervised import SupervisedSolver
from .rom import ReducedOrderModelSolver
from .garom import GAROM

33 changes: 17 additions & 16 deletions pina/solvers/pinns/basepinn.py
@@ -12,11 +12,12 @@

torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732


class PINNInterface(SolverInterface, metaclass=ABCMeta):
"""
Base PINN solver class. This class implements the Solver Interface
for Physics Informed Neural Network solvers.
This class can be used to
define PINNs with multiple ``optimizers``, and/or ``models``.
By default it takes
@@ -72,7 +73,7 @@ def __init__(
self._clamp_params = self._clamp_inverse_problem_params
else:
self._params = None
self._clamp_params = lambda : None
self._clamp_params = lambda: None

# variable used internally to store residual losses at each epoch
# this variable save the residual at each iteration (not weighted)
@@ -107,7 +108,7 @@ def training_step(self, batch, _):
condition = self.problem.conditions[condition_name]
pts = batch["pts"]
# condition name is logged (if logs enabled)
self.__logged_metric = condition_name
self.__logged_metric = condition_name

if len(batch) == 2:
samples = pts[condition_idx == condition_id]
@@ -160,7 +161,7 @@ def loss_phys(self, samples, equation):
:rtype: LabelTensor
"""
pass

def compute_residual(self, samples, equation):
"""
Compute the residual for Physics Informed learning. This function
@@ -182,7 +183,7 @@ def compute_residual(self, samples, equation):
samples, self.forward(samples), self._params
)
return residual

def store_log(self, loss_value):
"""
Stores the loss value in the logger. This function should be
@@ -195,13 +196,13 @@ def store_log(self, loss_value):
:param torch.Tensor loss_value: The value of the loss.
"""
self.log(
self.__logged_metric+'_loss',
loss_value,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.__logged_metric + "_loss",
loss_value,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.__logged_res_losses.append(loss_value)

def on_train_epoch_end(self):
@@ -211,10 +212,10 @@ def on_train_epoch_end(self):
"""
if self.__logged_res_losses:
# storing mean loss
self.__logged_metric = 'mean'
self.__logged_metric = "mean"
self.store_log(
sum(self.__logged_res_losses)/len(self.__logged_res_losses)
)
sum(self.__logged_res_losses) / len(self.__logged_res_losses)
)
# free the logged losses
self.__logged_res_losses = []
return super().on_train_epoch_end()
@@ -244,4 +245,4 @@ def current_condition_name(self):
:meth:`loss_phys` to extract the condition at which the loss is
computed.
"""
return self.__logged_metric
return self.__logged_metric
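The `compute_residual` helper shown above evaluates `equation.residual(samples, self.forward(samples), self._params)`, and concrete `loss_phys` implementations reduce that residual to a scalar loss that `store_log` records per condition. Below is a minimal sketch of the same residual-loss pattern in plain PyTorch — the toy ODE u'(t) = u(t), the small network, and the random collocation points are illustrative assumptions, not PINA code:

```python
import torch

# Stand-in network u_theta(t); the architecture is an illustrative assumption.
model = torch.nn.Sequential(
    torch.nn.Linear(1, 32), torch.nn.Tanh(), torch.nn.Linear(32, 1)
)

def compute_residual(samples: torch.Tensor) -> torch.Tensor:
    """Residual of the toy ODE u'(t) - u(t) = 0 at the collocation points."""
    samples = samples.requires_grad_(True)
    u = model(samples)
    # du/dt via autograd, mirroring what an equation's residual would need
    du = torch.autograd.grad(
        u, samples, grad_outputs=torch.ones_like(u), create_graph=True
    )[0]
    return du - u

samples = torch.rand(128, 1)                    # collocation points in time
residual = compute_residual(samples)
loss_value = torch.nn.MSELoss()(torch.zeros_like(residual), residual)
loss_value.backward()                           # gradients for an optimizer step
```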
34 changes: 18 additions & 16 deletions pina/solvers/pinns/causalpinn.py
@@ -97,25 +97,27 @@ def __init__(
:param dict scheduler_kwargs: LR scheduler constructor keyword args.
:param int | float eps: The exponential decay parameter. Note that this
value is kept fixed during the training, but can be changed by means
of a callback, e.g. for annealing.
of a callback, e.g. for annealing.
"""
super().__init__(
problem=problem,
model=model,
extra_features=extra_features,
loss=loss,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
scheduler=scheduler,
scheduler_kwargs=scheduler_kwargs,
problem=problem,
model=model,
extra_features=extra_features,
loss=loss,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
scheduler=scheduler,
scheduler_kwargs=scheduler_kwargs,
)

# checking consistency
check_consistency(eps, (int,float))
check_consistency(eps, (int, float))
self._eps = eps
if not isinstance(self.problem, TimeDependentProblem):
raise ValueError('Casual PINN works only for problems'
'inheritig from TimeDependentProblem.')
raise ValueError(
"Casual PINN works only for problems"
"inheritig from TimeDependentProblem."
)

def loss_phys(self, samples, equation):
"""
@@ -144,14 +146,14 @@ def loss_phys(self, samples, equation):
)
time_loss.append(loss_val)
# store results
self.store_log(loss_value=float(sum(time_loss)/len(time_loss)))
self.store_log(loss_value=float(sum(time_loss) / len(time_loss)))
# concatenate residuals
time_loss = torch.stack(time_loss)
# compute weights (without the gradient storing)
with torch.no_grad():
weights = self._compute_weights(time_loss)
return (weights * time_loss).mean()

@property
def eps(self):
"""
@@ -205,8 +207,8 @@ def _split_tensor_into_chunks(self, tensor):
_, idx_split = time_tensor.unique(return_counts=True)
# splitting
chunks = torch.split(tensor, tuple(idx_split))
return chunks, labels # return chunks
return chunks, labels # return chunks

def _compute_weights(self, loss):
"""
Computes the weights for the physics loss based on the cumulative loss.
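The `eps` parameter documented above controls how strongly earlier time chunks gate later ones: `_compute_weights` derives weights from the cumulative residual loss without tracking gradients, and `loss_phys` returns the weighted mean. The following is a generic sketch of that causal-weighting idea in plain PyTorch; the weight formula w_i = exp(-eps * sum of the losses of earlier chunks) and the example losses are assumptions, not necessarily PINA's exact implementation:

```python
import torch

def causal_weights(time_losses: torch.Tensor, eps: float) -> torch.Tensor:
    """Weights that decay with the accumulated loss of earlier time chunks."""
    with torch.no_grad():  # weights carry no gradient, as in the diff above
        accumulated = torch.cat(
            [torch.zeros(1), torch.cumsum(time_losses, dim=0)[:-1]]
        )
        return torch.exp(-eps * accumulated)

time_losses = torch.tensor([0.9, 0.7, 0.5, 0.4])   # per-chunk residual losses
weights = causal_weights(time_losses, eps=1.0)
weighted_loss = (weights * time_losses).mean()     # analogous to loss_phys' return
```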
39 changes: 20 additions & 19 deletions pina/solvers/pinns/competitive_pinn.py
@@ -117,7 +117,7 @@ def __init__(
optimizer_discriminator_kwargs,
],
extra_features=None, # CompetitivePINN doesn't take extra features
loss=loss
loss=loss,
)

# set automatic optimization for GANs
@@ -131,17 +131,15 @@ def __init__(

# assign schedulers
self._schedulers = [
scheduler_model(
self.optimizers[0], **scheduler_model_kwargs
),
scheduler_model(self.optimizers[0], **scheduler_model_kwargs),
scheduler_discriminator(
self.optimizers[1], **scheduler_discriminator_kwargs
),
]

self._model = self.models[0]
self._discriminator = self.models[1]

def forward(self, x):
r"""
Forward pass implementation for the PINN solver. It returns the function
@@ -195,8 +193,11 @@ def loss_data(self, input_tensor, output_tensor):
:rtype: torch.Tensor
"""
self.optimizer_model.zero_grad()
loss_val = super().loss_data(
input_tensor, output_tensor).as_subclass(torch.Tensor)
loss_val = (
super()
.loss_data(input_tensor, output_tensor)
.as_subclass(torch.Tensor)
)
loss_val.backward()
self.optimizer_model.step()
return loss_val
@@ -221,7 +222,7 @@ def configure_optimizers(self):
)
return self.optimizers, self._schedulers

def on_train_batch_end(self,outputs, batch, batch_idx):
def on_train_batch_end(self, outputs, batch, batch_idx):
"""
This method is called at the end of each training batch, and ovverides
the PytorchLightining implementation for logging the checkpoints.
@@ -235,7 +236,9 @@ def on_train_batch_end(self,outputs, batch, batch_idx):
:rtype: Any
"""
# increase by one the counter of optimization to save loggers
self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += 1
self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += (
1
)
return super().on_train_batch_end(outputs, batch, batch_idx)

def _train_discriminator(self, samples, equation, discriminator_bets):
Expand All @@ -252,13 +255,14 @@ def _train_discriminator(self, samples, equation, discriminator_bets):
self.optimizer_discriminator.zero_grad()
# compute residual, we detach because the weights of the generator
# model are fixed
residual = self.compute_residual(samples=samples,
equation=equation).detach()
residual = self.compute_residual(
samples=samples, equation=equation
).detach()
# compute competitive residual, the minus is because we maximise
competitive_residual = residual * discriminator_bets
loss_val = - self.loss(
loss_val = -self.loss(
torch.zeros_like(competitive_residual, requires_grad=True),
competitive_residual
competitive_residual,
).as_subclass(torch.Tensor)
# backprop
self.manual_backward(loss_val)
@@ -283,16 +287,13 @@ def _train_model(self, samples, equation, discriminator_bets):
residual = self.compute_residual(samples=samples, equation=equation)
# store logging
with torch.no_grad():
loss_residual = self.loss(
torch.zeros_like(residual),
residual
)
loss_residual = self.loss(torch.zeros_like(residual), residual)
# compute competitive residual, discriminator_bets are detached becase
# we optimize only the generator model
competitive_residual = residual * discriminator_bets.detach()
loss_val = self.loss(
torch.zeros_like(competitive_residual, requires_grad=True),
competitive_residual
competitive_residual,
).as_subclass(torch.Tensor)
# backprop
self.manual_backward(loss_val)
@@ -357,4 +358,4 @@ def scheduler_discriminator(self):
:return: The scheduler for the discriminator.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self._schedulers[1]
return self._schedulers[1]
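`_train_discriminator` and `_train_model` above implement a two-player scheme under manual optimization: the discriminator's bets reweight the residual and the discriminator is updated to maximise the weighted loss (hence the leading minus sign and the detached residual), while the model minimises the same loss with the bets detached. A condensed, self-contained PyTorch sketch of that pattern follows — the toy "equation", the linear networks, and the hyperparameters are illustrative assumptions, not the CompetitivePINN implementation:

```python
import torch

mse = torch.nn.MSELoss()
model = torch.nn.Linear(1, 1)           # stand-in for the PINN model
discriminator = torch.nn.Linear(1, 1)   # produces per-point bets
opt_model = torch.optim.Adam(model.parameters(), lr=1e-3)
opt_disc = torch.optim.Adam(discriminator.parameters(), lr=1e-3)

samples = torch.rand(64, 1)
target = torch.sin(samples)             # toy residual: model(x) - sin(x)

for _ in range(10):
    bets = discriminator(samples)

    # discriminator step: maximise the weighted residual (note the minus sign),
    # with the model's contribution frozen via detach()
    opt_disc.zero_grad()
    residual = (model(samples) - target).detach()
    disc_loss = -mse(torch.zeros_like(residual), residual * bets)
    disc_loss.backward()
    opt_disc.step()

    # model step: minimise the residual weighted by the detached bets
    opt_model.zero_grad()
    residual = model(samples) - target
    model_loss = mse(torch.zeros_like(residual), residual * bets.detach())
    model_loss.backward()
    opt_model.step()
```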
31 changes: 16 additions & 15 deletions pina/solvers/pinns/gpinn.py
@@ -90,22 +90,23 @@ def __init__(
:param dict scheduler_kwargs: LR scheduler constructor keyword args.
"""
super().__init__(
problem=problem,
model=model,
extra_features=extra_features,
loss=loss,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
scheduler=scheduler,
scheduler_kwargs=scheduler_kwargs,
problem=problem,
model=model,
extra_features=extra_features,
loss=loss,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
scheduler=scheduler,
scheduler_kwargs=scheduler_kwargs,
)
if not isinstance(self.problem, SpatialProblem):
raise ValueError('Gradient PINN computes the gradient of the '
'PINN loss with respect to the spatial '
'coordinates, thus the PINA problem must be '
'a SpatialProblem.')
raise ValueError(
"Gradient PINN computes the gradient of the "
"PINN loss with respect to the spatial "
"coordinates, thus the PINA problem must be "
"a SpatialProblem."
)


def loss_phys(self, samples, equation):
"""
Computes the physics loss for the GPINN solver based on given
@@ -126,9 +127,9 @@ def loss_phys(self, samples, equation):
self.store_log(loss_value=float(loss_value))
# gradient PINN loss
loss_value = loss_value.reshape(-1, 1)
loss_value.labels = ['__LOSS']
loss_value.labels = ["__LOSS"]
loss_grad = grad(loss_value, samples, d=self.problem.spatial_variables)
g_loss_phys = self.loss(
torch.zeros_like(loss_grad, requires_grad=True), loss_grad
)
return loss_value + g_loss_phys
return loss_value + g_loss_phys
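The `loss_phys` above augments the plain residual loss with the gradient of that loss with respect to the spatial coordinates, which is why GPINN rejects problems that are not `SpatialProblem`s. Below is a self-contained sketch of this gradient-enhanced loss in plain PyTorch, with a toy one-dimensional residual standing in for the PINA machinery (the network and the PDE du/dx = 1 are illustrative assumptions):

```python
import torch

model = torch.nn.Sequential(
    torch.nn.Linear(1, 16), torch.nn.Tanh(), torch.nn.Linear(16, 1)
)
mse = torch.nn.MSELoss()

x = torch.rand(64, 1, requires_grad=True)       # spatial collocation points
u = model(x)
du_dx = torch.autograd.grad(u, x, torch.ones_like(u), create_graph=True)[0]
residual = du_dx - 1.0                          # toy PDE: du/dx = 1

loss_value = mse(torch.zeros_like(residual), residual)

# gradient-enhanced term: the spatial gradient of the loss should also vanish
loss_grad = torch.autograd.grad(loss_value, x, create_graph=True)[0]
g_loss_phys = mse(torch.zeros_like(loss_grad), loss_grad)

(loss_value + g_loss_phys).backward()           # total loss, as in the diff above
```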
7 changes: 2 additions & 5 deletions pina/solvers/pinns/pinn.py
@@ -87,7 +87,7 @@ def __init__(
optimizers=[optimizer],
optimizers_kwargs=[optimizer_kwargs],
extra_features=extra_features,
loss=loss
loss=loss,
)

# check consistency
@@ -131,7 +131,6 @@ def loss_phys(self, samples, equation):
self.store_log(loss_value=float(loss_value))
return loss_value


def configure_optimizers(self):
"""
Optimizer configuration for the PINN
@@ -153,18 +152,16 @@ def configure_optimizers(self):
)
return self.optimizers, [self.scheduler]


@property
def scheduler(self):
"""
Scheduler for the PINN training.
"""
return self._scheduler


@property
def neural_net(self):
"""
Neural network for the PINN training.
"""
return self._neural_net
return self._neural_net
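`configure_optimizers` above instantiates the scheduler on the already-built optimizer and returns both to the training loop. Outside of the PINA specifics, the hook follows the usual PyTorch Lightning contract, roughly as sketched below; the module, the optimizer and scheduler choices, and the `lightning.pytorch` import path are placeholders and assumptions:

```python
import torch
import lightning.pytorch as pl  # assumption: older setups import pytorch_lightning instead

class TinyModule(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Linear(1, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return torch.nn.functional.mse_loss(self.net(x), y)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.net.parameters(), lr=1e-3)
        scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer, factor=0.5)
        return [optimizer], [scheduler]  # same shape as `self.optimizers, [self.scheduler]`
```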