Merge pull request #4 from membraneframework-labs/classifiers
Add EfficientNetV2 and SqueezeNet
Showing 23 changed files with 308 additions and 82 deletions.
defmodule ExVision.Classification.EfficientNet_V2_L do
  @moduledoc """
  An object classifier based on EfficientNet_V2_L.
  Exported from `torchvision`.
  Weights from Imagenet 1k.
  """
  use ExVision.Model.Definition.Ortex,
    model: "efficientnet_v2_l_classifier.onnx",
    categories: "priv/categories/imagenet_v2_categories.json"

  use ExVision.Classification.GenericClassifier

  @impl true
  def preprocessing(image, _metadata) do
    image
    |> ExVision.Utils.resize({480, 480})
    |> NxImage.normalize(
      Nx.tensor([0.5, 0.5, 0.5]),
      Nx.tensor([0.5, 0.5, 0.5]),
      channels: :first
    )
  end
end
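For context (not part of this diff): a minimal usage sketch of one of the new classifiers. It assumes the load/0 and run/2 entry points that ExVision.Model.Definition.Ortex generates for models in this library; the exact return shape of load/0 and the accepted input types are assumptions, not something this commit shows.

alias ExVision.Classification.EfficientNet_V2_L

# Assumption: load/0 returns {:ok, model} and run/2 accepts an image path,
# as with other Ortex-based ExVision models.
{:ok, model} = EfficientNet_V2_L.load()
predictions = EfficientNet_V2_L.run(model, "test/assets/cat.jpg")
# predictions is a map of category => probability (see GenericClassifier below)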
defmodule ExVision.Classification.EfficientNet_V2_M do
  @moduledoc """
  An object classifier based on EfficientNet_V2_M.
  Exported from `torchvision`.
  Weights from Imagenet 1k.
  """
  use ExVision.Model.Definition.Ortex,
    model: "efficientnet_v2_m_classifier.onnx",
    categories: "priv/categories/imagenet_v2_categories.json"

  use ExVision.Classification.GenericClassifier

  @impl true
  def preprocessing(image, _metadata) do
    image
    |> ExVision.Utils.resize({480, 480})
    |> NxImage.normalize(
      Nx.tensor([0.485, 0.456, 0.406]),
      Nx.tensor([0.229, 0.224, 0.225]),
      channels: :first
    )
  end
end
defmodule ExVision.Classification.EfficientNet_V2_S do
  @moduledoc """
  An object classifier based on EfficientNet_V2_S.
  Exported from `torchvision`.
  Weights from Imagenet 1k.
  """
  use ExVision.Model.Definition.Ortex,
    model: "efficientnet_v2_s_classifier.onnx",
    categories: "priv/categories/imagenet_v2_categories.json"

  use ExVision.Classification.GenericClassifier

  @impl true
  def preprocessing(image, _metadata) do
    image
    |> ExVision.Utils.resize({384, 384})
    |> NxImage.normalize(
      Nx.tensor([0.485, 0.456, 0.406]),
      Nx.tensor([0.229, 0.224, 0.225]),
      channels: :first
    )
  end
end
defmodule ExVision.Classification.GenericClassifier do
  @moduledoc false

  # Contains a default implementation of post processing for TorchVision classifiers
  # To use: `use ExVision.Classification.GenericClassifier`

  alias ExVision.Utils

  alias ExVision.Types.ImageMetadata

  @typep output_t() :: %{atom() => number()}

  @spec postprocessing(map(), ImageMetadata.t(), [atom()]) :: output_t()
  def postprocessing(%{"output" => scores}, _metadata, categories) do
    scores
    |> Nx.backend_transfer()
    |> Nx.flatten()
    |> Utils.softmax()
    |> Nx.to_flat_list()
    |> then(&Enum.zip(categories, &1))
    |> Map.new()
  end

  defmacro __using__(_opts) do
    quote do
      @typedoc """
      A type describing the output of a classification model as a mapping of category to probability.
      """
      @type output_t() :: %{category_t() => number()}

      @impl true
      @spec postprocessing(map(), ExVision.Types.ImageMetadata.t()) :: output_t()
      def postprocessing(output, metadata) do
        ExVision.Classification.GenericClassifier.postprocessing(output, metadata, categories())
      end

      defoverridable postprocessing: 2
    end
  end
end
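To make the output shape concrete, here is a hypothetical consumer (not part of this commit) of the %{category => probability} map that this postprocessing produces, where predictions is assumed to be the result of a classifier's run/2:

# Hypothetical: pick the most probable category from a classifier's output map.
{top_category, probability} =
  Enum.max_by(predictions, fn {_category, prob} -> prob end)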
defmodule ExVision.Classification.SqueezeNet1_1 do
  @moduledoc """
  An object classifier based on SqueezeNet1_1.
  Exported from `torchvision`.
  Weights from Imagenet 1k.
  """
  use ExVision.Model.Definition.Ortex,
    model: "squeezenet1_1_classifier.onnx",
    categories: "priv/categories/imagenet_v2_categories.json"

  use ExVision.Classification.GenericClassifier

  @impl true
  def preprocessing(image, _metadata) do
    image
    |> ExVision.Utils.resize({224, 224})
    |> NxImage.normalize(
      Nx.tensor([0.485, 0.456, 0.406]),
      Nx.tensor([0.229, 0.224, 0.225]),
      channels: :first
    )
  end
end
import argparse
from torchvision.transforms.functional import to_tensor, resize
import torch
import json
from pathlib import Path
from PIL import Image


def export(model_builder, Model_Weights, input_shape):
    base_dir = Path(f"models/classification/{model_builder.__name__}")
    base_dir.mkdir(parents=True, exist_ok=True)

    model_file = base_dir / "model.onnx"
    categories_file = base_dir / "categories.json"

    weights = Model_Weights.DEFAULT
    model = model_builder(weights=weights)
    model.eval()

    categories = [x.lower().replace(" ", "_")
                  for x in weights.meta["categories"]]
    transforms = weights.transforms()

    with open(categories_file, "w") as f:
        json.dump(categories, f)

    onnx_input = to_tensor(Image.open("test/assets/cat.jpg")).unsqueeze(0)
    onnx_input = resize(onnx_input, input_shape)
    onnx_input = transforms(onnx_input)

    torch.onnx.export(
        model,
        onnx_input,
        str(model_file),
        verbose=False,
        input_names=["input"],
        output_names=["output"],
        dynamic_axes={
            "input": {0: "batch_size"},
            "output": {0: "batch_size"}
        },
        export_params=True,
    )

    expected_output: torch.Tensor = model(onnx_input)
    expected_output = expected_output.softmax(dim=1)

    result = dict(zip(categories, expected_output[0].tolist()))

    file = Path(
        f"test/assets/results/classification/{model_builder.__name__}.json"
    )
    file.parent.mkdir(exist_ok=True, parents=True)

    with file.open("w") as f:
        json.dump(result, f)


parser = argparse.ArgumentParser()
parser.add_argument("model")
args = parser.parse_args()

match(args.model):
    case "mobilenet_v3_small":
        from torchvision.models import mobilenet_v3_small, MobileNet_V3_Small_Weights
        export(mobilenet_v3_small, MobileNet_V3_Small_Weights, [224, 224])
    case "efficientnet_v2_s":
        from torchvision.models import efficientnet_v2_s, EfficientNet_V2_S_Weights
        export(efficientnet_v2_s, EfficientNet_V2_S_Weights, [384, 384])
    case "efficientnet_v2_m":
        from torchvision.models import efficientnet_v2_m, EfficientNet_V2_M_Weights
        export(efficientnet_v2_m, EfficientNet_V2_M_Weights, [480, 480])
    case "efficientnet_v2_l":
        from torchvision.models import efficientnet_v2_l, EfficientNet_V2_L_Weights
        export(efficientnet_v2_l, EfficientNet_V2_L_Weights, [480, 480])
    case "squeezenet1_1":
        from torchvision.models import squeezenet1_1, SqueezeNet1_1_Weights
        export(squeezenet1_1, SqueezeNet1_1_Weights, [224, 224])
    case _:
        print("Model not found")