From 3456fd3df6e426ab3b415c6d237688faeb5b0796 Mon Sep 17 00:00:00 2001
From: mramanathan
Date: Thu, 27 Feb 2025 04:26:01 +0000
Subject: [PATCH] Bringup tt-torch models in forge

---
 env/linux_requirements.txt                         |  4 +-
 .../models/pytorch/text/llama/test_llama3.py       | 30 +++++++++----
 .../models/pytorch/vision/dla/test_dla.py          | 41 +++++++++++++++++-
 .../models/pytorch/vision/dla/utils/utils.py       | 14 ++++++
 .../efficientnet/test_efficientnet_lite.py         | 43 +++++++++++++++++++
 .../vision/efficientnet/utils/utils.py             | 24 +++++++++++
 .../pytorch/vision/ghostnet/test_ghostnet.py       |  2 +-
 .../models/pytorch/vision/hrnet/test_hrnet.py      |  5 ++-
 .../vision/inception/test_inception_v4.py          | 15 +++++--
 .../vision/mlp_mixer/test_mlp_mixer.py             |  3 +-
 .../vision/mobilenet/test_mobilenet_v1.py          | 36 ++++++++++++++++
 .../pytorch/vision/mobilenet/utils/utils.py        | 14 ++++++
 .../pytorch/vision/resnet/test_resnet.py           | 32 ++++++++------
 .../pytorch/vision/resnet/utils/utils.py           | 33 ++++++++++++++
 .../vision/retinanet/test_retinanet.py             | 32 ++++++++++++++
 .../pytorch/vision/retinanet/utils/utils.py        | 29 +++++++++++++
 .../vision/ssd300_vgg16/test_ssd300_vgg16.py       | 41 ++++++++++++++++++
 .../vision/ssd300_vgg16/utils/utils.py             | 29 +++++++++++++
 .../test_ssdlite320_mobilenetv3.py                 | 41 ++++++++++++++++++
 .../ssdlite320_mobilenetv3/utils/utils.py          | 29 +++++++++++++
 .../pytorch/vision/vovnet/test_vovnet.py           |  3 +-
 .../pytorch/vision/xception/test_xception.py       |  4 +-
 22 files changed, 467 insertions(+), 37 deletions(-)
 create mode 100644 forge/test/models/pytorch/vision/efficientnet/utils/utils.py
 create mode 100644 forge/test/models/pytorch/vision/resnet/utils/utils.py
 create mode 100644 forge/test/models/pytorch/vision/retinanet/utils/utils.py
 create mode 100644 forge/test/models/pytorch/vision/ssd300_vgg16/test_ssd300_vgg16.py
 create mode 100644 forge/test/models/pytorch/vision/ssd300_vgg16/utils/utils.py
 create mode 100644 forge/test/models/pytorch/vision/ssdlite320_mobilenetv3/test_ssdlite320_mobilenetv3.py
 create mode 100644 forge/test/models/pytorch/vision/ssdlite320_mobilenetv3/utils/utils.py

diff --git a/env/linux_requirements.txt b/env/linux_requirements.txt
index 223d95924..63b2bf737 100644
--- a/env/linux_requirements.txt
+++ b/env/linux_requirements.txt
@@ -17,11 +17,11 @@ sacrebleu==2.1.0
 sacremoses==0.0.53
 seaborn
 scikit-image==0.20.0 # For DenseNet 121 HF XRay model
-segmentation_models_pytorch==0.3.2
+segmentation_models_pytorch==0.4.0
 sentencepiece==0.2.0
 subword-nmt==0.3.8
 tensorflow-hub==0.12.0
-timm==0.6.12
+timm==0.9.16
 yolov5==7.0.9
 # The CPU versions of torch and torch visions are used due to their size being
 # several GB smaller which made a large impact on the performance of CI
diff --git a/forge/test/models/pytorch/text/llama/test_llama3.py b/forge/test/models/pytorch/text/llama/test_llama3.py
index 48189dd3d..3f2001418 100644
--- a/forge/test/models/pytorch/text/llama/test_llama3.py
+++ b/forge/test/models/pytorch/text/llama/test_llama3.py
@@ -30,6 +30,8 @@
     "meta-llama/Llama-3.1-8B-Instruct",
     "meta-llama/Llama-3.2-1B",
     "meta-llama/Llama-3.2-1B-Instruct",
+    "meta-llama/Llama-3.2-3B",
+    "huggyllama/llama-7b",
 ]
 
 
@@ -145,18 +147,27 @@ def test_llama3_causal_lm(record_forge_property, variant):
     tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
     tokenizer.pad_token = tokenizer.eos_token
     framework_model = download_model(AutoModelForCausalLM.from_pretrained, variant, use_cache=False, return_dict=False)
-
+    framework_model.eval()
     # Input prompt
     input_prompt = "Hey how are you doing today?"
     # Tokenize input
-    inputs = tokenizer(
-        input_prompt,
-        return_tensors="pt",
-        max_length=256,
-        pad_to_max_length=True,
-        truncation=True,
-    )
+    if variant in ("meta-llama/Llama-3.2-3B", "huggyllama/llama-7b"):
+        inputs = tokenizer.encode_plus(
+            input_prompt,
+            return_tensors="pt",
+            max_length=32,
+            padding="max_length",
+            truncation=True,
+        )
+    else:
+        inputs = tokenizer(
+            input_prompt,
+            return_tensors="pt",
+            max_length=256,
+            pad_to_max_length=True,
+            truncation=True,
+        )
 
     input_ids = inputs["input_ids"]
     attn_mask = inputs["attention_mask"]
@@ -181,6 +192,9 @@ def test_llama3_causal_lm(record_forge_property, variant):
 def test_llama3_sequence_classification(record_forge_property, variant):
+    if variant in {"meta-llama/Llama-3.2-3B", "huggyllama/llama-7b"}:
+        pytest.skip(f"Skipping test for variant {variant}")
+
     pytest.skip("Skipping due to the current CI/CD pipeline limitations")
 
     # Build Module Name
     module_name = build_module_name(
         framework=Framework.PYTORCH,
@@ -198,6 +212,7 @@ def test_llama3_sequence_classification(record_forge_property, variant):
     framework_model = download_model(
         AutoModelForSequenceClassification.from_pretrained, variant, use_cache=False, return_dict=False
     )
+    framework_model.eval()
 
     # Input prompt
     input_prompt = "Movie is great"
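Note on the branch above: for the two new variants the prompt is padded to a fixed 32-token window, so the compiled graph sees a static input shape. A minimal standalone sketch of that flow outside the Forge harness (checkpoint name and prompt taken from the test; not part of the patch):

    # Illustrative only: mirrors the new encode_plus branch in test_llama3_causal_lm.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    variant = "huggyllama/llama-7b"
    tokenizer = AutoTokenizer.from_pretrained(variant)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(variant, use_cache=False, return_dict=False)
    model.eval()

    # padding="max_length" pads/truncates to exactly 32 tokens, keeping the shape static.
    inputs = tokenizer.encode_plus(
        "Hey how are you doing today?",
        return_tensors="pt",
        max_length=32,
        padding="max_length",
        truncation=True,
    )
    with torch.no_grad():
        logits = model(inputs["input_ids"], attention_mask=inputs["attention_mask"])[0]
    print(logits.shape)  # (1, 32, vocab_size)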
urlopen("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png") + ) + data_config = timm.data.resolve_model_data_config(model) + transforms = timm.data.create_transform(**data_config, is_training=False) + input_batch = transforms(img).unsqueeze(0) # unsqueeze single image into batch of 1 + return model, input_batch + + url = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" diff --git a/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py b/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py index d09dc77a1..b3450bfce 100644 --- a/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py +++ b/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py @@ -10,6 +10,7 @@ from test.models.pytorch.vision.efficientnet.utils import ( src_efficientnet_lite as efflite, ) +from test.models.pytorch.vision.efficientnet.utils.utils import load_inputs, load_model from test.models.utils import Framework, Source, Task, build_module_name @@ -181,3 +182,45 @@ def test_efficientnet_lite_4_pytorch(record_forge_property): # Model Verification verify(inputs, framework_model, compiled_model) + + +variants = [ + "tf_efficientnet_lite0.in1k", + "tf_efficientnet_lite1.in1k", + "tf_efficientnet_lite2.in1k", + "tf_efficientnet_lite3.in1k", + "tf_efficientnet_lite4.in1k", +] + + +@pytest.mark.nightly +@pytest.mark.parametrize("variant", variants) +def test_efficientnet_lite_timm(record_forge_property, variant): + + # Build Module Name + module_name = build_module_name( + framework=Framework.PYTORCH, + model="efficientnet_lite", + variant=variant, + source=Source.TIMM, + task=Task.IMAGE_CLASSIFICATION, + ) + + # Record Forge Property + record_forge_property("model_name", module_name) + + # Load the model and prepare input data + framework_model = load_model(variant) + inputs = load_inputs(framework_model) + + # Forge compile framework model + compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name) + + # Model Verification + verify(inputs, framework_model, compiled_model) + + # Inference + output = compiled_model(*inputs) + + # Post processing + post_processing(output) diff --git a/forge/test/models/pytorch/vision/efficientnet/utils/utils.py b/forge/test/models/pytorch/vision/efficientnet/utils/utils.py new file mode 100644 index 000000000..409454f1f --- /dev/null +++ b/forge/test/models/pytorch/vision/efficientnet/utils/utils.py @@ -0,0 +1,24 @@ +# SPDX-FileCopyrightText: (c) 2025 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 +from urllib.request import urlopen + +import timm +from PIL import Image + + +def load_model(model_name): + model = timm.create_model(model_name, pretrained=True) + return model + + +def load_inputs(framework_model): + import timm + + img = Image.open( + urlopen("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png") + ) + data_config = timm.data.resolve_model_data_config(framework_model) + transforms = timm.data.create_transform(**data_config, is_training=False) + input_batch = transforms(img).unsqueeze(0) # unsqueeze single image into batch of 1 + return input_batch diff --git a/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py b/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py index 87888a2d7..fd1ea5190 100644 --- a/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py +++ 
diff --git a/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py b/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py
index d09dc77a1..b3450bfce 100644
--- a/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py
+++ b/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py
@@ -10,6 +10,8 @@
+from test.models.pytorch.vision.dla.utils.utils import post_processing  # reuse shared ImageNet post-processing
 from test.models.pytorch.vision.efficientnet.utils import (
     src_efficientnet_lite as efflite,
 )
+from test.models.pytorch.vision.efficientnet.utils.utils import load_inputs, load_model
 from test.models.utils import Framework, Source, Task, build_module_name
 
 
@@ -181,3 +183,45 @@
 
     # Model Verification
     verify(inputs, framework_model, compiled_model)
+
+
+variants = [
+    "tf_efficientnet_lite0.in1k",
+    "tf_efficientnet_lite1.in1k",
+    "tf_efficientnet_lite2.in1k",
+    "tf_efficientnet_lite3.in1k",
+    "tf_efficientnet_lite4.in1k",
+]
+
+
+@pytest.mark.nightly
+@pytest.mark.parametrize("variant", variants)
+def test_efficientnet_lite_timm(record_forge_property, variant):
+
+    # Build Module Name
+    module_name = build_module_name(
+        framework=Framework.PYTORCH,
+        model="efficientnet_lite",
+        variant=variant,
+        source=Source.TIMM,
+        task=Task.IMAGE_CLASSIFICATION,
+    )
+
+    # Record Forge Property
+    record_forge_property("model_name", module_name)
+
+    # Load the model and prepare input data
+    framework_model = load_model(variant)
+    inputs = load_inputs(framework_model)
+
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
+
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
+
+    # Inference
+    output = compiled_model(*inputs)
+
+    # Post processing
+    post_processing(output)
diff --git a/forge/test/models/pytorch/vision/efficientnet/utils/utils.py b/forge/test/models/pytorch/vision/efficientnet/utils/utils.py
new file mode 100644
index 000000000..409454f1f
--- /dev/null
+++ b/forge/test/models/pytorch/vision/efficientnet/utils/utils.py
@@ -0,0 +1,22 @@
+# SPDX-FileCopyrightText: (c) 2025 Tenstorrent AI ULC
+#
+# SPDX-License-Identifier: Apache-2.0
+from urllib.request import urlopen
+
+import timm
+from PIL import Image
+
+
+def load_model(model_name):
+    model = timm.create_model(model_name, pretrained=True)
+    return model
+
+
+def load_inputs(framework_model):
+    img = Image.open(
+        urlopen("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png")
+    )
+    data_config = timm.data.resolve_model_data_config(framework_model)
+    transform = timm.data.create_transform(**data_config, is_training=False)
+    input_batch = transform(img).unsqueeze(0)  # unsqueeze single image into batch of 1
+    return [input_batch]  # wrap in a list, matching the other input loaders
diff --git a/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py b/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py
index 87888a2d7..fd1ea5190 100644
--- a/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py
+++ b/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py
@@ -13,7 +13,7 @@
 )
 from test.models.utils import Framework, Source, Task, build_module_name
 
-variants = ["ghostnet_100"]
+variants = ["ghostnet_100", "ghostnet_100.in1k", "ghostnetv2_100.in1k"]
 
 
 @pytest.mark.push
diff --git a/forge/test/models/pytorch/vision/hrnet/test_hrnet.py b/forge/test/models/pytorch/vision/hrnet/test_hrnet.py
index 131b96961..3ebc231dc 100644
--- a/forge/test/models/pytorch/vision/hrnet/test_hrnet.py
+++ b/forge/test/models/pytorch/vision/hrnet/test_hrnet.py
@@ -148,14 +148,13 @@ def generate_model_hrnet_imgcls_timm_pytorch(variant):
     "hrnet_w44",
     "hrnet_w48",
     "hrnet_w64",
+    "hrnet_w18.ms_aug_in1k",
 ]
 
 
 @pytest.mark.nightly
 @pytest.mark.parametrize("variant", variants, ids=variants)
 def test_hrnet_timm_pytorch(record_forge_property, variant):
-    if variant != "hrnet_w18_small":
-        pytest.skip("Skipping due to the current CI/CD pipeline limitations")
 
     # Build Module Name
     module_name = build_module_name(
diff --git a/forge/test/models/pytorch/vision/inception/test_inception_v4.py b/forge/test/models/pytorch/vision/inception/test_inception_v4.py
index d9fd26d9c..5e7bb8814 100644
--- a/forge/test/models/pytorch/vision/inception/test_inception_v4.py
+++ b/forge/test/models/pytorch/vision/inception/test_inception_v4.py
@@ -51,19 +51,26 @@ def generate_model_inceptionV4_imgcls_timm_pytorch(variant):
     return framework_model, [img_tensor]
 
 
+variants = ["inception_v4", "inception_v4.tf_in1k"]
+
+
 @pytest.mark.nightly
-def test_inception_v4_timm_pytorch(record_forge_property):
-    pytest.skip("Skipping due to the current CI/CD pipeline limitations")
+@pytest.mark.parametrize("variant", variants)
+def test_inception_v4_timm_pytorch(record_forge_property, variant):
 
     # Build Module Name
     module_name = build_module_name(
-        framework=Framework.PYTORCH, model="inception", variant="v4", source=Source.TIMM, task=Task.IMAGE_CLASSIFICATION
+        framework=Framework.PYTORCH,
+        model="inception",
+        variant=variant,
+        source=Source.TIMM,
+        task=Task.IMAGE_CLASSIFICATION,
     )
 
     # Record Forge Property
     record_forge_property("model_name", module_name)
 
-    framework_model, inputs = generate_model_inceptionV4_imgcls_timm_pytorch("inception_v4")
+    framework_model, inputs = generate_model_inceptionV4_imgcls_timm_pytorch(variant)
 
     # Forge compile framework model
     compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
diff --git a/forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py b/forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py
index ffcb86e74..e52bc16d9 100644
--- a/forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py
+++ b/forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py
@@ -27,14 +27,13 @@
     "mixer_l32_224",
     "mixer_s16_224",
     "mixer_s32_224",
+    "mixer_b16_224.goog_in21k",
 ]
 
 
 @pytest.mark.nightly
 @pytest.mark.parametrize("variant", varaints, ids=varaints)
 def test_mlp_mixer_timm_pytorch(record_forge_property, variant):
-    if variant != "mixer_b16_224":
-        pytest.skip("Skipping due to the current CI/CD pipeline limitations")
 
     # Build Module Name
     module_name = build_module_name(
diff --git a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py
index 933347c14..973656ff9 100644
--- a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py
+++ b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py
@@ -11,6 +11,7 @@
 from test.models.pytorch.vision.mobilenet.utils.utils import (
     load_mobilenet_model,
+    load_timm_model,
     post_processing,
 )
 from test.models.utils import Framework, Source, Task, build_module_name
@@ -129,3 +130,38 @@ def test_mobilenetv1_224(record_forge_property, variant):
 
     # Model Verification
     verify(inputs, framework_model, compiled_model)
+
+
+variants = ["mobilenetv1_100.ra4_e3600_r224_in1k"]
+
+
+@pytest.mark.nightly
+@pytest.mark.parametrize("variant", variants)
+def test_mobilenet_v1_timm(record_forge_property, variant):
+
+    # Build Module Name
+    module_name = build_module_name(
+        framework=Framework.PYTORCH,
+        model="mobilenet_v1",
+        variant=variant,
+        source=Source.TIMM,
+        task=Task.IMAGE_CLASSIFICATION,
+    )
+
+    # Record Forge Property
+    record_forge_property("model_name", module_name)
+
+    # Load the model and prepare input data
+    framework_model, inputs = load_timm_model(variant)
+
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
+
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
+
+    # Inference
+    output = compiled_model(*inputs)
+
+    # Post processing
+    post_processing(output)
diff --git a/forge/test/models/pytorch/vision/mobilenet/utils/utils.py b/forge/test/models/pytorch/vision/mobilenet/utils/utils.py
index 02ece294a..df7ca59ad 100644
--- a/forge/test/models/pytorch/vision/mobilenet/utils/utils.py
+++ b/forge/test/models/pytorch/vision/mobilenet/utils/utils.py
@@ -3,7 +3,9 @@
 # SPDX-License-Identifier: Apache-2.0
 import os
 import urllib
+from urllib.request import urlopen
 
+import timm
 import torch
 from PIL import Image
 from torchvision import transforms
@@ -42,6 +44,18 @@ def load_mobilenet_model(model_name):
     return model, [input_batch]
 
 
+def load_timm_model(model_name):
+    model = timm.create_model(model_name, pretrained=True)
+
+    img = Image.open(
+        urlopen("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png")
+    )
+    data_config = timm.data.resolve_model_data_config(model)
+    transform = timm.data.create_transform(**data_config, is_training=False)  # avoid shadowing torchvision's transforms
+    input_batch = transform(img).unsqueeze(0)  # unsqueeze single image into batch of 1
+    return model, [input_batch]  # wrap in a list, matching load_mobilenet_model
+
+
 url = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"
diff --git a/forge/test/models/pytorch/vision/resnet/test_resnet.py b/forge/test/models/pytorch/vision/resnet/test_resnet.py
index bad8689f2..9fdd0c93a 100644
--- a/forge/test/models/pytorch/vision/resnet/test_resnet.py
+++ b/forge/test/models/pytorch/vision/resnet/test_resnet.py
@@ -8,7 +8,6 @@
 import torch
 from datasets import load_dataset
 from tabulate import tabulate
-from torchvision.models.resnet import resnet50
 from transformers import AutoImageProcessor, ResNetForImageClassification
 
 import forge
@@ -16,6 +15,11 @@
 from forge.verify.value_checkers import AutomaticValueChecker
 from forge.verify.verify import verify
 
+from test.models.pytorch.vision.resnet.utils.utils import (
+    load_input,
+    load_model,
+    variants_with_weights,
+)
 from test.models.utils import Framework, Source, Task, build_module_name
 from test.utils import download_model
 
@@ -114,23 +118,27 @@ def test_resnet_timm(record_forge_property):
 
 
 @pytest.mark.nightly
-def test_resnet_torchvision(record_forge_property):
-    # Record model details
+@pytest.mark.parametrize("variant", variants_with_weights.keys())
+def test_resnet_torchvision(record_forge_property, variant):
+
+    # Build Module Name
     module_name = build_module_name(
         framework=Framework.PYTORCH,
         model="resnet",
-        source=Source.TORCHVISION,
-        variant="50",
+        variant=variant,
         task=Task.IMAGE_CLASSIFICATION,
+        source=Source.TORCHVISION,
     )
+
+    # Record Forge Property
     record_forge_property("model_name", module_name)
 
-    # Load framework model
-    framework_model = resnet50()
+    # Load model and input
+    framework_model = load_model(variant)
+    inputs = load_input(variants_with_weights[variant])
 
-    # Compile model
-    input_sample = [torch.rand(1, 3, 224, 224)]
-    compiled_model = forge.compile(framework_model, input_sample)
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
 
-    # Verify data on sample input
-    verify(input_sample, framework_model, compiled_model, VerifyConfig(value_checker=AutomaticValueChecker(pcc=0.95)))
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
diff --git a/forge/test/models/pytorch/vision/resnet/utils/utils.py b/forge/test/models/pytorch/vision/resnet/utils/utils.py
new file mode 100644
index 000000000..11ac189ff
--- /dev/null
+++ b/forge/test/models/pytorch/vision/resnet/utils/utils.py
@@ -0,0 +1,33 @@
+# SPDX-FileCopyrightText: (c) 2025 Tenstorrent AI ULC
+#
+# SPDX-License-Identifier: Apache-2.0
+import requests
+import torch
+from PIL import Image
+from torchvision import models
+
+variants_with_weights = {
+    "resnet18": "ResNet18_Weights",
+    "resnet34": "ResNet34_Weights",
+    "resnet50": "ResNet50_Weights",
+    "resnet101": "ResNet101_Weights",
+    "resnet152": "ResNet152_Weights",
+}
+
+
+def load_model(variant):
+    weight_name = variants_with_weights[variant]
+    weights = getattr(models, weight_name).DEFAULT
+    model = getattr(models, variant)(weights=weights)
+    model.eval()
+    return model
+
+
+def load_input(weight_name):
+    weights = getattr(models, weight_name).DEFAULT
+    preprocess = weights.transforms()
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+    img_t = preprocess(image)
+    batch_t = torch.unsqueeze(img_t, 0)
+    return [batch_t]
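The new torchvision utils above (and the retinanet/SSD variants below) all resolve a weights enum by name with getattr and reuse the preprocessing bundled with those weights, so inputs always match what the checkpoint was trained with. A short sketch of the pattern in eager PyTorch (resnet50 chosen as an example; the label lookup via weights.meta is illustrative and not used by the tests):

    # Illustrative sketch of the torchvision weights-enum loading pattern.
    import requests
    import torch
    from PIL import Image
    from torchvision import models

    weights = getattr(models, "ResNet50_Weights").DEFAULT  # same as models.ResNet50_Weights.DEFAULT
    model = getattr(models, "resnet50")(weights=weights)
    model.eval()

    preprocess = weights.transforms()  # preprocessing that matches the checkpoint
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    batch = preprocess(image).unsqueeze(0)

    with torch.no_grad():
        logits = model(batch)
    print(weights.meta["categories"][logits.argmax(1).item()])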
diff --git a/forge/test/models/pytorch/vision/retinanet/test_retinanet.py b/forge/test/models/pytorch/vision/retinanet/test_retinanet.py
index 60783954b..d46b6294c 100644
--- a/forge/test/models/pytorch/vision/retinanet/test_retinanet.py
+++ b/forge/test/models/pytorch/vision/retinanet/test_retinanet.py
@@ -13,6 +13,11 @@
 from test.models.pytorch.vision.retinanet.utils.image_utils import img_preprocess
 from test.models.pytorch.vision.retinanet.utils.model import Model
+from test.models.pytorch.vision.retinanet.utils.utils import (
+    load_input,
+    load_model,
+    variants_with_weights,
+)
 from test.models.utils import Framework, Source, Task, build_module_name
 
 variants = [
@@ -77,3 +82,30 @@ def test_retinanet(record_forge_property, variant):
     # Delete the extracted folder and the zip file
     shutil.rmtree(extracted_path)
     os.remove(local_zip_path)
+
+
+@pytest.mark.nightly
+@pytest.mark.parametrize("variant", variants_with_weights.keys())
+def test_retinanet_torchvision(record_forge_property, variant):
+
+    # Build Module Name
+    module_name = build_module_name(
+        framework=Framework.PYTORCH,
+        model="retinanet",
+        variant=variant,
+        task=Task.IMAGE_CLASSIFICATION,
+        source=Source.TORCHVISION,
+    )
+
+    # Record Forge Property
+    record_forge_property("model_name", module_name)
+
+    # Load model and input
+    framework_model = load_model(variant)
+    inputs = load_input(variants_with_weights[variant])
+
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
+
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
diff --git a/forge/test/models/pytorch/vision/retinanet/utils/utils.py b/forge/test/models/pytorch/vision/retinanet/utils/utils.py
new file mode 100644
index 000000000..f5c637658
--- /dev/null
+++ b/forge/test/models/pytorch/vision/retinanet/utils/utils.py
@@ -0,0 +1,29 @@
+# SPDX-FileCopyrightText: (c) 2025 Tenstorrent AI ULC
+#
+# SPDX-License-Identifier: Apache-2.0
+import requests
+import torch
+from PIL import Image
+from torchvision import models
+
+variants_with_weights = {
+    "retinanet_resnet50_fpn_v2": "RetinaNet_ResNet50_FPN_V2_Weights",
+}
+
+
+def load_model(variant):
+    weight_name = variants_with_weights[variant]
+    weights = getattr(models.detection, weight_name).DEFAULT
+    model = getattr(models.detection, variant)(weights=weights)
+    model.eval()
+    return model
+
+
+def load_input(weight_name):
+    weights = getattr(models.detection, weight_name).DEFAULT
+    preprocess = weights.transforms()
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+    img_t = preprocess(image)
+    batch_t = torch.unsqueeze(img_t, 0)
+    return [batch_t]
diff --git a/forge/test/models/pytorch/vision/ssd300_vgg16/test_ssd300_vgg16.py b/forge/test/models/pytorch/vision/ssd300_vgg16/test_ssd300_vgg16.py
new file mode 100644
index 000000000..519f0d247
--- /dev/null
+++ b/forge/test/models/pytorch/vision/ssd300_vgg16/test_ssd300_vgg16.py
@@ -0,0 +1,41 @@
+# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
+
+# SPDX-License-Identifier: Apache-2.0
+import pytest
+
+import forge
+from forge.verify.verify import verify
+
+from test.models.pytorch.vision.ssd300_vgg16.utils.utils import (
+    load_input,
+    load_model,
+    variants_with_weights,
+)
+from test.models.utils import Framework, Source, Task, build_module_name
+
+
+@pytest.mark.nightly
+@pytest.mark.parametrize("variant", variants_with_weights.keys())
+def test_ssd300_vgg16(record_forge_property, variant):
+
+    # Build Module Name
+    module_name = build_module_name(
+        framework=Framework.PYTORCH,
+        model="ssd300_vgg16",
+        variant=variant,
+        task=Task.IMAGE_CLASSIFICATION,
+        source=Source.TORCHVISION,
+    )
+
+    # Record Forge Property
+    record_forge_property("model_name", module_name)
+
+    # Load model and input
+    framework_model = load_model(variant)
+    inputs = load_input(variants_with_weights[variant])
+
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
+
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
diff --git a/forge/test/models/pytorch/vision/ssd300_vgg16/utils/utils.py b/forge/test/models/pytorch/vision/ssd300_vgg16/utils/utils.py
new file mode 100644
index 000000000..d179f2a4f
--- /dev/null
+++ b/forge/test/models/pytorch/vision/ssd300_vgg16/utils/utils.py
@@ -0,0 +1,29 @@
+# SPDX-FileCopyrightText: (c) 2025 Tenstorrent AI ULC
+#
+# SPDX-License-Identifier: Apache-2.0
+import requests
+import torch
+from PIL import Image
+from torchvision import models
+
+variants_with_weights = {
+    "ssd300_vgg16": "SSD300_VGG16_Weights",
+}
+
+
+def load_model(variant):
+    weight_name = variants_with_weights[variant]
+    weights = getattr(models.detection, weight_name).DEFAULT
+    model = getattr(models.detection, variant)(weights=weights)
+    model.eval()
+    return model
+
+
+def load_input(weight_name):
+    weights = getattr(models.detection, weight_name).DEFAULT
+    preprocess = weights.transforms()
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+    img_t = preprocess(image)
+    batch_t = torch.unsqueeze(img_t, 0)
+    return [batch_t]
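For reference, the detection models wrapped by these tests return per-image dictionaries in eager mode, which is why their utils reuse weights.transforms() rather than a hand-rolled pipeline. A sketch of plain eager inference showing the output structure (illustrative only; the tests themselves just compile and verify):

    # Illustrative sketch of eager inference with a torchvision detection model.
    import requests
    import torch
    from PIL import Image
    from torchvision import models

    weights = models.detection.SSD300_VGG16_Weights.DEFAULT
    model = models.detection.ssd300_vgg16(weights=weights)
    model.eval()

    preprocess = weights.transforms()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    batch = [preprocess(image)]  # detection models take a list of CHW tensors

    with torch.no_grad():
        detections = model(batch)
    # One dict per input image, with 'boxes', 'labels' and 'scores' tensors.
    print(detections[0]["boxes"].shape, detections[0]["scores"][:3])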
diff --git a/forge/test/models/pytorch/vision/ssdlite320_mobilenetv3/test_ssdlite320_mobilenetv3.py b/forge/test/models/pytorch/vision/ssdlite320_mobilenetv3/test_ssdlite320_mobilenetv3.py
new file mode 100644
index 000000000..ed48cf23e
--- /dev/null
+++ b/forge/test/models/pytorch/vision/ssdlite320_mobilenetv3/test_ssdlite320_mobilenetv3.py
@@ -0,0 +1,41 @@
+# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
+
+# SPDX-License-Identifier: Apache-2.0
+import pytest
+
+import forge
+from forge.verify.verify import verify
+
+from test.models.pytorch.vision.ssdlite320_mobilenetv3.utils.utils import (
+    load_input,
+    load_model,
+    variants_with_weights,
+)
+from test.models.utils import Framework, Source, Task, build_module_name
+
+
+@pytest.mark.nightly
+@pytest.mark.parametrize("variant", variants_with_weights.keys())
+def test_ssdlite320_mobilenetv3(record_forge_property, variant):
+
+    # Build Module Name
+    module_name = build_module_name(
+        framework=Framework.PYTORCH,
+        model="ssdlite320_mobilenetv3",
+        variant=variant,
+        task=Task.IMAGE_CLASSIFICATION,
+        source=Source.TORCHVISION,
+    )
+
+    # Record Forge Property
+    record_forge_property("model_name", module_name)
+
+    # Load model and input
+    framework_model = load_model(variant)
+    inputs = load_input(variants_with_weights[variant])
+
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
+
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
diff --git a/forge/test/models/pytorch/vision/ssdlite320_mobilenetv3/utils/utils.py b/forge/test/models/pytorch/vision/ssdlite320_mobilenetv3/utils/utils.py
new file mode 100644
index 000000000..4432e0148
--- /dev/null
+++ b/forge/test/models/pytorch/vision/ssdlite320_mobilenetv3/utils/utils.py
@@ -0,0 +1,29 @@
+# SPDX-FileCopyrightText: (c) 2025 Tenstorrent AI ULC
+#
+# SPDX-License-Identifier: Apache-2.0
+import requests
+import torch
+from PIL import Image
+from torchvision import models
+
+variants_with_weights = {
+    "ssdlite320_mobilenet_v3_large": "SSDLite320_MobileNet_V3_Large_Weights",
+}
+
+
+def load_model(variant):
+    weight_name = variants_with_weights[variant]
+    weights = getattr(models.detection, weight_name).DEFAULT
+    model = getattr(models.detection, variant)(weights=weights)
+    model.eval()
+    return model
+
+
+def load_input(weight_name):
+    weights = getattr(models.detection, weight_name).DEFAULT
+    preprocess = weights.transforms()
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+    img_t = preprocess(image)
+    batch_t = torch.unsqueeze(img_t, 0)
+    return [batch_t]
"ese_vovnet99b"] +variants = ["ese_vovnet19b_dw", "ese_vovnet39b", "ese_vovnet99b", "ese_vovnet19b_dw.ra_in1k"] @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) def test_vovnet_timm_pytorch(record_forge_property, variant): - pytest.skip("Skipping due to the current CI/CD pipeline limitations") # Build Module Name module_name = build_module_name( diff --git a/forge/test/models/pytorch/vision/xception/test_xception.py b/forge/test/models/pytorch/vision/xception/test_xception.py index cc4bc3fee..bf11c6866 100644 --- a/forge/test/models/pytorch/vision/xception/test_xception.py +++ b/forge/test/models/pytorch/vision/xception/test_xception.py @@ -35,14 +35,12 @@ def generate_model_xception_imgcls_timm(variant): return framework_model, [img_tensor] -variants = ["xception", "xception41", "xception65", "xception71"] +variants = ["xception", "xception41", "xception65", "xception71", "xception71.tf_in1k"] @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) def test_xception_timm(record_forge_property, variant): - if variant != "xception": - pytest.skip("Skipping due to the current CI/CD pipeline limitations") # Build Module Name module_name = build_module_name(