Bringup tt-torch models in forge
meenakshiramanathan1 committed Feb 27, 2025
1 parent d3e67bd commit 3456fd3
Showing 22 changed files with 467 additions and 37 deletions.
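
The new timm-based tests added across this commit all follow one bring-up recipe: create the pretrained model with timm, build a one-image input batch from the model's own data config, compile with Forge, verify the compiled model against the framework model, and run inference. The sketch below condenses that pattern from the diffs that follow; it is a minimal illustration, assuming forge.compile, verify, and the timm data-config helpers behave as they are used in the test files in this commit, and bringup_timm_model is a hypothetical helper name, not part of the commit.

# Minimal sketch of the timm bring-up pattern used by the new tests
# (assumes forge.compile / verify behave as used in the test files below).
from urllib.request import urlopen

import timm
from PIL import Image

import forge
from forge.verify.verify import verify


def bringup_timm_model(variant, module_name):
    # Load the pretrained timm model and build a batch-of-1 input tensor
    model = timm.create_model(variant, pretrained=True)
    img = Image.open(
        urlopen("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png")
    )
    data_config = timm.data.resolve_model_data_config(model)
    transform = timm.data.create_transform(**data_config, is_training=False)
    inputs = [transform(img).unsqueeze(0)]  # unsqueeze single image into batch of 1

    # Compile with Forge, check compiled vs. framework outputs, then run inference
    compiled_model = forge.compile(model, sample_inputs=inputs, module_name=module_name)
    verify(inputs, model, compiled_model)
    return compiled_model(*inputs)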
4 changes: 2 additions & 2 deletions env/linux_requirements.txt
@@ -17,11 +17,11 @@ sacrebleu==2.1.0
sacremoses==0.0.53
seaborn
scikit-image==0.20.0 # For DenseNet 121 HF XRay model
segmentation_models_pytorch==0.3.2
segmentation_models_pytorch==0.4.0
sentencepiece==0.2.0
subword-nmt==0.3.8
tensorflow-hub==0.12.0
timm==0.6.12
timm==0.9.16
yolov5==7.0.9
# The CPU versions of torch and torchvision are used because they are
# several GB smaller, which makes a large difference to CI performance
30 changes: 22 additions & 8 deletions forge/test/models/pytorch/text/llama/test_llama3.py
@@ -30,6 +30,8 @@
"meta-llama/Llama-3.1-8B-Instruct",
"meta-llama/Llama-3.2-1B",
"meta-llama/Llama-3.2-1B-Instruct",
"meta-llama/Llama-3.2-3B",
"huggyllama/llama-7b",
]


@@ -145,18 +147,27 @@ def test_llama3_causal_lm(record_forge_property, variant):
tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
tokenizer.pad_token = tokenizer.eos_token
framework_model = download_model(AutoModelForCausalLM.from_pretrained, variant, use_cache=False, return_dict=False)

framework_model.eval()
# Input prompt
input_prompt = "Hey how are you doing today?"

# Tokenize input
inputs = tokenizer(
input_prompt,
return_tensors="pt",
max_length=256,
pad_to_max_length=True,
truncation=True,
)
if variant in ("meta-llama/Llama-3.2-3B", "huggyllama/llama-7b"):
inputs = tokenizer.encode_plus(
input_prompt,
return_tensors="pt",
max_length=32,
padding="max_length",
truncation=True,
)
else:
inputs = tokenizer(
input_prompt,
return_tensors="pt",
max_length=256,
pad_to_max_length=True,
truncation=True,
)
input_ids = inputs["input_ids"]
attn_mask = inputs["attention_mask"]

@@ -181,6 +192,8 @@ def test_llama3_sequence_classification(record_forge_property, variant):
def test_llama3_sequence_classification(record_forge_property, variant):
pytest.skip("Skipping due to the current CI/CD pipeline limitations")

if variant in {"meta-llama/Llama-3.2-3B", "huggyllama/llama-7b"}:
pytest.skip(f"Skipping test for variant {variant}")
# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
@@ -198,6 +211,7 @@ def test_llama3_sequence_classification(record_forge_property, variant):
framework_model = download_model(
AutoModelForSequenceClassification.from_pretrained, variant, use_cache=False, return_dict=False
)
framework_model.eval()

# Input prompt
input_prompt = "Movie is great"
41 changes: 40 additions & 1 deletion forge/test/models/pytorch/vision/dla/test_dla.py
@@ -6,7 +6,11 @@
import forge
from forge.verify.verify import verify

from test.models.pytorch.vision.dla.utils.utils import load_dla_model, post_processing
from test.models.pytorch.vision.dla.utils.utils import (
load_dla_model,
load_timm_model,
post_processing,
)
from test.models.utils import Framework, Source, Task, build_module_name

variants = [
@@ -51,3 +55,38 @@ def test_dla_pytorch(record_forge_property, variant):

# post processing
post_processing(output)


variants = ["dla34.in1k"]


@pytest.mark.nightly
@pytest.mark.parametrize("variant", variants)
def test_dla_timm(record_forge_property, variant):

# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
model="dla",
variant=variant,
source=Source.TIMM,
task=Task.IMAGE_CLASSIFICATION,
)

# Record Forge Property
record_forge_property("model_name", module_name)

# Load the model and prepare input data
framework_model, inputs = load_timm_model(variant)

# Forge compile framework model
compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

# Model Verification
verify(inputs, framework_model, compiled_model)

# Inference
output = compiled_model(*inputs)

# Post processing
post_processing(output)
14 changes: 14 additions & 0 deletions forge/test/models/pytorch/vision/dla/utils/utils.py
@@ -3,8 +3,10 @@
# SPDX-License-Identifier: Apache-2.0
import os
import urllib
from urllib.request import urlopen

import requests
import timm
import torch
import torchvision.transforms as transforms
from PIL import Image
@@ -39,6 +41,18 @@ def load_dla_model(variant):
return framework_model, inputs


def load_timm_model(model_name):
model = timm.create_model(model_name, pretrained=True)

img = Image.open(
urlopen("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png")
)
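# Build the eval-time preprocessing transform from the model's pretrained data config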
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)
input_batch = transforms(img).unsqueeze(0) # unsqueeze single image into batch of 1
return model, input_batch


url = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"


@@ -10,6 +10,7 @@
from test.models.pytorch.vision.efficientnet.utils import (
src_efficientnet_lite as efflite,
)
from test.models.pytorch.vision.efficientnet.utils.utils import load_inputs, load_model
from test.models.utils import Framework, Source, Task, build_module_name


@@ -181,3 +182,45 @@ def test_efficientnet_lite_4_pytorch(record_forge_property):

# Model Verification
verify(inputs, framework_model, compiled_model)


variants = [
"tf_efficientnet_lite0.in1k",
"tf_efficientnet_lite1.in1k",
"tf_efficientnet_lite2.in1k",
"tf_efficientnet_lite3.in1k",
"tf_efficientnet_lite4.in1k",
]


@pytest.mark.nightly
@pytest.mark.parametrize("variant", variants)
def test_efficientnet_lite_timm(record_forge_property, variant):

# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
model="efficientnet_lite",
variant=variant,
source=Source.TIMM,
task=Task.IMAGE_CLASSIFICATION,
)

# Record Forge Property
record_forge_property("model_name", module_name)

# Load the model and prepare input data
framework_model = load_model(variant)
inputs = load_inputs(framework_model)

# Forge compile framework model
compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

# Model Verification
verify(inputs, framework_model, compiled_model)

# Inference
output = compiled_model(*inputs)

# Post processing
post_processing(output)
24 changes: 24 additions & 0 deletions forge/test/models/pytorch/vision/efficientnet/utils/utils.py
@@ -0,0 +1,24 @@
# SPDX-FileCopyrightText: (c) 2025 Tenstorrent AI ULC
#
# SPDX-License-Identifier: Apache-2.0
from urllib.request import urlopen

import timm
from PIL import Image


def load_model(model_name):
model = timm.create_model(model_name, pretrained=True)
return model


def load_inputs(framework_model):
img = Image.open(
urlopen("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png")
)
data_config = timm.data.resolve_model_data_config(framework_model)
transforms = timm.data.create_transform(**data_config, is_training=False)
input_batch = transforms(img).unsqueeze(0) # unsqueeze single image into batch of 1
return input_batch
2 changes: 1 addition & 1 deletion forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py
@@ -13,7 +13,7 @@
)
from test.models.utils import Framework, Source, Task, build_module_name

variants = ["ghostnet_100"]
variants = ["ghostnet_100", "ghostnet_100.in1k", "ghostnetv2_100.in1k"]


@pytest.mark.push
5 changes: 3 additions & 2 deletions forge/test/models/pytorch/vision/hrnet/test_hrnet.py
@@ -148,14 +148,15 @@ def generate_model_hrnet_imgcls_timm_pytorch(variant):
"hrnet_w44",
"hrnet_w48",
"hrnet_w64",
"hrnet_w18.ms_aug_in1k",
]


@pytest.mark.nightly
@pytest.mark.parametrize("variant", variants, ids=variants)
def test_hrnet_timm_pytorch(record_forge_property, variant):
if variant != "hrnet_w18_small":
pytest.skip("Skipping due to the current CI/CD pipeline limitations")
# if variant != "hrnet_w18_small":
# pytest.skip("Skipping due to the current CI/CD pipeline limitations")

# Build Module Name
module_name = build_module_name(
15 changes: 11 additions & 4 deletions forge/test/models/pytorch/vision/inception/test_inception_v4.py
@@ -51,19 +51,26 @@ def generate_model_inceptionV4_imgcls_timm_pytorch(variant):
return framework_model, [img_tensor]


variants = ["inception_v4", "inception_v4.tf_in1k"]


@pytest.mark.nightly
def test_inception_v4_timm_pytorch(record_forge_property):
pytest.skip("Skipping due to the current CI/CD pipeline limitations")
@pytest.mark.parametrize("variant", variants)
def test_inception_v4_timm_pytorch(record_forge_property, variant):

# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH, model="inception", variant="v4", source=Source.TIMM, task=Task.IMAGE_CLASSIFICATION
framework=Framework.PYTORCH,
model="inception",
variant=variant,
source=Source.TIMM,
task=Task.IMAGE_CLASSIFICATION,
)

# Record Forge Property
record_forge_property("model_name", module_name)

framework_model, inputs = generate_model_inceptionV4_imgcls_timm_pytorch("inception_v4")
framework_model, inputs = generate_model_inceptionV4_imgcls_timm_pytorch(variant)

# Forge compile framework model
compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
3 changes: 1 addition & 2 deletions forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py
@@ -27,14 +27,13 @@
"mixer_l32_224",
"mixer_s16_224",
"mixer_s32_224",
"mixer_b16_224.goog_in21k",
]


@pytest.mark.nightly
@pytest.mark.parametrize("variant", varaints, ids=varaints)
def test_mlp_mixer_timm_pytorch(record_forge_property, variant):
if variant != "mixer_b16_224":
pytest.skip("Skipping due to the current CI/CD pipeline limitations")

# Build Module Name
module_name = build_module_name(
36 changes: 36 additions & 0 deletions forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py
@@ -11,6 +11,7 @@

from test.models.pytorch.vision.mobilenet.utils.utils import (
load_mobilenet_model,
load_timm_model,
post_processing,
)
from test.models.utils import Framework, Source, Task, build_module_name
@@ -129,3 +130,38 @@ def test_mobilenetv1_224(record_forge_property, variant):

# Model Verification
verify(inputs, framework_model, compiled_model)


variants = ["mobilenetv1_100.ra4_e3600_r224_in1k"]


@pytest.mark.nightly
@pytest.mark.parametrize("variant", variants)
def test_mobilenet_v1_timm(record_forge_property, variant):

# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
model="mobilenet_v1",
variant=variant,
source=Source.TIMM,
task=Task.IMAGE_CLASSIFICATION,
)

# Record Forge Property
record_forge_property("model_name", module_name)

# Load the model and prepare input data
framework_model, inputs = load_timm_model(variant)

# Forge compile framework model
compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

# Model Verification
verify(inputs, framework_model, compiled_model)

# Inference
output = compiled_model(*inputs)

# Post processing
post_processing(output)
14 changes: 14 additions & 0 deletions forge/test/models/pytorch/vision/mobilenet/utils/utils.py
@@ -3,7 +3,9 @@
# SPDX-License-Identifier: Apache-2.0
import os
import urllib
from urllib.request import urlopen

import timm
import torch
from PIL import Image
from torchvision import transforms
@@ -42,6 +44,18 @@ def load_mobilenet_model(model_name):
return model, [input_batch]


def load_timm_model(model_name):
model = timm.create_model(model_name, pretrained=True)

img = Image.open(
urlopen("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png")
)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)
input_batch = transforms(img).unsqueeze(0) # unsqueeze single image into batch of 1
return model, input_batch


url = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"


(Diff for the remaining changed files is not shown.)
