Port tt-torch models to forge #1337

Draft · wants to merge 1 commit into base: main
4 changes: 2 additions & 2 deletions env/linux_requirements.txt
@@ -17,11 +17,11 @@ sacrebleu==2.1.0
sacremoses==0.0.53
seaborn
scikit-image==0.20.0 # For DenseNet 121 HF XRay model
-segmentation_models_pytorch==0.3.2
+segmentation_models_pytorch==0.4.0
sentencepiece==0.2.0
subword-nmt==0.3.8
tensorflow-hub==0.12.0
-timm==0.6.12
+timm==1.0.9
yolov5==7.0.9
# The CPU versions of torch and torch visions are used due to their size being
# several GB smaller which made a large impact on the performance of CI
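Note: the two pins above move segmentation_models_pytorch from 0.3.2 to 0.4.0 and timm from 0.6.12 to 1.0.9. If it helps when reproducing CI locally, here is a throwaway check (not part of the repo) that an environment actually picked up the new pins:

```python
import importlib.metadata as metadata

# Expected versions per the updated env/linux_requirements.txt.
expected = {"segmentation_models_pytorch": "0.4.0", "timm": "1.0.9"}
for package, pinned in expected.items():
    installed = metadata.version(package)
    print(f"{package}: installed {installed}, pinned {pinned}, match={installed == pinned}")
```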
28 changes: 20 additions & 8 deletions forge/test/models/pytorch/text/llama/test_llama3.py
@@ -30,6 +30,8 @@
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.2-1B",
    "meta-llama/Llama-3.2-1B-Instruct",
+    "meta-llama/Llama-3.2-3B",
+    "huggyllama/llama-7b",
]


@@ -145,18 +147,27 @@ def test_llama3_causal_lm(record_forge_property, variant):
    tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
    tokenizer.pad_token = tokenizer.eos_token
    framework_model = download_model(AutoModelForCausalLM.from_pretrained, variant, use_cache=False, return_dict=False)

+    framework_model.eval()
    # Input prompt
    input_prompt = "Hey how are you doing today?"

    # Tokenize input
-    inputs = tokenizer(
-        input_prompt,
-        return_tensors="pt",
-        max_length=256,
-        pad_to_max_length=True,
-        truncation=True,
-    )
+    if variant in ["meta-llama/Llama-3.2-3B", "huggyllama/llama-7b"]:
+        inputs = tokenizer.encode_plus(
+            input_prompt,
+            return_tensors="pt",
+            max_length=32,
+            padding="max_length",
+            truncation=True,
+        )
+    else:
+        inputs = tokenizer(
+            input_prompt,
+            return_tensors="pt",
+            max_length=256,
+            pad_to_max_length=True,
+            truncation=True,
+        )
    input_ids = inputs["input_ids"]
    attn_mask = inputs["attention_mask"]

@@ -198,6 +209,7 @@ def test_llama3_sequence_classification(record_forge_property, variant):
    framework_model = download_model(
        AutoModelForSequenceClassification.from_pretrained, variant, use_cache=False, return_dict=False
    )
+    framework_model.eval()

    # Input prompt
    input_prompt = "Movie is great"
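Note on the tokenizer change above: pad_to_max_length=True is the older transformers argument and has been deprecated in favor of padding="max_length"; both encode_plus and calling the tokenizer directly accept the newer form. A minimal, self-contained sketch of the fixed-length call used for the new variants (the model name and prompt are taken from the diff, the rest is illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
tokenizer.pad_token = tokenizer.eos_token  # Llama tokenizers ship without a pad token

inputs = tokenizer(
    "Hey how are you doing today?",
    return_tensors="pt",
    max_length=32,
    padding="max_length",  # newer spelling of the deprecated pad_to_max_length=True
    truncation=True,
)
# Both tensors come back padded/truncated to exactly 32 tokens.
assert inputs["input_ids"].shape == (1, 32)
assert inputs["attention_mask"].shape == (1, 32)
```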
30 changes: 30 additions & 0 deletions forge/test/models/pytorch/vision/dla/test_dla.py
@@ -7,6 +7,7 @@
from forge.verify.verify import verify

from test.models.pytorch.vision.dla.utils.utils import load_dla_model, post_processing
+from test.models.pytorch.vision.utils.utils import load_timm_model_and_input
from test.models.utils import Framework, Source, Task, build_module_name

variants = [
@@ -51,3 +52,32 @@ def test_dla_pytorch(record_forge_property, variant):

    # post processing
    post_processing(output)
+
+
+variants = ["dla34.in1k"]
+
+
+@pytest.mark.nightly
+@pytest.mark.parametrize("variant", variants)
+def test_dla_timm(record_forge_property, variant):
+
+    # Build Module Name
+    module_name = build_module_name(
+        framework=Framework.PYTORCH,
+        model="dla",
+        variant=variant,
+        source=Source.TIMM,
+        task=Task.IMAGE_CLASSIFICATION,
+    )
+
+    # Record Forge Property
+    record_forge_property("model_name", module_name)
+
+    # Load the model and inputs
+    framework_model, inputs = load_timm_model_and_input(variant)
+
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
+
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
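load_timm_model_and_input is imported from the shared vision utils and is not shown in this diff. As a rough sketch of how such a helper is usually built on top of timm (this is an assumption about the helper, not its actual implementation; the function name is suffixed to make that explicit):

```python
import timm
from PIL import Image

def load_timm_model_and_input_sketch(variant):
    # Create the pretrained timm model in eval mode.
    model = timm.create_model(variant, pretrained=True)
    model.eval()

    # Build the preprocessing the model expects and apply it to a stand-in
    # image; a real helper would load an actual sample image instead.
    config = timm.data.resolve_data_config({}, model=model)
    transform = timm.data.create_transform(**config)
    image = Image.new("RGB", (320, 320))
    inputs = [transform(image).unsqueeze(0)]  # batch of 1
    return model, inputs
```

Under that assumption, load_timm_model_and_input_sketch("dla34.in1k") would return the (framework_model, inputs) pair the test consumes.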
(file header missing: EfficientNet-Lite test module)
@@ -10,6 +10,7 @@
from test.models.pytorch.vision.efficientnet.utils import (
    src_efficientnet_lite as efflite,
)
+from test.models.pytorch.vision.utils.utils import load_timm_model_and_input
from test.models.utils import Framework, Source, Task, build_module_name


@@ -181,3 +182,38 @@ def test_efficientnet_lite_4_pytorch(record_forge_property):

    # Model Verification
    verify(inputs, framework_model, compiled_model)
+
+
+variants = [
+    "tf_efficientnet_lite0.in1k",
+    "tf_efficientnet_lite1.in1k",
+    "tf_efficientnet_lite2.in1k",
+    "tf_efficientnet_lite3.in1k",
+    "tf_efficientnet_lite4.in1k",
+]
+
+
+@pytest.mark.nightly
+@pytest.mark.parametrize("variant", variants)
+def test_efficientnet_lite_timm(record_forge_property, variant):
+
+    # Build Module Name
+    module_name = build_module_name(
+        framework=Framework.PYTORCH,
+        model="efficientnet_lite",
+        variant=variant,
+        source=Source.TIMM,
+        task=Task.IMAGE_CLASSIFICATION,
+    )
+
+    # Record Forge Property
+    record_forge_property("model_name", module_name)
+
+    # Load the model and inputs
+    framework_model, inputs = load_timm_model_and_input(variant)
+
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
+
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
9 changes: 7 additions & 2 deletions forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py
@@ -14,11 +14,16 @@
from test.models.utils import Framework, Source, Task, build_module_name

variants = ["ghostnet_100"]
+params = [
+    pytest.param("ghostnet_100", marks=[pytest.mark.push]),
+    pytest.param("ghostnet_100.in1k", marks=[pytest.mark.push]),
+    pytest.param("ghostnetv2_100.in1k"),
+]


@pytest.mark.push
@pytest.mark.nightly
-@pytest.mark.parametrize("variant", variants, ids=variants)
+@pytest.mark.parametrize("variant", params)
def test_ghostnet_timm(record_forge_property, variant):
    # Build Module Name
    module_name = build_module_name(
@@ -30,7 +35,7 @@ def test_ghostnet_timm(record_forge_property, variant):
    )

    # Record Forge Property
-    record_forge_property("tags.model_name", module_name)
+    record_forge_property("model_name", module_name)

    # Load the model and prepare input data
    framework_model, inputs = load_ghostnet_model(variant)
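For context on the change above: pytest.param lets individual parametrize cases carry their own marks, so ghostnet_100 and ghostnet_100.in1k keep the push mark while ghostnetv2_100.in1k runs only under the nightly mark on the test itself. A tiny standalone illustration (the case names and the test are made up; push is the repo's custom mark):

```python
import pytest

cases = [
    pytest.param("marked_case", marks=[pytest.mark.push]),  # picked up by -m push
    pytest.param("plain_case"),                             # no per-case mark
]

@pytest.mark.parametrize("value", cases)
def test_cases(value):
    assert isinstance(value, str)
```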
3 changes: 2 additions & 1 deletion forge/test/models/pytorch/vision/hrnet/test_hrnet.py
@@ -148,13 +148,14 @@ def generate_model_hrnet_imgcls_timm_pytorch(variant):
    "hrnet_w44",
    "hrnet_w48",
    "hrnet_w64",
+    "hrnet_w18.ms_aug_in1k",
]


@pytest.mark.nightly
@pytest.mark.parametrize("variant", variants, ids=variants)
def test_hrnet_timm_pytorch(record_forge_property, variant):
-    if variant != "hrnet_w18_small":
+    if variant not in ["hrnet_w18_small", "hrnet_w18.ms_aug_in1k"]:
        pytest.skip("Skipping due to the current CI/CD pipeline limitations")

    # Build Module Name
17 changes: 13 additions & 4 deletions forge/test/models/pytorch/vision/inception/test_inception_v4.py
@@ -51,19 +51,28 @@ def generate_model_inceptionV4_imgcls_timm_pytorch(variant):
    return framework_model, [img_tensor]


+variants = ["inception_v4", "inception_v4.tf_in1k"]
+
+
@pytest.mark.nightly
-def test_inception_v4_timm_pytorch(record_forge_property):
-    pytest.skip("Skipping due to the current CI/CD pipeline limitations")
+@pytest.mark.parametrize("variant", variants)
+def test_inception_v4_timm_pytorch(record_forge_property, variant):
+    if variant != "inception_v4.tf_in1k":
+        pytest.skip("Skipping due to the current CI/CD pipeline limitations")

    # Build Module Name
    module_name = build_module_name(
-        framework=Framework.PYTORCH, model="inception", variant="v4", source=Source.TIMM, task=Task.IMAGE_CLASSIFICATION
+        framework=Framework.PYTORCH,
+        model="inception",
+        variant=variant,
+        source=Source.TIMM,
+        task=Task.IMAGE_CLASSIFICATION,
    )

    # Record Forge Property
    record_forge_property("tags.model_name", module_name)

-    framework_model, inputs = generate_model_inceptionV4_imgcls_timm_pytorch("inception_v4")
+    framework_model, inputs = generate_model_inceptionV4_imgcls_timm_pytorch(variant)

    # Forge compile framework model
    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
(file header missing: MLP-Mixer test module)
@@ -27,13 +27,14 @@
    "mixer_l32_224",
    "mixer_s16_224",
    "mixer_s32_224",
+    "mixer_b16_224.goog_in21k",
]


@pytest.mark.nightly
@pytest.mark.parametrize("variant", varaints, ids=varaints)
def test_mlp_mixer_timm_pytorch(record_forge_property, variant):
-    if variant != "mixer_b16_224":
+    if variant not in ["mixer_b16_224", "mixer_b16_224.goog_in21k"]:
        pytest.skip("Skipping due to the current CI/CD pipeline limitations")

    # Build Module Name
36 changes: 36 additions & 0 deletions forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py
@@ -13,6 +13,7 @@
    load_mobilenet_model,
    post_processing,
)
+from test.models.pytorch.vision.utils.utils import load_timm_model_and_input
from test.models.utils import Framework, Source, Task, build_module_name
from test.utils import download_model

@@ -129,3 +130,38 @@ def test_mobilenetv1_224(record_forge_property, variant):

    # Model Verification
    verify(inputs, framework_model, compiled_model)
+
+
+variants = ["mobilenetv1_100.ra4_e3600_r224_in1k"]
+
+
+@pytest.mark.nightly
+@pytest.mark.parametrize("variant", variants)
+def test_mobilenet_v1_timm(record_forge_property, variant):
+
+    # Build Module Name
+    module_name = build_module_name(
+        framework=Framework.PYTORCH,
+        model="mobilenet_v1",
+        variant=variant,
+        source=Source.TIMM,
+        task=Task.IMAGE_CLASSIFICATION,
+    )
+
+    # Record Forge Property
+    record_forge_property("model_name", module_name)
+
+    # Load the model and inputs
+    framework_model, inputs = load_timm_model_and_input(variant)
+
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
+
+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
+
+    # Inference
+    output = compiled_model(*inputs)
+
+    # Post processing
+    post_processing(output)
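post_processing reuses the helper from the mobilenet utils module, which this diff does not show. A loose sketch of what ImageNet-style post-processing typically amounts to (assumed logic and names, not the repo's code):

```python
import torch

def post_processing_sketch(output, top_k=5):
    # Assumed output layout: first element holds logits of shape [batch, num_classes].
    logits = output[0] if isinstance(output, (list, tuple)) else output
    probabilities = torch.softmax(logits, dim=-1)
    scores, class_ids = torch.topk(probabilities, k=top_k, dim=-1)
    for score, class_id in zip(scores[0].tolist(), class_ids[0].tolist()):
        print(f"class {class_id}: {score:.4f}")
```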
39 changes: 26 additions & 13 deletions forge/test/models/pytorch/vision/resnet/test_resnet.py
@@ -8,14 +8,14 @@
import torch
from datasets import load_dataset
from tabulate import tabulate
-from torchvision.models.resnet import resnet50
from transformers import AutoImageProcessor, ResNetForImageClassification

import forge
from forge.verify.config import VerifyConfig
from forge.verify.value_checkers import AutomaticValueChecker
from forge.verify.verify import verify

+from test.models.pytorch.vision.utils.utils import load_vision_model_and_input
from test.models.utils import Framework, Source, Task, build_module_name
from test.utils import download_model

@@ -113,24 +113,37 @@ def test_resnet_timm(record_forge_property):
    verify(input_sample, framework_model, compiled_model, VerifyConfig(value_checker=AutomaticValueChecker(pcc=0.95)))


+variants_with_weights = {
+    "resnet18": "ResNet18_Weights",
+    "resnet34": "ResNet34_Weights",
+    "resnet50": "ResNet50_Weights",
+    "resnet101": "ResNet101_Weights",
+    "resnet152": "ResNet152_Weights",
+}
+
+
@pytest.mark.nightly
-def test_resnet_torchvision(record_forge_property):
-    # Record model details
+@pytest.mark.parametrize("variant", variants_with_weights.keys())
+def test_resnet_torchvision(record_forge_property, variant):
+
+    # Build Module Name
    module_name = build_module_name(
        framework=Framework.PYTORCH,
        model="resnet",
-        source=Source.TORCHVISION,
-        variant="50",
+        variant=variant,
        task=Task.IMAGE_CLASSIFICATION,
+        source=Source.TORCHVISION,
    )
-    record_forge_property("tags.model_name", module_name)

-    # Load framework model
-    framework_model = resnet50()
+    # Record Forge Property
+    record_forge_property("model_name", module_name)

-    # Compile model
-    input_sample = [torch.rand(1, 3, 224, 224)]
-    compiled_model = forge.compile(framework_model, input_sample)
+    # Load model and input
+    weight_name = variants_with_weights[variant]
+    framework_model, inputs = load_vision_model_and_input(variant, "classification", weight_name)

-    # Verify data on sample input
-    verify(input_sample, framework_model, compiled_model, VerifyConfig(value_checker=AutomaticValueChecker(pcc=0.95)))
+    # Forge compile framework model
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

+    # Model Verification
+    verify(inputs, framework_model, compiled_model)
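load_vision_model_and_input also comes from the shared vision utils rather than being defined here. A plausible sketch of a torchvision classification loader that resolves the weights enum by the name passed in (an assumed implementation suggested by the weight_name argument, not the repo's code):

```python
import torch
import torchvision.models as models

def load_vision_model_and_input_sketch(variant, task, weight_name):
    assert task == "classification"
    # Resolve e.g. "ResNet50_Weights" to torchvision.models.ResNet50_Weights.DEFAULT.
    weights = getattr(models, weight_name).DEFAULT
    model = getattr(models, variant)(weights=weights)
    model.eval()

    # Apply the preprocessing bundled with the weights to a dummy image tensor.
    preprocess = weights.transforms()
    batch = preprocess(torch.rand(1, 3, 256, 256))
    return model, [batch]

# e.g. framework_model, inputs = load_vision_model_and_input_sketch("resnet50", "classification", "ResNet50_Weights")
```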