Update record_forge_property fixture to store property inside the For… #545


GitHub Actions / TT-Forge-FE Tests failed Feb 28, 2025 in 0s

84 tests run, 6 passed, 62 skipped, 16 failed.

Annotations

Check failure on line 81 in forge/test/models/pytorch/audio/whisper/test_whisper.py

github-actions / TT-Forge-FE Tests

test_whisper.test_whisper[openai/whisper-tiny]

AssertionError: Eltwise binary ops must have the same shape in both inputs, or one operand must be 1 wide to broadcast: [1, 3000, 1, 384] vs [1, 384, 3000, 1]
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f34d81d4880>
variant = 'openai/whisper-tiny'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_whisper(record_forge_property, variant):
        if variant != "openai/whisper-tiny":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="whisper",
            variant=variant,
            task=Task.SPEECH_RECOGNITION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # Load model (with tokenizer and feature extractor)
        processor = download_model(AutoProcessor.from_pretrained, variant)
        model_config = WhisperConfig.from_pretrained(variant)
        model = download_model(
            WhisperForConditionalGeneration.from_pretrained,
            variant,
            config=model_config,
        )
        model.config.use_cache = False
    
        # Load and preprocess sample audio
        sample = torch.load("forge/test/models/files/samples/audio/1272-128104-0000.pt")
        sample_audio = sample["audio"]["array"]
    
        inputs = processor(sample_audio, return_tensors="pt")
        input_features = inputs.input_features
    
        # Get decoder inputs
        decoder_start_token_tensor = torch.tensor(model.generation_config.decoder_start_token_id, dtype=torch.long)
        decoder_input_ids = torch.ones((1, 1), dtype=torch.long) * decoder_start_token_tensor
    
        inputs = [input_features, decoder_input_ids]
    
        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
    
            def forward(self, input_features, decoder_input_ids):
                inputs = {"input_features": input_features, "decoder_input_ids": decoder_input_ids}
                output = self.model(**inputs)
                return output.logits
    
        framework_model = Wrapper(model)
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/audio/whisper/test_whisper.py:81: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:251: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:293: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:828: in run_optimization_pass
    run_optimization_graph_passes(graph)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/op/eval/forge/__init__.py:219: in <lambda>
    return lambda *inputs: module_or_class.shape(op_type.op, op_type.attr, *inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

type = 'add', attr = [], ops = [[1, 3000, 1, 384], [1, 384, 3000, 1]]

    def shape(type, attr, ops) -> Tuple[Tuple, List]:
        assert len(ops) == 2, "Eltwise binary should have two inputs"
    
        if type == "binary_stack":
            dim = attr[0]
            assert ops[0] == ops[1]
            output_shape = list(ops[0])
            output_shape[dim] *= 2
            return tuple(output_shape), []
    
        assert len(attr) == 0, "Eltwise binary should have no attributes"
    
        broadcast = []
        output_shape = []
    
        ops[0] = list(ops[0])
        while len(ops[0]) < len(ops[1]):
            ops[0] = [1] + ops[0]
    
        ops[1] = list(ops[1])
        while len(ops[1]) < len(ops[0]):
            ops[1] = [1] + ops[1]
    
        for dim in range(len(ops[0])):
            if ops[0][dim] != ops[1][dim]:
                if ops[1][dim] == 1:
                    broadcast.append((1, dim - len(ops[1]), ops[0][dim]))  # Convert to negative indexing
                    output_shape.append(ops[0][dim])
                else:
                    assert (
>                       ops[0][dim] == 1
                    ), f"Eltwise binary ops must have the same shape in both inputs, or one operand must be 1 wide to broadcast: {ops[0]} vs {ops[1]}"
E                   AssertionError: Eltwise binary ops must have the same shape in both inputs, or one operand must be 1 wide to broadcast: [1, 3000, 1, 384] vs [1, 384, 3000, 1]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/op/eval/forge/eltwise_binary.py:91: AssertionError
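Note: the assertion above enforces the standard right-aligned broadcasting rule. A minimal plain-PyTorch sketch (not Forge code) of why [1, 3000, 1, 384] and [1, 384, 3000, 1] cannot be combined: dimensions are compared from the right and must either match or be 1, and here 3000 vs 384 does neither, which typically points to a missing transpose earlier in the decomposed graph.

import torch

a = torch.zeros(1, 3000, 1, 384)
b = torch.zeros(1, 384, 3000, 1)

# Right-aligned comparison: 384 vs 1 (broadcastable), 1 vs 3000 (broadcastable),
# 3000 vs 384 (neither equal nor 1), so eager PyTorch rejects the add just like
# the Forge eltwise-binary shape pass does.
try:
    _ = a + b
except RuntimeError as err:
    print(err)  # "The size of tensor a ... must match the size of tensor b ..."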

Check failure on line 53 in forge/test/models/pytorch/multimodal/clip/test_clip.py

github-actions / TT-Forge-FE Tests

test_clip.test_clip_pytorch[openai/clip-vit-base-patch32]

RuntimeError: Generated MLIR module failed verification.
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f34c976fe50>
variant = 'openai/clip-vit-base-patch32'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["openai/clip-vit-base-patch32"])
    def test_clip_pytorch(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="clip",
            variant=variant,
            suffix="text",
            source=Source.HUGGINGFACE,
            task=Task.TEXT_GENERATION,
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # Load processor and model from HuggingFace
        model = download_model(CLIPModel.from_pretrained, variant, torchscript=True)
        processor = download_model(CLIPProcessor.from_pretrained, variant)
    
        # Load sample image from the COCO dataset
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
    
        # Process image
        text = [
            "a photo of a cat",
            "a photo of a dog",
        ]
        inputs = processor(text=text, images=image, return_tensors="pt")
    
        inputs = [inputs["input_ids"], inputs["pixel_values"], inputs["attention_mask"]]
        framework_model = CLIPTextWrapper(model)
        inputs = [inputs[0], inputs[2]]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/multimodal/clip/test_clip.py:53: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:251: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:293: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_clip_openai_clip_vit_base_patch32_text_gen_hf_text], graph_name='pt_clip_openai_clip...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f34c96d9170>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Generated MLIR module failed verification.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:985: RuntimeError
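Note: a quick way to confirm that this failure sits in the MLIR lowering rather than in the model itself is to run the text branch eagerly in plain PyTorch. A minimal sketch, assuming CLIPTextWrapper roughly corresponds to the text tower; the TextOnly class below is hypothetical and is not the wrapper used in the test file.

import torch
from transformers import CLIPModel, CLIPProcessor

class TextOnly(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, input_ids, attention_mask):
        # Text encoder only, mirroring the two inputs passed to forge.compile above.
        return self.model.get_text_features(input_ids=input_ids, attention_mask=attention_mask)

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
enc = processor(text=["a photo of a cat", "a photo of a dog"], return_tensors="pt", padding=True)
print(TextOnly(model)(enc["input_ids"], enc["attention_mask"]).shape)  # eager run succeeds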

Check failure on line 69 in forge/test/models/pytorch/text/bart/test_bart.py

github-actions / TT-Forge-FE Tests

test_bart.test_pt_bart_classifier[facebook/bart-large-mnli]

RuntimeError: TT_THROW @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/tt_metal/impl/kernels/kernel.cpp:242: tt::exception
info:
1283 unique+common runtime args targeting kernel reader_concat_stick_layout_interleaved_start_id on (x=0,y=0) are too large. Max allowable is 256
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f3615aaba60>
variant = 'facebook/bart-large-mnli'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["facebook/bart-large-mnli"])
    def test_pt_bart_classifier(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="bart",
            variant=variant,
            task=Task.SEQUENCE_CLASSIFICATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        model = download_model(BartForSequenceClassification.from_pretrained, variant, torchscript=True)
        tokenizer = download_model(BartTokenizer.from_pretrained, variant, pad_to_max_length=True)
        hypothesis = "Most of Mrinal Sen's work can be found in European collections."
        premise = "Calcutta seems to be the only other production center having any pretensions to artistic creativity at all, but ironically you're actually more likely to see the works of Satyajit Ray or Mrinal Sen shown in Europe or North America than in India itself."
    
        # generate inputs
        inputs_dict = tokenizer(
            premise,
            hypothesis,
            truncation=True,
            padding="max_length",
            max_length=256,
            truncation_strategy="only_first",
            return_tensors="pt",
        )
        decoder_input_ids = shift_tokens_right(
            inputs_dict["input_ids"], model.config.pad_token_id, model.config.decoder_start_token_id
        )
        inputs = [inputs_dict["input_ids"], inputs_dict["attention_mask"], decoder_input_ids]
    
        # Compile & feed data
        framework_model = BartWrapper(model.model)
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/bart/test_bart.py:69: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f3615b4c880>
inputs = (tensor([[    0, 15117,  8267,  4349,  1302,     7,    28,     5,   129,    97,   931,  1312,   519,   143, 11857, 197...  1,     1,     1,     1,     1,     1,     1,     1,     1,     1,     1,     1,     1,     1,     1,     1,     1]]))
inputs_and_parameters = [tensor([[    0, 15117,  8267,  4349,  1302,     7,    28,     5,   129,    97,   931,  1312,   519,   143, 11857, 197... [1.00000e-05],
         [1.00000e-05],
         [1.00000e-05],
         [1.00000e-05],
         [1.00000e-05]]]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: TT_THROW @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/tt_metal/impl/kernels/kernel.cpp:242: tt::exception
E       info:
E       1283 unique+common runtime args targeting kernel reader_concat_stick_layout_interleaved_start_id on (x=0,y=0) are too large. Max allowable is 256
E       backtrace:
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libtt_metal.so(+0x143335) [0x7f3524631335]
E        --- tt::tt_metal::v0::Kernel::validate_runtime_args_size(unsigned long, unsigned long, tt::umd::xy_pair const&)
E        --- tt::tt_metal::v0::Kernel::set_runtime_args(tt::umd::xy_pair const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libtt_metal.so(+0x340573) [0x7f352482e573]
E        --- tt::tt_metal::v0::SetRuntimeArgs(tt::tt_metal::v0::Program const&, unsigned int, std::variant<tt::umd::xy_pair, CoreRange, CoreRangeSet> const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
E        --- ttnn::operations::data_movement::detail::concat_multi_core(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, unsigned int, tt::tt_metal::Tensor const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x60fe83) [0x7f35131cce83]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25c0fda) [0x7f351517dfda]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25c13e4) [0x7f351517e3e4]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(_ZN4ttnn16device_operation6detail23launch_on_worker_threadIN2tt8tt_metal9operation23OldInfraDeviceOperationISt6vectorINS4_6TensorESaIS8_EEEENS3_3stl10StrongTypeIhNS_10QueueIdTagEEElNS5_15DeviceOperationISA_EENSB_13tensor_args_tESA_PNS4_2v07IDeviceEEEvT0_T1_RKT2_RKT3_RT4_RT5_+0x2ab) [0x7f351517d72b]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(_ZN4ttnn16device_operation6detail23launch_on_single_deviceIN2tt8tt_metal9operation23OldInfraDeviceOperationISt6vectorINS4_6TensorESaIS8_EEEEEENT_21tensor_return_value_tENS3_3stl10StrongTypeIhNS_10QueueIdTagEEERKNSC_22operation_attributes_tERKNSC_13tensor_args_tE+0x83) [0x7f351517d3e3]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25c02d7) [0x7f351517d2d7]
E        --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::invoke<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(tt::stl::StrongType<unsigned char, ttnn::QueueIdTag>, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25bf49f) [0x7f351517c49f]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(_ZN2tt8tt_metal9operation19run_with_autoformatEONS1_15DeviceOperationISt6vectorINS0_6TensorESaIS4_EEEERKS6_RKS3_IN4ttnn10operations12experimental11auto_format12FormatParamsESaISF_EERKS3_INS0_6LayoutESaISK_EERKS3_ISt8optionalIKS4_ESaISR_EERKS3_ISP_ISF_ESaISW_EERKS3_ISP_IS4_ESaIS11_EENS_3stl10StrongTypeIhNSB_10QueueIdTagEEE+0x503) [0x7f3515174883]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x60f0b5) [0x7f35131cc0b5]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25c6fe2) [0x7f3515183fe2]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25c77cc) [0x7f35151847cc]
E        --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E        --- ttnn::operations::data_movement::concat_impl(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, long, unsigned int, tt::tt_metal::MemoryConfig const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x60947b) [0x7f35131c647b]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x5e8895) [0x7f35131a5895]
E        --- ttnn::operations::data_movement::ConcatOperation::invoke(tt::stl::StrongType<unsigned char, ttnn::QueueIdTag>, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x577513) [0x7f3513134513]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x577421) [0x7f3513134421]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x57737c) [0x7f351313437c]
E        --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x57713c) [0x7f351313413c]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x576bcb) [0x7f3513133bcb]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x576749) [0x7f3513133749]
E        --- ttnn::operations::data_movement::ExecuteRepeatInterleave::invoke(tt::tt_metal::Tensor const&, unsigned int, int, std::optional<tt::tt_metal::MemoryConfig> const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIRRuntime.so(+0xe8614) [0x7f3525230614]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIRRuntime.so(+0xe838a) [0x7f352523038a]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIRRuntime.so(+0xe82dc) [0x7f35252302dc]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25c6fe2) [0x7f3515183fe2]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25c77cc) [0x7f35151847cc]
E        --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIRRuntime.so(+0xe7fe3) [0x7f352522ffe3]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIRRuntime.so(+0xe7b2c) [0x7f352522fb2c]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIRRuntime.so(+0xe75b7) [0x7f352522f5b7]
E        --- tt::runtime::ttnn::operations::data_movement::run(tt::target::ttnn::RepeatInterleaveOp const*, tt::runtime::ttnn::ProgramContext&)
E        --- tt::runtime::ttnn::runProgram(tt::tt_metal::distributed::MeshDevice&, tt::runtime::Binary, unsigned int, std::vector<tt::tt_metal::Tensor*, std::allocator<tt::tt_metal::Tensor*> > const&)
E        --- tt::runtime::ttnn::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E        --- tt::runtime::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E        --- tt::run_binary(tt::runtime::Binary&, int, std::vector<at::Tensor, std::allocator<at::Tensor> > const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x316d40) [0x7f3525670d40]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x316c8e) [0x7f3525670c8e]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xe0d75) [0x7f352543ad75]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x18ab32) [0x55c99f04cb32]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55c99f04339b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x59c7) [0x55c99f03ca97]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0xc4) [0x55c99f042574]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55c99f05762c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x55c99f15f464]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x55c99f05b10b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x55c99f037790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0x122) [0x55c99f05b172]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55c99f05a4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55c99f04261d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55c99f05762c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x55c99f15f464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55c99f04339b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x55c99f03d99e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x8af) [0x55c99f03797f]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55c99f05a4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55c99f04261d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55c99f05762c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x55c99f15f464]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x55c99f05b10b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x55c99f037790]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55c99f05a4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:254: RuntimeError
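Note: the backtrace shows ExecuteRepeatInterleave being lowered through a multi-input concat, whose per-input runtime args overflow the kernel's 256-argument budget. A minimal plain-PyTorch sketch (illustrative only, not TTNN's actual decomposition) of why a repeat_interleave over a long padded dimension can turn into a concat with many inputs, plus an expand/reshape formulation that avoids the multi-input concat entirely:

import torch

x = torch.randn(1, 16, 256, 64)  # e.g. (batch, heads, padded seq_len=256, head_dim)
repeats, dim = 2, 1

# repeat_interleave expressed as a concat of per-slice copies: one concat input
# per slice along `dim`, so the runtime-argument count grows with the dim size.
slices = [x.narrow(dim, i, 1).expand(-1, repeats, -1, -1) for i in range(x.size(dim))]
via_concat = torch.cat(slices, dim=dim)

# Equivalent result with no multi-input concat at all.
via_expand = x.unsqueeze(dim + 1).expand(-1, -1, repeats, -1, -1).reshape(1, 16 * repeats, 256, 64)

assert torch.equal(via_concat, torch.repeat_interleave(x, repeats, dim=dim))
assert torch.equal(via_expand, via_concat)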

Check failure on line 71 in forge/test/models/pytorch/text/nanogpt/test_nanogpt.py

github-actions / TT-Forge-FE Tests

test_nanogpt.test_nanogpt_text_generation[FinancialSupport/NanoGPT]

RuntimeError: Tensor 6 - data type mismatch: expected Float32, got UInt8
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f361598b3a0>
variant = 'FinancialSupport/NanoGPT'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["FinancialSupport/NanoGPT"])
    def test_nanogpt_text_generation(record_forge_property, variant):
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="nanogpt",
            variant=variant,
            task=Task.TEXT_GENERATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # Load the model
        tokenizer = AutoTokenizer.from_pretrained(variant)
        tokenizer.pad_token = tokenizer.eos_token
        model = AutoModel.from_pretrained(variant, ignore_mismatched_sizes=True, use_cache=False, return_dict=False)
    
        # Input prompt
        input_prompt = "The financial market showed signs of volatility"
    
        # Tokenize input
        inputs = tokenizer(
            input_prompt,
            return_tensors="pt",
            max_length=150,
            padding=True,
            truncation=True,
        )
        input_ids = inputs["input_ids"]
        attn_mask = inputs["attention_mask"]
        inputs = [input_ids, attn_mask]
    
        framework_model = Wrapper(model)
    
        # Forge compile framework model
        compiled_model = forge.compile(
            framework_model,
            inputs,
            module_name=module_name,
        )
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/nanogpt/test_nanogpt.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f34c9534e50>
inputs = (tensor([[  464,  3176,  1910,  3751,  5895,   286, 30772]]), tensor([[1, 1, 1, 1, 1, 1, 1]]))
inputs_and_parameters = [tensor([[  464,  3176,  1910,  3751,  5895,   286, 30772]]), tensor([[1, 1, 1, 1, 1, 1, 1]]), tensor([[[0.00130, 0.00...      [1.00000e-05],
         [1.00000e-05],
         [1.00000e-05],
         [1.00000e-05]]]), tensor([0.12500]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 6 - data type mismatch: expected Float32, got UInt8

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:254: RuntimeError
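Note: "Tensor 6" most likely refers to a position in the flat list assembled by __call__ above (user inputs, then ordered constants, then ordered parameters), not to one of the two user inputs alone. A small diagnostic sketch, assuming the CompiledModel attributes shown in the traceback, to find which constant or parameter ends up as UInt8:

def dump_input_dtypes(compiled_model, sample_inputs):
    # Mirror the assembly order used in CompiledModel.__call__ so indices line up
    # with the "Tensor N" position reported by the runtime.
    tensors = [
        *sample_inputs,
        *compiled_model.fwd_compiled_graph_state.get_ordered_constant_tensors(),
        *compiled_model.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
    ]
    for i, t in enumerate(tensors):
        print(f"{i:3d}: dtype={t.dtype}, shape={tuple(t.shape)}")

# usage (hypothetical): dump_input_dtypes(compiled_model, inputs)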

Check failure on line 61 in forge/test/models/pytorch/text/qwen/test_qwen_coder.py

github-actions / TT-Forge-FE Tests

test_qwen_coder.test_qwen_clm[Qwen/Qwen2.5-Coder-0.5B]

RuntimeError: Input count mismatch: expected 533, got 534
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f3615be1510>
variant = 'Qwen/Qwen2.5-Coder-0.5B'

    @pytest.mark.parametrize("variant", variants, ids=variants)
    @pytest.mark.nightly
    def test_qwen_clm(record_forge_property, variant):
        if variant != "Qwen/Qwen2.5-Coder-0.5B":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="qwen_coder", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # Load model and tokenizer
        framework_model = AutoModelForCausalLM.from_pretrained(variant, device_map="cpu")
        framework_model.config.return_dict = False
        tokenizer = AutoTokenizer.from_pretrained(variant)
    
        # Prepare input
        prompt = "write a quick sort algorithm."
        messages = [
            {"role": "system", "content": "You are Qwen, created by TT Cloud. You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ]
        text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    
        # Tokenize and prepare inputs
        model_inputs = tokenizer([text], return_tensors="pt")
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs["attention_mask"]
        inputs = [input_ids, attention_mask]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/qwen/test_qwen_coder.py:61: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f360cf2bee0>
inputs = (tensor([[151644,   8948,    198,   2610,    525,   1207,  16948,     11,   3465,    553,  32744,  14817,     13,   14...), tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
inputs_and_parameters = [tensor([[151644,   8948,    198,   2610,    525,   1207,  16948,     11,   3465,    553,  32744,  14817,     13,   14...., 0., 0.],
          [0., 0., 0.,  ..., 0., 1., 0.],
          [0., 0., 0.,  ..., 0., 0., 1.]]]]), tensor([-1.]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Input count mismatch: expected 533, got 534

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:254: RuntimeError
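Note: the compiled binary expects 533 tensors but __call__ assembles 534, suggesting one extra user input, constant, or parameter is being passed. A small diagnostic sketch (same assumption as above about the CompiledModel attributes) to see which bucket contributes the extra tensor:

def count_assembled_inputs(compiled_model, sample_inputs):
    consts = compiled_model.fwd_compiled_graph_state.get_ordered_constant_tensors()
    params = compiled_model.fwd_compiled_graph_state.get_ordered_parameter_tensors()
    total = len(sample_inputs) + len(consts) + len(params)
    print(f"user inputs={len(sample_inputs)}, constants={len(consts)}, "
          f"parameters={len(params)}, assembled total={total} (runtime expected 533)")

# usage (hypothetical): count_assembled_inputs(compiled_model, inputs)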

Check failure on line 90 in forge/test/models/pytorch/text/t5/test_t5.py

github-actions / TT-Forge-FE Tests

test_t5.test_t5_generation[t5-large]

ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[-20.48772, -10.36655, -13.71507,  ..., -39.06538, -39.12180, -39.08955]]], grad_fn=<UnsafeViewBackward0>), compiled_model=tensor([[[-22.37089, -18.76008, -19.40021,  ..., -60.30701, -60.50323, -60.47663]]])
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f360cd25600>
variant = 't5-large'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_t5_generation(record_forge_property, variant):
        if variant not in {"t5-small", "google/flan-t5-small", "t5-base", "t5-large"}:
            pytest.skip(f"Skipping {variant} due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="t5", variant=variant, task=Task.TEXT_GENERATION, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # Load tokenizer and model from HuggingFace
        # Variants: t5-small, t5-base, t5-large
    
        config = download_model(T5Config.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = T5Config(**config_dict)
        model = download_model(T5ForConditionalGeneration.from_pretrained, variant, config=config)
        tokenizer = AutoTokenizer.from_pretrained(variant)
    
        inputs = tokenizer(
            "summarize: Researchers have extensively studied the benefits of having pets, "
            "particularly dogs, on human health and well-being. Findings suggest that pet ownership "
            "can lead to improved mental health, reduced stress levels, and even physical health benefits "
            "such as lower blood pressure and increased physical activity levels due to regular walks.",
            return_tensors="pt",
        )
    
        input_ids = inputs.input_ids
        decoder_start_token_tensor = torch.tensor(model.generation_config.decoder_start_token_id, dtype=torch.long)
        decoder_input_ids = torch.ones((1, 1), dtype=torch.long) * decoder_start_token_tensor
        inputs = [input_ids, decoder_input_ids]
    
        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
    
            def forward(self, input_ids, decoder_input_ids):
                inputs = {"input_ids": input_ids, "decoder_input_ids": decoder_input_ids}
                output = self.model(**inputs)
                return output
    
        framework_model = Wrapper(model)
    
        # Forge compile
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/t5/test_t5.py:90: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
    verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7f350f51ca30>
fw_out = tensor([[[-20.48772, -10.36655, -13.71507,  ..., -39.06538, -39.12180, -39.08955]]], grad_fn=<UnsafeViewBackward0>)
co_out = tensor([[[-22.37089, -18.76008, -19.40021,  ..., -60.30701, -60.50323, -60.47663]]])

    def check(self, fw_out, co_out):
        if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
>           raise ValueError(
                f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
            )
E           ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[-20.48772, -10.36655, -13.71507,  ..., -39.06538, -39.12180, -39.08955]]], grad_fn=<UnsafeViewBackward0>), compiled_model=tensor([[[-22.37089, -18.76008, -19.40021,  ..., -60.30701, -60.50323, -60.47663]]])

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
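
AutomaticValueChecker delegates to compare_with_golden, which (among other checks) compares a Pearson correlation (PCC) of the two outputs against a threshold. A rough, self-contained sketch of just the PCC part, useful for eyeballing how far apart the two tensors printed above are; it is not forge's full checker:

    import torch

    def pcc(a: torch.Tensor, b: torch.Tensor) -> float:
        # Pearson correlation of the flattened outputs; forge's checker also applies
        # rtol/atol and a dissimilarity threshold on top of this.
        a = a.detach().flatten().float()
        b = b.detach().flatten().float()
        return torch.corrcoef(torch.stack([a, b]))[0, 1].item()

    # fw_out / co_out are the framework and compiled outputs from the failure above:
    # print(pcc(fw_out, co_out))  # a value below the configured threshold triggers the ValueError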

Check failure on line 51 in forge/test/models/pytorch/vision/autoencoder/test_autoencoder.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_autoencoder.test_conv_ae_pytorch

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f360ce4d120>

    @pytest.mark.nightly
    def test_conv_ae_pytorch(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="autoencoder", variant="conv", task=Task.IMAGE_ENCODING, source=Source.GITHUB
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # Instantiate model
        # NOTE: The model has not been pre-trained or fine-tuned.
        # This is for demonstration purposes only.
        framework_model = ConvAE()
    
        # Define transform to normalize data
        transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,)),
            ]
        )
    
        # Load sample from MNIST dataset
        dataset = load_dataset("mnist")
        sample = dataset["train"][0]["image"]
        sample_tensor = transform(sample).unsqueeze(0)
    
        inputs = [sample_tensor]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/autoencoder/test_autoencoder.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:251: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:293: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_autoencoder_conv_img_enc_github], graph_name='pt_autoencoder_conv_img_enc_github', c...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f360a8a97f0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:985: RuntimeError

Check failure on line 106 in forge/test/models/pytorch/vision/densenet/test_densenet.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_densenet.test_densenet_161_pytorch[densenet161]

RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f355e0e9d20>
variant = 'densenet161'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["densenet161"])
    def test_densenet_161_pytorch(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="densenet",
            variant=variant,
            source=Source.TORCHVISION,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # STEP 2: Create Forge module from PyTorch model
        framework_model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet161", pretrained=True)
    
        # STEP 3: Run inference on Tenstorrent device
        img_tensor = get_input_img()
        inputs = [img_tensor]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/densenet/test_densenet.py:106: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f34c5dfc0a0>
inputs = (tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241... ..., -0.95041, -1.49072, -1.38614],
          [-1.28157, -1.42100, -1.22928,  ..., -0.74126, -1.12471, -1.28157]]]]),)
inputs_and_parameters = [tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241....22809e+00, 3.30098e+00, 2.08003e+00, 3.74907e+00, 2.18630e+00, 2.48350e+00, 3.65909e+00]]]], requires_grad=True), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:254: RuntimeError
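
The expected strides [150528, 50176, 224, 1] are those of a contiguous (row-major) 1x3x224x224 tensor, while the trailing "got" strides [..., 1, 672, 3] match a channels-last layout of the same image (the leading stride is immaterial for a batch of 1). A minimal sketch of the difference, assuming the runtime requires row-major inputs:

    import torch

    img = torch.rand(1, 3, 224, 224)
    print(img.stride())  # (150528, 50176, 224, 1) - the layout the runtime expects

    nhwc = img.to(memory_format=torch.channels_last)
    print(nhwc.stride())               # channel stride of 1, matching the "got" layout
    print(nhwc.contiguous().stride())  # back to (150528, 50176, 224, 1)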

Check failure on line 172 in forge/test/models/pytorch/vision/hrnet/test_hrnet.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_hrnet.test_hrnet_timm_pytorch[hrnet_w18_small]

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f34c52b1de0>
variant = 'hrnet_w18_small'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_hrnet_timm_pytorch(record_forge_property, variant):
        if variant != "hrnet_w18_small":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="hrnet", variant=variant, source=Source.TIMM, task=Task.POSE_ESTIMATION
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        framework_model, inputs, _ = generate_model_hrnet_imgcls_timm_pytorch(
            variant,
        )
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/hrnet/test_hrnet.py:172: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:251: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:293: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_hrnet_hrnet_w18_small_pose_estimation_timm], graph_name='pt_hrnet_hrnet_w18_small_po...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f34c61eb170>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:985: RuntimeError

Check failure on line 45 in forge/test/models/pytorch/vision/inception/test_inception_v4.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_inception_v4.test_inception_v4_osmr_pytorch

RuntimeError: Tensor 47 - stride mismatch: expected [1225, 1], got [0, 0]
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f34c527d2d0>

    @pytest.mark.nightly
    def test_inception_v4_osmr_pytorch(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="inception", variant="v4", source=Source.OSMR, task=Task.IMAGE_CLASSIFICATION
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        framework_model, inputs = generate_model_inceptionV4_imgcls_osmr_pytorch("inceptionv4")
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/inception/test_inception_v4.py:45: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f34c629ff70>
inputs = (tensor([[[[-2.08365, -2.03228, -1.99803,  ..., -2.04940, -2.03228, -1.99803],
          [-2.04940, -2.03228, -2.04940... ..., -0.63669, -0.95041, -0.63669],
          [-0.60183, -0.79355, -1.05499,  ..., -0.49725, -1.10728, -0.91556]]]]),)
inputs_and_parameters = [tensor([[[[-2.08365, -2.03228, -1.99803,  ..., -2.04940, -2.03228, -1.99803],
          [-2.04940, -2.03228, -2.04940...0., 0.,  ..., 0., 0., 0.],
          [0., 0., 0.,  ..., 0., 0., 0.],
          [0., 0., 0.,  ..., 0., 0., 0.]]]]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 47 - stride mismatch: expected [1225, 1], got [0, 0]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:254: RuntimeError
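
A stride of [0, 0] is the signature of a broadcast/expanded view rather than materialized memory, while the expected [1225, 1] corresponds to a contiguous tensor whose rows hold 1225 (= 35*35) elements. A small illustration of how zero strides arise and how .contiguous() restores the expected layout; the shapes here are assumptions based only on those numbers:

    import torch

    # An expanded view reuses a single element across the whole shape, so both strides are 0.
    view = torch.zeros(1, 1).expand(64, 1225)
    print(view.stride())               # (0, 0) - what the runtime received for tensor 47
    print(view.contiguous().stride())  # (1225, 1) - the layout it expected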

Check failure on line 40 in forge/test/models/pytorch/vision/regnet/test_regnet.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_regnet.test_regnet[facebook/regnet-y-040]

RuntimeError: Unknown type of tensor: <class 'transformers.modeling_outputs.BaseModelOutputWithPoolingAndNoAttention'>
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f34bea55de0>
variant = 'facebook/regnet-y-040'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["facebook/regnet-y-040"])
    def test_regnet(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="regnet",
            variant=variant,
            source=Source.HUGGINGFACE,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # Load RegNet model
        framework_model = RegNetModel.from_pretrained("facebook/regnet-y-040")
    
        # Preprocess the image
        image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        inputs = preprocess_input_data(image_url, variant)
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/regnet/test_regnet.py:40: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:306: in verify
    fw_out = to_pt_tensors(fw_out)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tensor.py:1150: in to_pt_tensors
    pytorch_tensors.append(to_pt_tensor(t))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

t = BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=tensor([[[[0.00000, 0.00000, 0.00000,  ..., 0.00000, 0.0000....,

         [[0.46296]],

         [[0.45181]],

         [[0.66460]]]], grad_fn=<MeanBackward1>), hidden_states=None)

    def to_pt_tensor(t: AnyTensor) -> torch.Tensor:
        if isinstance(t, torch.Tensor):
            return t
        elif isinstance(t, (tf.Tensor, tf.Variable)):
            pt = torch.Tensor(t.numpy() if t.dtype != tf.bfloat16 else tf.cast(t, tf.float32).numpy()).type(
                map_tf_dtype_to_pt(t.dtype)
            )
            pt.requires_grad = (
                t.trainable if isinstance(t, tf.Variable) else torch.is_complex(pt) or torch.is_floating_point(pt)
            )
            return pt
        elif isinstance(t, Tensor):
            assert t.has_value(), "Expected Forge tensor to have a value"
            return t.value()
        else:
>           raise RuntimeError(f"Unknown type of tensor: {type(t)}")
E           RuntimeError: Unknown type of tensor: <class 'transformers.modeling_outputs.BaseModelOutputWithPoolingAndNoAttention'>

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tensor.py:1170: RuntimeError
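
to_pt_tensor only understands torch, TF, and forge tensors, but RegNetModel returns a transformers ModelOutput dataclass, so verification trips before any comparison happens. One way to sidestep this, mirroring the Wrapper pattern already used by the Whisper and T5 tests above, is to unpack the output into plain tensors; this is a sketch, not the test's current code:

    import torch

    class RegNetWrapper(torch.nn.Module):
        """Return plain tensors so forge's to_pt_tensors can handle the outputs."""

        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, pixel_values):
            out = self.model(pixel_values)
            return out.last_hidden_state, out.pooler_output

    # framework_model = RegNetWrapper(RegNetModel.from_pretrained("facebook/regnet-y-040"))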

Check failure on line 46 in forge/test/models/pytorch/vision/swin/test_swin.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_swin.test_swin_v1_tiny_4_224_hf_pytorch[microsoft/swin-tiny-patch4-window7-224]

AssertionError: Data mismatch on output 0 between framework and Forge codegen
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f34c5162bf0>
variant = 'microsoft/swin-tiny-patch4-window7-224'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["microsoft/swin-tiny-patch4-window7-224"])
    def test_swin_v1_tiny_4_224_hf_pytorch(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="swin",
            variant=variant,
            source=Source.HUGGINGFACE,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        # STEP 1: Create Forge module from PyTorch model
        feature_extractor = ViTImageProcessor.from_pretrained(variant)
        framework_model = SwinForImageClassification.from_pretrained(variant)
        framework_model.eval()
    
        # STEP 2: Prepare input samples
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        inputs = load_image(url, feature_extractor)
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/swin/test_swin.py:46: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:251: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:293: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:671: in generate_initial_graph
    module, module_inputs = convert_to_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:1032: in convert_to_forge_module
    forge_module, dev_types, module_inputs = generate_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:2121: in generate_forge_module
    verify_framework_vs_forge_codegen(framework_outputs, forge_outputs, verify_cfg=verify_cfg)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

frame_outputs = [tensor([[-9.48239e-02, -6.45402e-01, -9.21106e-02, -8.29076e-02, -8.01499e-01, -1.64797e+00, -6.48718e-01, -1.09166e+...0e-01, -1.14277e+00, -1.18404e+00, -8.81640e-01, -9.22460e-01,  1.11558e+00, -6.23075e-01]], grad_fn=<AddmmBackward0>)]
forge_outputs = (Forge Tensor: tensor([[-1.83657e-01, -6.70440e-01, -1.63592e-02, -1.17261e-01, -7.53180e-01, -1.70384e+00, -7.06809e-..., -1.20142e+00, -1.15068e+00, -7.24469e-01,  1.18212e+00, -7.21010e-01]], grad_fn=<AddBackward0>), DataFormat.Float32,)
verify_cfg = DepricatedVerifyConfig(graph_name='graph', enabled=False, intermediates=False, rtol={torch.float32: None, torch.float1...ent_checking=True, enable_parameter_gradient_checking=True, _input_gradient_queue=None, _parameter_gradient_queue=None)

    def verify_framework_vs_forge_codegen(frame_outputs, forge_outputs, verify_cfg):
        from forge.verify.compare import compare_tensor_to_golden
    
        test_pass = True
        for i, (golden, output) in enumerate(zip(frame_outputs, forge_outputs)):
            test_pass &= compare_tensor_to_golden(
                f"Framework vs. Forge codegen output {i}", golden, output.value(), is_forge=False, verify_cfg=verify_cfg
            )
    
>           assert test_pass, f"Data mismatch on output {i} between framework and Forge codegen"
E           AssertionError: Data mismatch on output 0 between framework and Forge codegen

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:1973: AssertionError

Check failure on line 53 in forge/test/models/pytorch/vision/unet/test_unet.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_unet.test_unet_osmr_cityscape_pytorch

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f3615a24cd0>

    @pytest.mark.nightly
    def test_unet_osmr_cityscape_pytorch(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="unet", variant="cityscape", source=Source.OSMR, task=Task.IMAGE_SEGMENTATION
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        framework_model, inputs, _ = generate_model_unet_imgseg_osmr_pytorch("unet_cityscapes")
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/unet/test_unet.py:53: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:251: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:293: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_unet_cityscape_img_seg_osmr], graph_name='pt_unet_cityscape_img_seg_osmr', compiler_...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f34d5f332f0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:985: RuntimeError

Check failure on line 66 in forge/test/models/pytorch/vision/vgg/test_vgg.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_vgg.test_vgg_osmr_pytorch[vgg11]

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f360cf2a140>
variant = 'vgg11'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_vgg_osmr_pytorch(record_forge_property, variant):
        if variant != "vgg11":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="vgg", variant=variant, source=Source.OSMR, task=Task.OBJECT_DETECTION
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        framework_model = download_model(ptcv_get_model, variant, pretrained=True)
        framework_model.eval()
    
        # Image preprocessing
        try:
            torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            input_image = Image.open("dog.jpg")
            preprocess = transforms.Compose(
                [
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                ]
            )
            input_tensor = preprocess(input_image)
            input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
        except:
            logger.warning(
                "Failed to download the image file, replacing input with random tensor. Please check if the URL is up to date"
            )
            input_batch = torch.rand(1, 3, 224, 224)
    
        inputs = [input_batch]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/vgg/test_vgg.py:66: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:251: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:293: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_vgg_vgg11_obj_det_osmr], graph_name='pt_vgg_vgg11_obj_det_osmr', compiler_cfg=Compil...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f34bfd51df0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:985: RuntimeError

Check failure on line 51 in forge/test/models/pytorch/vision/vovnet/test_vovnet.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_vovnet.test_vovnet_osmr_pytorch[vovnet27s]

RuntimeError: TT_FATAL @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp:50: new_volume == old_volume
info:
Invalid arguments to reshape
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f3615a72140>
variant = 'vovnet27s'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", varaints, ids=varaints)
    def test_vovnet_osmr_pytorch(record_forge_property, variant):
        if variant != "vovnet27s":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="vovnet", variant=variant, source=Source.OSMR, task=Task.OBJECT_DETECTION
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        framework_model, inputs, _ = generate_model_vovnet_imgcls_osmr_pytorch(variant)
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/vovnet/test_vovnet.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f34f8bb1f30>
inputs = (tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241... ..., -0.95041, -1.49072, -1.38614],
          [-1.28157, -1.42100, -1.22928,  ..., -0.74126, -1.12471, -1.28157]]]]),)
inputs_and_parameters = [tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241...1.48491, 0.82613, 0.95755, 0.84748, 0.71070, 0.72036, 1.10198, 0.52548, 1.24055, 0.97474]]]], requires_grad=True), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: TT_FATAL @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp:50: new_volume == old_volume
E       info:
E       Invalid arguments to reshape
E       backtrace:
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIRRuntime.so(+0xbd288) [0x7f3525205288]
E        --- tt::tt_metal::infer_dims_for_reshape(tt::tt_metal::Tensor const&, tt::stl::Span<int const, 18446744073709551615ul>)
E        --- ttnn::operations::data_movement::ReshapeViewOperation::invoke(tt::stl::StrongType<unsigned char, ttnn::QueueIdTag>, tt::tt_metal::Tensor const&, tt::stl::Span<int const, 18446744073709551615ul>, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<std::variant<unsigned int, float> > const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIRRuntime.so(+0xe69c2) [0x7f352522e9c2]
E        --- tt::runtime::ttnn::operations::data_movement::run(tt::target::ttnn::ReshapeOp const*, tt::runtime::ttnn::ProgramContext&)
E        --- tt::runtime::ttnn::runProgram(tt::tt_metal::distributed::MeshDevice&, tt::runtime::Binary, unsigned int, std::vector<tt::tt_metal::Tensor*, std::allocator<tt::tt_metal::Tensor*> > const&)
E        --- tt::runtime::ttnn::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E        --- tt::runtime::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E        --- tt::run_binary(tt::runtime::Binary&, int, std::vector<at::Tensor, std::allocator<at::Tensor> > const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x316d40) [0x7f3525670d40]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x316c8e) [0x7f3525670c8e]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xe0d75) [0x7f352543ad75]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x18ab32) [0x55c99f04cb32]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55c99f04339b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x59c7) [0x55c99f03ca97]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0xc4) [0x55c99f042574]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55c99f05762c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x55c99f15f464]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x55c99f05b10b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x55c99f037790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0x122) [0x55c99f05b172]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55c99f05a4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55c99f04261d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55c99f05762c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x55c99f15f464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55c99f04339b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x55c99f03d99e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x8af) [0x55c99f03797f]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55c99f05a4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55c99f04261d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55c99f05762c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x55c99f15f464]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x55c99f05b10b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x55c99f037790]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55c99f05a4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x55c99f037790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55c99f05a4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55c99f04261d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55c99f05762c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x55c99f15f464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55c99f04339b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x55c99f03d99e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55c99f039c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x55c99f0389ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55c99f05a4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55c99f03c702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55c99f04261d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55c99f05762c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x55c99f15f464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55c99f04339b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x55c99f03d99e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x55c99f037790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55c99f04d38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x55c99f037790]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:254: RuntimeError
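
The new_volume == old_volume condition TT_FATAL asserts is the same invariant torch enforces for reshape: the target shape must contain exactly as many elements as the source. A tiny illustration of the invariant; the shapes are made up, and the real offending reshape sits inside the lowered vovnet graph:

    import torch

    x = torch.rand(1, 256, 7, 7)   # 12544 elements
    x.reshape(1, 256 * 7 * 7)      # fine: volumes match
    # x.reshape(1, 1000)           # would raise RuntimeError because 1*1000 != 12544,
    #                              # the same volume check the TT_FATAL above enforces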

Check failure on line 95 in forge/test/models/onnx/vision/yolo/test_yolo_v5.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_yolo_v5.test_yolov5_640x640[yolov5s]

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <test.conftest.ForgePropertyStore object at 0x7f34c961b2e0>
size = 's'

    @pytest.mark.nightly
    @pytest.mark.parametrize("size", size, ids=["yolov5" + s for s in size])
    def test_yolov5_640x640(record_forge_property, size):
        if size != "s":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="yolo_v5",
            variant="yolov5" + size,
            task="imgcls",
            source="torchhub",
            suffix="640x640",
        )
    
        # Record Forge Property
        record_forge_property("tags.model_name", module_name)
    
        framework_model, inputs, _ = generate_model_yoloV5I640_imgcls_torchhub_pytorch(
            "ultralytics/yolov5",
            size=size,
        )
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/yolo/test_yolo_v5.py:95: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:251: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:293: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_yolo_v5_yolov5s_imgcls_torchhub_640x640], graph_name='pt_yolo_v5_yolov5s_imgcls_torc...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f34ade56b30>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:985: RuntimeError